Diffstat (limited to 'drivers/gpu/drm/amd/display')
265 files changed, 11117 insertions, 5447 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 2d8e55e29637..901d1961b739 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -5,10 +5,10 @@ menu "Display Engine Configuration"
 config DRM_AMD_DC
 	bool "AMD DC - Enable new display engine"
 	default y
-	depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
+	depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
 	select SND_HDA_COMPONENT if SND_HDA_CORE
 	# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
-	select DRM_AMD_DC_FP if (X86 || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+	select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
 	help
 	  Choose this option if you want to use the new display engine
 	  support for AMDGPU. This adds required support for Vega and
@@ -42,16 +42,13 @@ config DEBUG_KERNEL_DC
 	  Choose this option if you want to hit kdgb_break in assert.
 
 config DRM_AMD_SECURE_DISPLAY
-	bool "Enable secure display support"
-	depends on DEBUG_FS
-	depends on DRM_AMD_DC_FP
-	help
-	    Choose this option if you want to
-	    support secure display
-
-	    This option enables the calculation
-	    of crc of specific region via debugfs.
-	    Cooperate with specific DMCU FW.
+	bool "Enable secure display support"
+	depends on DEBUG_FS
+	depends on DRM_AMD_DC_FP
+	help
+	  Choose this option if you want to support secure display
+	  This option enables the calculation of crc of specific region via
+	  debugfs. Cooperate with specific DMCU FW.
 
 endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 249b073f6a23..8bf94920d23e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,7 +38,7 @@ AMDGPUDM += dc_fpu.o
 endif
 
 ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
 endif
 
 AMDGPUDM += amdgpu_dm_hdcp.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7acd73e5004f..868946dd7ef1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -65,6 +65,7 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
 #include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -245,51 +246,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
  */
 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
+	struct amdgpu_crtc *acrtc = NULL;
+
 	if (crtc >= adev->mode_info.num_crtc)
 		return 0;
-	else {
-		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-		if (acrtc->dm_irq_params.stream == NULL) {
-			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
-				  crtc);
-			return 0;
-		}
+	acrtc = adev->mode_info.crtcs[crtc];
 
-		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+	if (!acrtc->dm_irq_params.stream) {
+		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+			  crtc);
+		return 0;
 	}
+
+	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
 }
 
 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 				  u32 *vbl, u32 *position)
 {
 	u32 v_blank_start, v_blank_end, h_position, v_position;
+	struct amdgpu_crtc *acrtc = NULL;
 
 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 		return -EINVAL;
-	else {
-		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-		if (acrtc->dm_irq_params.stream == NULL) {
-			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
-				  crtc);
-			return 0;
-		}
+	acrtc = adev->mode_info.crtcs[crtc];
 
-		/*
-		 * TODO rework base driver to use values directly.
-		 * for now parse it back into reg-format
-		 */
-		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
-					 &v_blank_start,
-					 &v_blank_end,
-					 &h_position,
-					 &v_position);
-
-		*position = v_position | (h_position << 16);
-		*vbl = v_blank_start | (v_blank_end << 16);
+	if (!acrtc->dm_irq_params.stream) {
+		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+			  crtc);
+		return 0;
 	}
 
+	/*
+	 * TODO rework base driver to use values directly.
+	 * for now parse it back into reg-format
+	 */
+	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+				 &v_blank_start,
+				 &v_blank_end,
+				 &h_position,
+				 &v_position);
+
+	*position = v_position | (h_position << 16);
+	*vbl = v_blank_start | (v_blank_end << 16);
+
 	return 0;
 }
 
@@ -365,6 +367,14 @@ static inline void reverse_planes_order(struct dc_surface_update *array_of_surfa
  * adjustments and preparation before calling it. This function is a wrapper
  * for the dc_update_planes_and_stream that does any required configuration
  * before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
  */
 static inline bool update_planes_and_stream_adapter(struct dc *dc,
 						    int update_type,
@@ -416,12 +426,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
 
 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
-						 amdgpu_crtc->pflip_status,
-						 AMDGPU_FLIP_SUBMITTED,
-						 amdgpu_crtc->crtc_id,
-						 amdgpu_crtc);
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+			     amdgpu_crtc->pflip_status,
+			     AMDGPU_FLIP_SUBMITTED,
+			     amdgpu_crtc->crtc_id,
+			     amdgpu_crtc);
 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return;
 	}
@@ -875,7 +885,7 @@ static int dm_set_powergating_state(void *handle,
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1264,17 +1274,21 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 
 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
-	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
-	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
-	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
-	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
-	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+						   AMDGPU_GPU_PAGE_SHIFT);
+	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+						  AMDGPU_GPU_PAGE_SHIFT);
+	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+						 AMDGPU_GPU_PAGE_SHIFT);
+	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+						AMDGPU_GPU_PAGE_SHIFT);
+
+	page_table_base.high_part = upper_32_bits(pt_base);
 	page_table_base.low_part = lower_32_bits(pt_base);
 
 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1339,6 +1353,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 	if (amdgpu_in_reset(adev))
 		goto skip;
 
+	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+		goto skip;
+	}
+
 	mutex_lock(&adev->dm.dc_lock);
 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
 		dc_link_dp_handle_automated_test(dc_link);
@@ -1357,8 +1380,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 					DP_TEST_RESPONSE,
 					&test_response.raw,
 					sizeof(test_response));
-	}
-	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
 			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		/* offload_work->data is from handle_hpd_rx_irq->
@@ -1546,7 +1568,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
 
-	if(amdgpu_dm_irq_init(adev)) {
+	if (amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
 		goto error;
 	}
@@ -1646,11 +1668,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
 
-	/* Disable SubVP + DRR config by default */
-	init_data.flags.disable_subvp_drr = true;
-	if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
-		init_data.flags.disable_subvp_drr = false;
-
 	init_data.flags.seamless_boot_edp_requested = false;
 
 	if (check_seamless_boot_capability(adev)) {
@@ -1672,9 +1689,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	adev->dm.dc = dc_create(&init_data);
 
 	if (adev->dm.dc) {
-		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
+		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
 	} else {
-		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+		DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
 		goto error;
 	}
@@ -1691,9 +1710,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 		adev->dm.dc->debug.disable_stutter = true;
 
-	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
 		adev->dm.dc->debug.disable_dsc = true;
-	}
 
 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 		adev->dm.dc->debug.disable_clock_gate = true;
@@ -1776,12 +1794,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		dc_init_callbacks(adev->dm.dc, &init_params);
 	}
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
-	if (!adev->dm.secure_display_ctxs) {
-		DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
-	}
-#endif
 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 		init_completion(&adev->dm.dmub_aux_transfer_done);
 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
@@ -1816,9 +1828,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	 * It is expected that DMUB will resend any pending notifications at this point, for
 	 * example HPD from DPIA.
 	 */
-	if (dc_is_dmub_outbox_supported(adev->dm.dc))
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 		dc_enable_dmub_outbox(adev->dm.dc);
 
+		/* DPIA trace goes to dmesg logs only if outbox is enabled */
+		if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+			dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+	}
+
 	if (amdgpu_dm_initialize_drm_device(adev)) {
 		DRM_ERROR(
 		"amdgpu: failed to initialize sw for display support.\n");
@@ -1840,6 +1857,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+	if (!adev->dm.secure_display_ctxs)
+		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
 
 	DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -1938,8 +1960,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	mutex_destroy(&adev->dm.audio_lock);
 	mutex_destroy(&adev->dm.dc_lock);
 	mutex_destroy(&adev->dm.dpia_aux_lock);
-
-	return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1948,7 +1968,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	int r;
 	const struct dmcu_firmware_header_v1_0 *hdr;
 
-	switch(adev->asic_type) {
+	switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -2320,14 +2340,62 @@ static int dm_late_init(void *handle)
 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
 
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret;
+	u8 guid[16];
+	u64 tmp64;
+
+	mutex_lock(&mgr->lock);
+	if (!mgr->mst_primary)
+		goto out_fail;
+
+	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+				 DP_MST_EN |
+				 DP_UP_REQ_EN |
+				 DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	/* Some hubs forget their guids after they resume */
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+	if (ret != 16) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	if (memchr_inv(guid, 0, 16) == NULL) {
+		tmp64 = get_jiffies_64();
+		memcpy(&guid[0], &tmp64, sizeof(u64));
+		memcpy(&guid[8], &tmp64, sizeof(u64));
+
+		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+		if (ret != 16) {
+			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+			goto out_fail;
+		}
+	}
+
+	memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+	mutex_unlock(&mgr->lock);
+}
+
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	struct drm_dp_mst_topology_mgr *mgr;
-	int ret;
-	bool need_hotplug = false;
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -2349,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 			if (!dp_is_lttpr_present(aconnector->dc_link))
 				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
-			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
-			if (ret < 0) {
-				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
-					aconnector->dc_link);
-				need_hotplug = true;
-			}
+			/* TODO: move resume_mst_branch_status() into drm mst resume again
+			 * once topology probing work is pulled out from mst resume into mst
+			 * resume 2nd step. mst resume 2nd step should be called after old
+			 * state getting restored (i.e. drm_atomic_helper_resume()).
+			 */
+			resume_mst_branch_status(mgr);
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-
-	if (need_hotplug)
-		drm_kms_helper_hotplug_event(dev);
 }
 
 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@@ -2705,7 +2770,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 		struct dc_stream_update stream_update;
-	} * bundle;
+	} *bundle;
 	int k, m;
 
 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2735,8 +2800,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 
 cleanup:
 	kfree(bundle);
-
-	return;
 }
 
 static int dm_resume(void *handle)
@@ -2756,7 +2819,8 @@ static int dm_resume(void *handle)
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct dc_state *dc_state;
-	int i, r, j;
+	int i, r, j, ret;
+	bool need_hotplug = false;
 
 	if (amdgpu_in_reset(adev)) {
 		dc_state = dm->cached_dc_state;
@@ -2854,7 +2918,7 @@ static int dm_resume(void *handle)
 			continue;
 
 		/*
-		 * this is the case when traversing through already created
+		 * this is the case when traversing through already created end sink
 		 * MST connectors, should be skipped
 		 */
 		if (aconnector && aconnector->mst_root)
@@ -2914,6 +2978,27 @@ static int dm_resume(void *handle)
 
 	dm->cached_state = NULL;
 
+	/* Do mst topology probing after resuming cached state*/
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_root)
+			continue;
+
+		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+		if (ret < 0) {
+			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+					aconnector->dc_link);
+			need_hotplug = true;
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(ddev);
+
 	amdgpu_dm_irq_resume_late(adev);
 
 	amdgpu_dm_smu_write_watermarks_table(adev);
@@ -2950,8 +3035,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
 	.major = 1,
 	.minor = 0,
@@ -2996,9 +3080,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 	caps->aux_support = false;
 
-	if (caps->ext_caps->bits.oled == 1 /*||
-	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+	if (caps->ext_caps->bits.oled == 1
+	    /*
+	     * ||
+	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+	     */)
 		caps->aux_support = true;
 
 	if (amdgpu_backlight == 0)
@@ -3232,84 +3319,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-	u8 dret;
-	bool new_irq_handled = false;
-	int dpcd_addr;
-	int dpcd_bytes_to_read;
-
-	const int max_process_count = 30;
-	int process_count = 0;
-
-	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-		/* DPCD 0x200 - 0x201 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT;
-	} else {
-		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT_ESI;
-	}
-
-	dret = drm_dp_dpcd_read(
-		&aconnector->dm_dp_aux.aux,
-		dpcd_addr,
-		esi,
-		dpcd_bytes_to_read);
-
-	while (dret == dpcd_bytes_to_read &&
-		process_count < max_process_count) {
-		u8 retry;
-		dret = 0;
-
-		process_count++;
-
-		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-		/* handle HPD short pulse irq */
-		if (aconnector->mst_mgr.mst_state)
-			drm_dp_mst_hpd_irq(
-				&aconnector->mst_mgr,
-				esi,
-				&new_irq_handled);
-
-		if (new_irq_handled) {
-			/* ACK at DPCD to notify down stream */
-			const int ack_dpcd_bytes_to_write =
-				dpcd_bytes_to_read - 1;
-
-			for (retry = 0; retry < 3; retry++) {
-				u8 wret;
-
-				wret = drm_dp_dpcd_write(
-					&aconnector->dm_dp_aux.aux,
-					dpcd_addr + 1,
-					&esi[1],
-					ack_dpcd_bytes_to_write);
-				if (wret == ack_dpcd_bytes_to_write)
-					break;
-			}
-
-			/* check if there is new irq to be handled */
-			dret = drm_dp_dpcd_read(
-				&aconnector->dm_dp_aux.aux,
-				dpcd_addr,
-				esi,
-				dpcd_bytes_to_read);
-
-			new_irq_handled = false;
-		} else {
-			break;
-		}
-	}
-
-	if (process_count == max_process_count)
-		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
 							union hpd_irq_data hpd_irq_data)
 {
@@ -3371,7 +3380,23 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-			dm_handle_mst_sideband_msg(aconnector);
+			bool skip = false;
+
+			/*
+			 * DOWN_REP_MSG_RDY is also handled by polling method
+			 * mgr->cbs->poll_hpd_irq()
+			 */
+			spin_lock(&offload_wq->offload_lock);
+			skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+			if (!skip)
+				offload_wq->is_handling_mst_msg_rdy_event = true;
+
+			spin_unlock(&offload_wq->offload_lock);
+
+			if (!skip)
+				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
 			goto out;
 		}
 
@@ -3462,7 +3487,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 		aconnector = to_amdgpu_dm_connector(connector);
 		dc_link = aconnector->dc_link;
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 			int_params.irq_source = dc_link->irq_source_hpd;
 
@@ -3471,7 +3496,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 					(void *) aconnector);
 		}
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 
 			/* Also register for DP short pulse (hpd_rx). */
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3480,11 +3505,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 
 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 					handle_hpd_rx_irq,
 					(void *) aconnector);
-
-			if (adev->dm.hpd_rx_offload_wq)
-				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
-					aconnector;
 		}
+
+		if (adev->dm.hpd_rx_offload_wq)
+			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+				aconnector;
 	}
 }
 
@@ -3497,7 +3522,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3511,11 +3536,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 * coming from DC hardware.
 	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 * for acknowledging and handling. */
+	 * for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
 		if (r) {
 			DRM_ERROR("Failed to add crtc irq id!\n");
 			return r;
@@ -3523,7 +3549,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 
 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 		int_params.irq_source =
-			dc_interrupt_to_irq_source(dc, i+1 , 0);
+			dc_interrupt_to_irq_source(dc, i + 1, 0);
 
 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 
@@ -3579,7 +3605,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	if (adev->family >= AMDGPU_FAMILY_AI)
 		client_id = SOC15_IH_CLIENTID_DCE;
@@ -3596,7 +3622,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 * coming from DC hardware.
 	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 * for acknowledging and handling. */
+	 * for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4043,7 +4070,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 }
 
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
-				unsigned *min, unsigned *max)
+				unsigned int *min, unsigned int *max)
 {
 	if (!caps)
 		return 0;
@@ -4063,7 +4090,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
 					uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4076,7 +4103,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
 				      uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4142,6 +4169,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 
 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
 					 int bl_idx)
 {
+	int ret;
 	struct amdgpu_dm_backlight_caps caps;
 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
 
@@ -4156,13 +4184,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
 		if (!rc)
 			return dm->brightness[bl_idx];
 		return convert_brightness_to_user(&caps, avg);
-	} else {
-		int ret = dc_link_get_backlight_level(link);
-
-		if (ret == DC_ERROR_UNEXPECTED)
-			return dm->brightness[bl_idx];
-		return convert_brightness_to_user(&caps, ret);
 	}
+
+	ret = dc_link_get_backlight_level(link);
+
+	if (ret == DC_ERROR_UNEXPECTED)
+		return dm->brightness[bl_idx];
+
+	return convert_brightness_to_user(&caps, ret);
 }
 
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4309,6 +4338,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	const struct dc_plane_cap *plane;
 	bool psr_feature_enabled = false;
+	bool replay_feature_enabled = false;
 	int max_overlay = dm->dc->caps.max_slave_planes;
 
 	dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4418,6 +4448,20 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		}
 	}
 
+	if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+		switch (adev->ip_versions[DCE_HWIP][0]) {
+		case IP_VERSION(3, 1, 4):
+		case IP_VERSION(3, 1, 5):
+		case IP_VERSION(3, 1, 6):
+		case IP_VERSION(3, 2, 0):
+		case IP_VERSION(3, 2, 1):
+			replay_feature_enabled = true;
+			break;
+		default:
+			replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+			break;
+		}
+	}
 	/* loops over all connectors on the board */
 	for (i = 0; i < link_cnt; i++) {
 		struct dc_link *link = NULL;
@@ -4466,6 +4510,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 				amdgpu_dm_update_connector_after_detect(aconnector);
 				setup_backlight_device(dm, aconnector);
 
+				/*
+				 * Disable psr if replay can be enabled
+				 */
+				if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
+					psr_feature_enabled = false;
+
 				if (psr_feature_enabled)
 					amdgpu_dm_set_psr_caps(link);
@@ -4556,7 +4606,6 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_atomic_private_obj_fini(&dm->atomic_obj);
-	return;
 }
 
 /******************************************************************************
@@ -5057,11 +5106,7 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
 				      s32 y, s32 width, s32 height,
 				      int *i, bool ffu)
 {
-	if (*i > DC_MAX_DIRTY_RECTS)
-		return;
-
-	if (*i == DC_MAX_DIRTY_RECTS)
-		goto out;
+	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
 
 	dirty_rect->x = x;
 	dirty_rect->y = y;
@@ -5077,7 +5122,6 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
 			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
 			plane->base.id, x, y, width, height);
 
-out:
 	(*i)++;
 }
 
@@ -5164,6 +5208,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 
 	*dirty_regions_changed = bb_changed;
 
+	if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+		goto ffu;
+
 	if (bb_changed) {
 		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
 				   new_plane_state->crtc_x,
@@ -5193,9 +5240,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 				   new_plane_state->crtc_h, &i, false);
 	}
 
-	if (i > DC_MAX_DIRTY_RECTS)
-		goto ffu;
-
 	flip_addrs->dirty_rect_count = i;
 	return;
 
@@ -5331,21 +5375,44 @@ get_aspect_ratio(const struct drm_display_mode *mode_in)
 }
 
 static enum dc_color_space
-get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+		       const struct drm_connector_state *connector_state)
 {
 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
 
-	switch (dc_crtc_timing->pixel_encoding) {
-	case PIXEL_ENCODING_YCBCR422:
-	case PIXEL_ENCODING_YCBCR444:
-	case PIXEL_ENCODING_YCBCR420:
-	{
+	switch (connector_state->colorspace) {
+	case DRM_MODE_COLORIMETRY_BT601_YCC:
+		if (dc_crtc_timing->flags.Y_ONLY)
+			color_space = COLOR_SPACE_YCBCR601_LIMITED;
+		else
+			color_space = COLOR_SPACE_YCBCR601;
+		break;
+	case DRM_MODE_COLORIMETRY_BT709_YCC:
+		if (dc_crtc_timing->flags.Y_ONLY)
+			color_space = COLOR_SPACE_YCBCR709_LIMITED;
+		else
+			color_space = COLOR_SPACE_YCBCR709;
+		break;
+	case DRM_MODE_COLORIMETRY_OPRGB:
+		color_space = COLOR_SPACE_ADOBERGB;
+		break;
+	case DRM_MODE_COLORIMETRY_BT2020_RGB:
+	case DRM_MODE_COLORIMETRY_BT2020_YCC:
+		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+		else
+			color_space = COLOR_SPACE_2020_YCBCR;
+		break;
+	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+	default:
+		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+			color_space = COLOR_SPACE_SRGB;
 		/*
 		 * 27030khz is the separation point between HDTV and SDTV
 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
 		 * respectively
 		 */
-		if (dc_crtc_timing->pix_clk_100hz > 270300) {
+		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
 			if (dc_crtc_timing->flags.Y_ONLY)
 				color_space =
 					COLOR_SPACE_YCBCR709_LIMITED;
@@ -5358,15 +5425,6 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
 			else
 				color_space = COLOR_SPACE_YCBCR601;
 		}
-
-	}
-	break;
-	case PIXEL_ENCODING_RGB:
-		color_space = COLOR_SPACE_SRGB;
-		break;
-
-	default:
-		WARN_ON(1);
 		break;
 	}
 
@@ -5379,6 +5437,7 @@ static bool adjust_colour_depth_from_display_info(
 {
 	enum dc_color_depth depth = timing_out->display_color_depth;
 	int normalized_clk;
+
 	do {
 		normalized_clk = timing_out->pix_clk_100hz / 10;
 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5505,7 +5564,7 @@ static void fill_stream_properties_from_drm_display_mode(
 		}
 	}
 
-	stream->output_color_space = get_output_color_space(timing_out);
+	stream->output_color_space = get_output_color_space(timing_out, connector_state);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -5594,6 +5653,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
 	struct dc_sink *sink = NULL;
+
 	sink_init_data.link = aconnector->dc_link;
 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5717,7 +5777,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 		return &aconnector->freesync_vid_base;
 
 	/* Find the preferred mode */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
 			m_pref = m;
 			break;
@@ -5741,7 +5801,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 	 * For some monitors, preferred mode is not the mode with highest
 	 * supported refresh rate.
 	 */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		current_refresh = drm_mode_vrefresh(m);
 
 		if (m->hdisplay == m_pref->hdisplay &&
@@ -5834,6 +5894,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
 				edp_min_bpp_x16, edp_max_bpp_x16,
 				dsc_caps,
 				&stream->timing,
+				dc_link_get_highest_encoding_format(aconnector->dc_link),
 				&bw_range)) {
 
 		if (bw_range.max_kbps < link_bw_in_kbps) {
@@ -5842,6 +5903,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
 					&dsc_options,
 					0,
 					&stream->timing,
+					dc_link_get_highest_encoding_format(aconnector->dc_link),
 					&dsc_cfg)) {
 				stream->timing.dsc_cfg = dsc_cfg;
 				stream->timing.flags.DSC = 1;
@@ -5856,6 +5918,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
 				&dsc_options,
 				link_bw_in_kbps,
 				&stream->timing,
+				dc_link_get_highest_encoding_format(aconnector->dc_link),
 				&dsc_cfg)) {
 			stream->timing.dsc_cfg = dsc_cfg;
 			stream->timing.flags.DSC = 1;
@@ -5899,12 +5962,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 						&dsc_options,
 						link_bandwidth_kbps,
 						&stream->timing,
+						dc_link_get_highest_encoding_format(aconnector->dc_link),
 						&stream->timing.dsc_cfg)) {
 			stream->timing.flags.DSC = 1;
 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
 		}
 	} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-		timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+		timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+				dc_link_get_highest_encoding_format(aconnector->dc_link));
 		max_supported_bw_in_kbps = link_bandwidth_kbps;
 		dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
 
@@ -5916,6 +5981,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 						&dsc_options,
 						dsc_max_supported_bw_in_kbps,
 						&stream->timing,
+						dc_link_get_highest_encoding_format(aconnector->dc_link),
 						&stream->timing.dsc_cfg)) {
 				stream->timing.flags.DSC = 1;
 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
@@ -5947,15 +6013,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_display_mode *preferred_mode = NULL;
 	struct drm_connector *drm_connector;
-	const struct drm_connector_state *con_state =
-		dm_state ? &dm_state->base : NULL;
+	const struct drm_connector_state *con_state = &dm_state->base;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode;
 	struct drm_display_mode saved_mode;
 	struct drm_display_mode *freesync_mode = NULL;
 	bool native_mode_found = false;
 	bool recalculate_timing = false;
-	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+	bool scale = dm_state->scaling != RMX_OFF;
 	int mode_refresh;
 	int preferred_refresh = 0;
 	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
@@ -6014,12 +6079,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 * This may not be an error, the use case is when we have no
 		 * usermode calls to reset and set mode upon hotplug. In this
 		 * case, we call set mode ourselves to restore the previous mode
-		 * and the modelist may not be filled in in time.
+		 * and the modelist may not be filled in time.
		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
-		recalculate_timing = amdgpu_freesync_vid_mode &&
-				 is_freesync_video_mode(&mode, aconnector);
+		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
 			drm_mode_copy(&saved_mode, &mode);
@@ -6034,13 +6098,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (recalculate_timing)
 		drm_mode_set_crtcinfo(&saved_mode, 0);
-	else if (!dm_state)
-		drm_mode_set_crtcinfo(&mode, 0);
 
 	/*
-	* If scaling is enabled and refresh rate didn't change
-	* we copy the vic and polarities of the old timings
-	*/
+	 * If scaling is enabled and refresh rate didn't change
+	 * we copy the vic and polarities of the old timings
+	 */
 	if (!scale || mode_refresh != preferred_refresh)
 		fill_stream_properties_from_drm_display_mode(
 			stream, &mode, &aconnector->base, con_state, NULL,
@@ -6075,7 +6137,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
 
-	if (stream->link->psr_settings.psr_feature_enabled) {
+	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
 		//
 		// should decide stream support vsc sdp colorimetry capability
 		// before building vsc info packet
@@ -6347,6 +6409,31 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
 	return 0;
 }
 
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct dc_link *dc_link = aconnector->dc_link;
+	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+	struct edid *edid;
+
+	if (!connector->edid_override)
+		return;
+
+	drm_edid_override_connector_update(&aconnector->base);
+	edid = aconnector->base.edid_blob_ptr->data;
+	aconnector->edid = edid;
+
+	/* Update emulated (virtual) sink's EDID */
+	if (dc_em_sink && dc_link) {
+		memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+		memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+		dm_helpers_parse_edid_caps(
+			dc_link,
+			&dc_em_sink->dc_edid,
+			&dc_em_sink->edid_caps);
+	}
+}
+
 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 	.reset = amdgpu_dm_connector_funcs_reset,
 	.detect = amdgpu_dm_connector_detect,
@@ -6357,7 +6444,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
 	.late_register = amdgpu_dm_connector_late_register,
-	.early_unregister = amdgpu_dm_connector_unregister
+	.early_unregister = amdgpu_dm_connector_unregister,
+	.force = amdgpu_dm_connector_funcs_force
 };
 
 static int get_modes(struct drm_connector *connector)
@@ -6374,11 +6462,19 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 	struct edid *edid;
 
 	if (!aconnector->base.edid_blob_ptr) {
-		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
-				aconnector->base.name);
+		/* if connector->edid_override valid, pass
+		 * it to edid_override to edid_blob_ptr
+		 */
 
-		aconnector->base.force = DRM_FORCE_OFF;
-		return;
+		drm_edid_override_connector_update(&aconnector->base);
+
+		if (!aconnector->base.edid_blob_ptr) {
+			DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+					aconnector->base.name);
+
+			aconnector->base.force = DRM_FORCE_OFF;
+			return;
+		}
 	}
 
 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
@@ -6563,7 +6659,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
 		goto fail;
 	}
 
-	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	stream = create_validate_stream_for_sink(aconnector, mode,
+						 to_dm_connector_state(connector->state),
+						 NULL);
 	if (stream) {
 		dc_stream_release(stream);
 		result = MODE_OK;
@@ -6657,6 +6757,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
 	if (!crtc)
 		return 0;
 
+	if (new_con_state->colorspace != old_con_state->colorspace) {
+		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(new_crtc_state))
+			return PTR_ERR(new_crtc_state);
+
+		new_crtc_state->mode_changed = true;
+	}
+
 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
 		struct dc_info_packet hdr_infopacket;
 
@@ -6679,7 +6787,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
 		 * set is permissible, however. So only force a
 		 * modeset if we're entering or exiting HDR.
 		 */
-		new_crtc_state->mode_changed =
+		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
 			!old_con_state->hdr_output_metadata ||
 			!new_con_state->hdr_output_metadata;
 	}
@@ -6760,6 +6868,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 
 	if (!state->duplicated) {
 		int max_bpc = conn_state->max_requested_bpc;
+
 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
 			  aconnector->force_yuv420_output;
 		color_depth = convert_color_depth_from_display_info(connector,
@@ -7078,7 +7187,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_display_mode *m;
 
-	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
 		if (drm_mode_equal(m, mode))
 			return true;
 	}
@@ -7168,7 +7277,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 		to_amdgpu_dm_connector(connector);
 
-	if (!(amdgpu_freesync_vid_mode && edid))
+	if (!edid)
 		return;
 
 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7196,13 +7305,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 		drm_add_modes_noedid(connector, 1920, 1080);
 	} else {
 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
-		/* most eDP supports only timings from its edid,
-		 * usually only detailed timings are available
-		 * from eDP edid. timings which are not from edid
-		 * may damage eDP
-		 */
-		if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
-			amdgpu_dm_connector_add_common_modes(encoder, connector);
+		amdgpu_dm_connector_add_common_modes(encoder, connector);
 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
 	}
 	amdgpu_dm_fbc_init(connector);
@@ -7210,6 +7313,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 	return amdgpu_dm_connector->num_modes;
 }
 
+static const u32 supported_colorspaces =
+	BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+	BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+	BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+	BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				     struct amdgpu_dm_connector *aconnector,
 				     int connector_type,
@@ -7238,6 +7347,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
 	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
 	mutex_init(&aconnector->hpd_lock);
+	mutex_init(&aconnector->handle_mst_msg_ready);
 
 	/*
 	 * configure support HPD hot plug connector_>polled default value is 0
@@ -7290,6 +7400,15 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				adev->mode_info.abm_level_property, 0);
 	}
 
+	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+			drm_connector_attach_colorspace_property(&aconnector->base);
+	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+		   connector_type == DRM_MODE_CONNECTOR_eDP) {
+		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+			drm_connector_attach_colorspace_property(&aconnector->base);
+	}
+
 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -7388,7 +7507,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
 	link->priv = aconnector;
 
-	DRM_DEBUG_DRIVER("%s()\n", __func__);
 
 	i2c = create_i2c(link->ddc, link->link_index, &res);
 	if (!i2c) {
@@ -7761,7 +7879,7 @@ static void update_freesync_state_on_stream(
 
 	aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
 
-	if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+	if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
 		pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
 
 		if (aconn->vsdb_info.amd_vsdb_version == 1)
@@ -7916,7 +8034,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
-				    struct dc_state *dc_state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
 				    struct drm_crtc *pcrtc,
@@ -8028,7 +8145,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->surface_updates[planes_count].plane_info =
 			&bundle->plane_infos[planes_count];
 
-		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
 			fill_dc_dirty_rects(plane, old_plane_state,
 					    new_plane_state, new_crtc_state,
 					    &bundle->flip_addrs[planes_count],
@@ -8059,7 +8177,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * Only allow immediate flips for fast updates that don't
 		 * change memory domain, FB pitch, DCC state, rotation or
 		 * mirroring.
+		 *
+		 * dm_crtc_helper_atomic_check() only accepts async flips with
+		 * fast updates.
 		 */
+		if (crtc->state->async_flip &&
+		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+			drm_warn_once(state->dev,
+				      "[PLANE:%d:%s] async flip with non-fast update\n",
+				      plane->base.id, plane->name);
+
 		bundle->flip_addrs[planes_count].flip_immediate =
 			crtc->state->async_flip &&
 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8102,8 +8230,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * DRI3/Present extension with defined target_msc.
 			 */
 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-		}
-		else {
+		} else {
 			/* For variable refresh rate mode only:
 			 * Get vblank of last completed flip to avoid > 1 vrr
 			 * flips per video frame by use of throttling, but allow
@@ -8389,55 +8516,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 }
 
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+				     struct dc_state *dc_state)
 {
 	struct drm_device *dev = state->dev;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_display_manager *dm = &adev->dm;
-	struct dm_atomic_state *dm_state;
-	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
-	u32 i, j;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-	unsigned long flags;
-	bool wait_for_vblank = true;
-	struct drm_connector *connector;
-	struct drm_connector_state *old_con_state, *new_con_state;
 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-	int crtc_disable_count = 0;
 	bool mode_set_reset_required = false;
-	int r;
-
-	trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
-	r = drm_atomic_helper_wait_for_fences(dev, state, false);
-	if (unlikely(r))
-		DRM_ERROR("Waiting for fences timed out!");
-
-	drm_atomic_helper_update_legacy_modeset_state(dev, state);
-	drm_dp_mst_atomic_wait_for_dependencies(state);
-
-	dm_state = dm_atomic_get_new_state(state);
-	if (dm_state && dm_state->context) {
-		dc_state = dm_state->context;
-	} else {
-		/* No state changes, retain current state. */
-		dc_state_temp = dc_create_state(dm->dc);
-		ASSERT(dc_state_temp);
-		dc_state = dc_state_temp;
-		dc_resource_state_copy_construct_current(dm->dc, dc_state);
-	}
+	u32 i;
 
-	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-				       new_crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+				      new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8460,9 +8552,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
 		drm_dbg_state(state->dev,
-			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-			"connectors_changed:%d\n",
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 			acrtc->crtc_id,
 			new_crtc_state->enable,
 			new_crtc_state->active,
@@ -8535,24 +8625,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	} /* for_each_crtc_in_state() */
 
-	if (dc_state) {
-		/* if there mode set or reset, disable eDP PSR */
-		if (mode_set_reset_required) {
-			if (dm->vblank_control_workqueue)
-				flush_workqueue(dm->vblank_control_workqueue);
+	/* if there mode set or reset, disable eDP PSR */
+	if (mode_set_reset_required) {
+		if (dm->vblank_control_workqueue)
+			flush_workqueue(dm->vblank_control_workqueue);
 
-			amdgpu_dm_psr_disable_all(dm);
-		}
+		amdgpu_dm_psr_disable_all(dm);
+	}
 
-		dm_enable_per_frame_crtc_master_sync(dc_state);
-		mutex_lock(&dm->dc_lock);
-		WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+	dm_enable_per_frame_crtc_master_sync(dc_state);
+	mutex_lock(&dm->dc_lock);
+	WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
 
-		/* Allow idle optimization when vblank count is 0 for display off */
-		if (dm->active_vblank_irq_count == 0)
-			dc_allow_idle_optimizations(dm->dc, true);
-		mutex_unlock(&dm->dc_lock);
-	}
+	/* Allow idle optimization when vblank count is 0 for display off */
+	if (dm->active_vblank_irq_count == 0)
+		dc_allow_idle_optimizations(dm->dc, true);
+	mutex_unlock(&dm->dc_lock);
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8572,6 +8660,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			acrtc->otg_inst = status->primary_otg_inst;
 		}
 	}
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failures here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct dm_atomic_state *dm_state;
+	struct dc_state *dc_state = NULL;
+	u32 i, j;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	unsigned long flags;
+	bool wait_for_vblank = true;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_con_state, *new_con_state;
+	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+	int crtc_disable_count = 0;
+
+	trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, state);
+	drm_dp_mst_atomic_wait_for_dependencies(state);
+
+	dm_state = dm_atomic_get_new_state(state);
+	if (dm_state && dm_state->context) {
+		dc_state = dm_state->context;
+		amdgpu_dm_commit_streams(state, dc_state);
+	}
+
 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8694,13 +8820,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-		struct dc_surface_update dummy_updates[MAX_SURFACES];
+		struct dc_surface_update *dummy_updates;
 		struct dc_stream_update stream_update;
 		struct dc_info_packet hdr_packet;
 		struct dc_stream_status *status = NULL;
 		bool abm_changed, hdr_changed, scaling_changed;
 
-		memset(&dummy_updates, 0, sizeof(dummy_updates));
 		memset(&stream_update, 0, sizeof(stream_update));
 
 		if (acrtc) {
@@ -8759,6 +8884,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		 * Here we create an empty update on each plane.
 		 * To fix this, DC should permit updating only stream properties.
 		 */
+		dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
 		for (j = 0; j < status->plane_count; j++)
 			dummy_updates[j].surface = status->plane_states[0];
 
@@ -8770,6 +8896,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 					    dm_new_crtc_state->stream,
 					    &stream_update);
 		mutex_unlock(&dm->dc_lock);
+		kfree(dummy_updates);
 	}
 
 	/**
@@ -8848,8 +8975,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
 		if (dm_new_crtc_state->stream)
-			amdgpu_dm_commit_planes(state, dc_state, dev,
-						dm, crtc, wait_for_vblank);
+			amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
 	}
 
 	/* Update audio instances for each connector. */
@@ -8884,10 +9010,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
-	/* return the stolen vga memory back to VRAM */
-	if (!adev->mman.keep_stolen_vga_memory)
-		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+	/* Don't free the memory if we are hitting this as part of suspend.
+	 * This way we don't free any memory during suspend; see
+	 * amdgpu_bo_free_kernel(). The memory will be freed in the first
+	 * non-suspend modeset or when the driver is torn down.
+	 */
+	if (!adev->in_suspend) {
+		/* return the stolen vga memory back to VRAM */
+		if (!adev->mman.keep_stolen_vga_memory)
+			amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+	}
 
 	/*
 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
@@ -8897,9 +9030,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	for (i = 0; i < crtc_disable_count; i++)
 		pm_runtime_put_autosuspend(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
-
-	if (dc_state_temp)
-		dc_release_state(dc_state_temp);
 }
 
 static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -9031,8 +9161,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
 					&commit->flip_done, 10*HZ);
 
 		if (ret == 0)
-			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-				  "timed out\n", crtc->base.id, crtc->name);
+			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+				  crtc->base.id, crtc->name);
 
 		drm_crtc_commit_put(commit);
 	}
@@ -9117,7 +9247,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 	return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
 	u64 num, den, res;
 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
@@ -9221,8 +9352,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		 * TODO: Refactor this function to allow this check to work
 		 * in all conditions.
 		 */
-		if (amdgpu_freesync_vid_mode &&
-		    dm_new_crtc_state->stream &&
+		if (dm_new_crtc_state->stream &&
 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
 			goto skip_modeset;
 
@@ -9240,9 +9370,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			goto skip_modeset;
 
 		drm_dbg_state(state->dev,
-			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-			"connectors_changed:%d\n",
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 			acrtc->crtc_id,
 			new_crtc_state->enable,
 			new_crtc_state->active,
@@ -9264,27 +9392,27 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		}
 
 		/* Now check if we should set freesync video mode */
-		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+		if (dm_new_crtc_state->stream &&
+		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
 		    is_timing_unchanged_for_freesync(new_crtc_state,
 						     old_crtc_state)) {
 			new_crtc_state->mode_changed = false;
 			DRM_DEBUG_DRIVER(
-				"Mode change not required for front porch change, "
-				"setting mode_changed to %d",
+				"Mode change not required for front porch change, setting mode_changed to %d",
 				new_crtc_state->mode_changed);
 
 			set_freesync_fixed_config(dm_new_crtc_state);
 
 			goto skip_modeset;
-		} else if (amdgpu_freesync_vid_mode && aconnector &&
+		} else if (aconnector &&
 			   is_freesync_video_mode(&new_crtc_state->mode,
 						  aconnector)) {
 			struct drm_display_mode *high_mode;
 
 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
-			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
 				set_freesync_fixed_config(dm_new_crtc_state);
-			}
 		}
 
 		ret = dm_atomic_get_state(state, &dm_state);
@@ -9452,6 +9580,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 	 */
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -9550,11 +9679,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, } /* Core DRM takes care of checking FB modifiers, so we only need to - * check tiling flags when the FB doesn't have a modifier. */ + * check tiling flags when the FB doesn't have a modifier. + */ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { if (adev->family < AMDGPU_FAMILY_AI) { linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && - AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && + AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; } else { linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; @@ -9688,8 +9818,8 @@ static int dm_update_plane_state(struct dc *dc, if (plane->type == DRM_PLANE_TYPE_OVERLAY) { if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; - else - *is_top_most_overlay = false; + + *is_top_most_overlay = false; } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", @@ -9776,12 +9906,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the underlying planes'. */ + * blending properties match the underlying planes'. + */ new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); - if (!new_cursor_state || !new_cursor_state->fb) { + if (!new_cursor_state || !new_cursor_state->fb) return 0; - } dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; @@ -9826,6 +9956,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { if (!conn_state->crtc) conn_state = old_conn_state; @@ -10017,6 +10148,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + if (old_plane_state->fb && new_plane_state->fb && + get_mem_type(old_plane_state->fb) != + get_mem_type(new_plane_state->fb)) + lock_and_validation_needed = true; + ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, @@ -10260,13 +10396,24 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } /* Store the overall update type for use later in atomic check. */ - for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* + * Only allow async flips for fast updates that don't change + * the FB pitch, the DCC state, rotation, etc. 
+ */ + if (new_crtc_state->async_flip && lock_and_validation_needed) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + dm_new_crtc_state->update_type = lock_and_validation_needed ? - UPDATE_TYPE_FULL : - UPDATE_TYPE_FAST; + UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; } /* Must be success */ @@ -10282,7 +10429,7 @@ fail: else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); else - DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); + DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); trace_amdgpu_dm_atomic_check_finish(state, ret); @@ -10336,7 +10483,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); + res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA parser failed\n"); return false; @@ -10438,6 +10585,41 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, return ret; } +static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + while (j < EDID_LENGTH) { + struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; + unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); + + if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && + amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { + vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? 
true : false; + vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; + DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); + + return true; + } + j++; + } + + return false; +} + static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { @@ -10573,6 +10755,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, freesync_capable = true; } } + parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + + if (vsdb_info.replay_mode) { + amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; + amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; + amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; + } + } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { @@ -10786,3 +10976,13 @@ bool check_seamless_boot_capability(struct amdgpu_device *adev) return false; } + +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); +} + +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 2e2413fd73a4..9e4cc5eeda76 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -51,6 +51,9 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A +#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -75,6 +78,12 @@ struct dmub_srv; struct dc_plane_state; struct dmub_notification; +struct amd_vsdb_block { + unsigned char ieee_id[3]; + unsigned char version; + unsigned char feature_caps; +}; + struct common_irq_params { struct amdgpu_device *adev; enum dc_irq_source irq_src; @@ -195,6 +204,11 @@ struct hpd_rx_irq_offload_work_queue { */ bool is_handling_link_loss; /** + * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message + * ready event when we're already handling mst message ready event + */ + bool is_handling_mst_msg_rdy_event; + /** * @aconnector: The aconnector that this work queue is attached to */ struct amdgpu_dm_connector *aconnector; @@ -604,6 +618,11 @@ struct amdgpu_hdmi_vsdb_info { * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz */ unsigned int max_refresh_rate_hz; + + /** + * @replay_mode: Replay supported + */ + bool replay_mode; }; struct amdgpu_dm_connector { @@ -638,6 +657,8 @@ struct amdgpu_dm_connector { struct drm_dp_mst_port *mst_output_port; struct amdgpu_dm_connector *mst_root; struct drm_dp_aux *dsc_aux; + struct mutex handle_mst_msg_ready; + /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; @@ -661,10 +682,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; -#ifdef CONFIG_DEBUG_FS - uint32_t debugfs_dpcd_address; - uint32_t debugfs_dpcd_size; -#endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; union 
dp_downstream_port_present mst_downstream_port_present; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 27711743c22c..52ecfa746b54 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -83,12 +83,15 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream) { struct drm_device *drm_dev = crtc->dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + bool was_activated; spin_lock_irq(&drm_dev->event_lock); + was_activated = acrtc->dm_irq_params.window_param.activated; acrtc->dm_irq_params.window_param.x_start = 0; acrtc->dm_irq_params.window_param.y_start = 0; acrtc->dm_irq_params.window_param.x_end = 0; @@ -97,6 +100,14 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) acrtc->dm_irq_params.window_param.update_win = false; acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; spin_unlock_irq(&drm_dev->event_lock); + + /* Disable secure_display if it was enabled */ + if (was_activated) { + /* stop ROI update on this crtc */ + flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work); + flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work); + dc_stream_forward_crc_window(stream, NULL, true); + } } static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) @@ -112,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); crtc = secure_display_ctx->crtc; - if (!crtc) { + if (!crtc) return; - } psp = &drm_to_adev(crtc->dev)->psp; @@ -140,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); if (!ret) { - if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); - } } mutex_unlock(&psp->securedisplay_context.mutex); @@ -204,9 +213,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source) { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - int i; -#endif struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); @@ -220,19 +226,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - /* Disable secure_display if it was enabled */ - if (!enable) { - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->dm.secure_display_ctxs[i].crtc == crtc) { - /* stop ROI update on this crtc */ - flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); - flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); - dc_stream_forward_crc_window(stream_state, NULL, true); - } - } - } -#endif if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, NULL, enable, enable)) { 
ret = -EINVAL; @@ -363,7 +356,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Reset secure_display when we change crc source from debugfs */ - amdgpu_dm_set_crc_window_default(crtc); + amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); #endif if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index 935adca6f048..748e80ef40d0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -100,7 +100,7 @@ struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts( #else #define amdgpu_dm_crc_window_is_activated(x) #define amdgpu_dm_crtc_handle_crc_window_irq(x) -#define amdgpu_dm_crtc_secure_display_create_contexts() +#define amdgpu_dm_crtc_secure_display_create_contexts(x) #endif #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 440fc0869a34..97b7a0b8a1c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -29,6 +29,7 @@ #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_trace.h" @@ -123,7 +124,12 @@ static void vblank_control_worker(struct work_struct *work) * fill_dc_dirty_rects(). */ if (vblank_work->stream && vblank_work->stream->link) { - if (vblank_work->enable) { + /* + * Prioritize replay, instead of psr + */ + if (vblank_work->stream->link->replay_settings.replay_feature_enabled) + amdgpu_dm_replay_enable(vblank_work->stream, false); + else if (vblank_work->enable) { if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && vblank_work->stream->link->psr_settings.psr_allow_active) amdgpu_dm_psr_disable(vblank_work->stream); @@ -132,6 +138,7 @@ static void vblank_control_worker(struct work_struct *work) #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && #endif + vblank_work->stream->link->panel_config.psr.disallow_replay && vblank_work->acrtc->dm_irq_params.allow_psr_entry) { amdgpu_dm_psr_enable(vblank_work->stream); } @@ -398,6 +405,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + /* + * Only allow async flips for fast updates that don't change the FB + * pitch, the DCC state, rotation, etc. 
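+ * For example, a flip to a framebuffer with a different pitch, DCC state, or + * rotation is not a fast update and is rejected below with -EINVAL.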
+ */ + if (crtc_state->async_flip && + dm_crtc_state->update_type != UPDATE_TYPE_FAST) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 827fcb4fb3b3..7c21e21bcc51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -336,6 +336,153 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, return size; } +static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector) +{ + bool is_end_device = false; + struct drm_dp_mst_topology_mgr *mgr = NULL; + struct drm_dp_mst_port *port = NULL; + + if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; + + drm_modeset_lock(&mgr->base.lock, NULL); + if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) + is_end_device = true; + drm_modeset_unlock(&mgr->base.lock); + } + + return is_end_device; +} + +/* Change MST link setting + * + * valid lane count value: 1, 2, 4 + * valid link rate value: + * 06h = 1.62Gbps per lane + * 0Ah = 2.7Gbps per lane + * 0Ch = 3.24Gbps per lane + * 14h = 5.4Gbps per lane + * 1Eh = 8.1Gbps per lane + * 3E8h = 10.0Gbps per lane + * 546h = 13.5Gbps per lane + * 7D0h = 20.0Gbps per lane + * + * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * for example, to force to 2 lane, 10.0Gbps, + * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * Valid input will trigger a hotplug event to get the new link setting applied + * Invalid input will trigger a training setting reset + * + * Usage follows the link_settings entry + * + */ +static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct dc_link *link = aconnector->dc_link; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (!dp_mst_is_end_device(aconnector)) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data could not be read\n"); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + switch (param[1]) { + case LINK_RATE_LOW: + case LINK_RATE_HIGH: + case LINK_RATE_RBR2: + case LINK_RATE_HIGH2: + case LINK_RATE_HIGH3: + case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: + break; + default: + valid_input = false; + break; + } + + if (!valid_input) { + kfree(wr_buf); +
DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.use_link_rate_set = false; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.link_rate = param[1]; + + /* skip immediate retrain, and train to new link setting after hotplug event triggered */ + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); + mutex_unlock(&adev->dm.dc_lock); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_OFF; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + msleep(100); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_UNSPECIFIED; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + kfree(wr_buf); + return size; +} + /* function: get current DP PHY settings: voltage swing, pre-emphasis, * post-cursor2 (defined by VESA DP specification) * @@ -907,6 +1054,61 @@ unlock: DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); /* + * Returns the current colorspace for the crtc. + * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace + */ +static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) +{ + struct drm_crtc *crtc = m->private; + struct drm_device *dev = crtc->dev; + struct dm_crtc_state *dm_crtc_state = NULL; + int res = -ENODEV; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + switch (dm_crtc_state->stream->output_color_space) { + case COLOR_SPACE_SRGB: + seq_puts(m, "sRGB"); + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + seq_puts(m, "BT601_YCC"); + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + seq_puts(m, "BT709_YCC"); + break; + case COLOR_SPACE_ADOBERGB: + seq_puts(m, "opRGB"); + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + seq_puts(m, "BT2020_RGB"); + break; + case COLOR_SPACE_2020_YCBCR: + seq_puts(m, "BT2020_YCC"); + break; + default: + goto unlock; + } + res = 0; + +unlock: + drm_modeset_unlock(&crtc->mutex); + mutex_unlock(&dev->mode_config.mutex); + + return res; +} +DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace); + + +/* * Example usage: * Disable dsc passthrough, i.e.,: have dsc decoding at converver, not external RX * echo 1 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough @@ -1039,88 +1241,6 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b return write_size; } -static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_address)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_address, - buf, sizeof(connector->debugfs_dpcd_address)); - - return size - r; -} - -static ssize_t dp_dpcd_size_write(struct file *f, const char 
__user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_size)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_size, - buf, sizeof(connector->debugfs_dpcd_size)); - - if (connector->debugfs_dpcd_size > 256) - connector->debugfs_dpcd_size = 0; - - return size - r; -} - -static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t write_size = connector->debugfs_dpcd_size; - - if (!write_size || size < write_size) - return -EINVAL; - - data = kzalloc(write_size, GFP_KERNEL); - if (!data) - return 0; - - r = copy_from_user(data, buf, write_size); - - dm_helpers_dp_write_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, write_size - r); - kfree(data); - return write_size - r; -} - -static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t read_size = connector->debugfs_dpcd_size; - - if (!read_size || size < read_size) - return 0; - - data = kzalloc(read_size, GFP_KERNEL); - if (!data) - return 0; - - dm_helpers_dp_read_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, read_size); - - r = copy_to_user(buf, data, read_size); - - kfree(data); - return read_size - r; -} - /* function: Read link's DSC & FEC capabilities * * @@ -2682,25 +2802,6 @@ static const struct file_operations sdp_message_fops = { .llseek = default_llseek }; -static const struct file_operations dp_dpcd_address_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_address_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_size_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_size_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_data_debugfs_fops = { - .owner = THIS_MODULE, - .read = dp_dpcd_data_read, - .write = dp_dpcd_data_write, - .llseek = default_llseek -}; - static const struct file_operations dp_max_bpc_debugfs_fops = { .owner = THIS_MODULE, .read = dp_max_bpc_read, @@ -2714,6 +2815,12 @@ static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_mst_link_settings_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_mst_link_setting, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -2724,9 +2831,6 @@ static const struct { {"test_pattern", &dp_phy_test_pattern_fops}, {"hdcp_sink_capability", &hdcp_sink_capability_fops}, {"sdp_message", &sdp_message_fops}, - {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops}, - {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops}, - {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}, {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, @@ -2740,7 +2844,8 @@ static const struct { {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, {"is_mst_connector", &dp_is_mst_connector_fops}, {"mst_progress_status", &dp_mst_progress_status_fops}, - {"is_dpia_link", &is_dpia_link_fops} + {"is_dpia_link", &is_dpia_link_fops}, + 
{"mst_link_settings", &dp_mst_link_settings_debugfs_fops} }; static const struct { @@ -2809,6 +2914,32 @@ static int psr_read_residency(void *data, u64 *val) return 0; } +/* read allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + *val = adev->dm.dc->config.allow_edp_hotplug_detection; + + return 0; +} + +/* set allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val; + + return 0; +} + /* * Set dmcub trace event IRQ enable or disable. * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en @@ -2847,6 +2978,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, + allow_edp_hotplug_detection_get, + allow_edp_hotplug_detection_set, "%llu\n"); + DEFINE_SHOW_ATTRIBUTE(current_backlight); DEFINE_SHOW_ATTRIBUTE(target_backlight); @@ -2887,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused) seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); } } else { - seq_printf(m, "ILR is not supported by this eDP panel.\n"); + seq_puts(m, "ILR is not supported by this eDP panel.\n"); } return 0; @@ -3017,6 +3152,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) &target_backlight_fops); debugfs_create_file("ilr_setting", 0644, dir, connector, &edp_ilr_debugfs_fops); + debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, + &allow_edp_hotplug_detection_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -3025,9 +3162,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) connector_debugfs_entries[i].fops); } - connector->debugfs_dpcd_address = 0; - connector->debugfs_dpcd_size = 0; - if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { debugfs_create_file(hdmi_debugfs_entries[i].name, @@ -3246,6 +3380,8 @@ void crtc_debugfs_init(struct drm_crtc *crtc) #endif debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, crtc, &amdgpu_current_bpc_fops); + debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_colorspace_fops); } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5536d17306d0..20cfc5be21a4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -39,10 +39,10 @@ static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { - struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; - struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return 
dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3 { struct dc_link *link = handle; - struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; - struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; - return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } -static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) return -EINVAL; @@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work) static void link_lock(struct hdcp_workqueue *work, bool lock) { - int i = 0; for (i = 0; i < work->max_link; i++) { @@ -160,66 +160,60 @@ static void link_lock(struct hdcp_workqueue *work, bool lock) mutex_unlock(&work[i].mutex); } } + void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - uint8_t content_type, + u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; - struct mod_hdcp_display *display = &hdcp_work[link_index].display; - struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct mod_hdcp_display_query query; + struct mod_hdcp_link_adjustment link_adjust; + struct mod_hdcp_display_adjustment display_adjust; unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; - query.display = NULL; - mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); - - if (query.display != NULL) { - memcpy(display, query.display, sizeof(struct mod_hdcp_display)); - mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - - if (enable_encryption) { - /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp - * (s3 resume case) - */ - if (hdcp_work->srm_size > 0) - psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, - &hdcp_work->srm_version); - - 
display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { - hdcp_w->link.adjust.hdcp1.disable = 0; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { - hdcp_w->link.adjust.hdcp1.disable = 1; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; - } + memset(&link_adjust, 0, sizeof(link_adjust)); + memset(&display_adjust, 0, sizeof(display_adjust)); - schedule_delayed_work(&hdcp_w->property_validate_dwork, - msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - } else { - display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - cancel_delayed_work(&hdcp_w->property_validate_dwork); + if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, + &hdcp_work->srm_version); + + display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; + + link_adjust.auth_delay = 2; + + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + link_adjust.hdcp1.disable = 1; + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } - display->state = MOD_HDCP_DISPLAY_ACTIVE; + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); } - mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, - unsigned int link_index, + unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -238,7 +232,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", - aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); @@ -246,6 +241,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -274,15 +270,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index schedule_work(&hdcp_w->cpirq_work); } - - - static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, - callback_dwork); + callback_dwork); mutex_lock(&hdcp_work->mutex); @@ -294,13 +287,12 @@ static void event_callback(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - - } static void 
event_property_update(struct work_struct *work) { - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); struct amdgpu_dm_connector *aconnector = NULL; struct drm_device *dev; long ret; @@ -334,11 +326,10 @@ static void event_property_update(struct work_struct *work) mutex_lock(&hdcp_work->mutex); if (conn_state->commit) { - ret = wait_for_completion_interruptible_timeout( - &conn_state->commit->hw_done, 10 * HZ); + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); if (ret == 0) { - DRM_ERROR( - "HDCP state unknown! Setting it to DESIRED"); + DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n"); hdcp_work->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; } @@ -349,24 +340,20 @@ static void event_property_update(struct work_struct *work) DRM_MODE_HDCP_CONTENT_TYPE0 && hdcp_work->encryption_status[conn_index] <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { - DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && hdcp_work->encryption_status[conn_index] == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } } else { DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); - drm_hdcp_update_content_protection( - connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); - + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); } mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -402,7 +389,7 @@ static void event_property_validate(struct work_struct *work) &query); DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", - aconnector->base.index, + aconnector->base.index, aconnector->base.state->content_protection, query.encryption_status, hdcp_work->encryption_status[conn_index]); @@ -410,7 +397,8 @@ static void event_property_validate(struct work_struct *work) if (query.encryption_status != hdcp_work->encryption_status[conn_index]) { DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", - hdcp_work->encryption_status[conn_index], query.encryption_status); + hdcp_work->encryption_status[conn_index], + query.encryption_status); hdcp_work->encryption_status[conn_index] = query.encryption_status; @@ -429,7 +417,7 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), - struct hdcp_workqueue, + struct hdcp_workqueue, watchdog_timer_dwork); mutex_lock(&hdcp_work->mutex); @@ -443,7 +431,6 @@ static void event_watchdog_timer(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } static void event_cpirq(struct work_struct *work) @@ -459,10 +446,8 @@ static void event_cpirq(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } - void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; @@ -478,10 +463,8 @@ void hdcp_destroy(struct kobject *kobj, struct 
hdcp_workqueue *hdcp_work) kfree(hdcp_work); } - static bool enable_assr(void *handle, struct dc_link *link) { - struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; @@ -499,7 +482,8 @@ static bool enable_assr(void *handle, struct dc_link *link) memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; - dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); @@ -521,7 +505,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) int link_index = aconnector->dc_link->link_index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct drm_connector_state *conn_state; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; @@ -541,7 +525,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; - if (sink != NULL) + if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; @@ -564,19 +548,27 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 2; link->adjust.hdcp1.disable = 0; - conn_state = aconnector->base.state; + hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, - (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, - (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? + aconnector->base.state->hdcp_content_type : -1); - if (conn_state) - hdcp_update_display(hdcp_work, link_index, aconnector, - conn_state->hdcp_content_type, false); -} + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} -/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel +/** + * DOC: Add sysfs interface for set/get srm + * + * NOTE: From the usermode's perspective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is * @@ -587,23 +579,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * - * This means we don't know if the SRM is good until the last call. And because of this limitation we - * cannot throw errors early as it will stop the kernel from writing to sysfs + * This means we don't know if the SRM is good until the last call. And because of this + limitation we cannot throw errors early as it will stop the kernel from writing to sysfs
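+ * + * As an illustration (editorial sketch; srm_buf and srm_len are placeholder names), the single usermode call amounts to: + * + * int fd = open("/sys/class/drm/card0/device/hdcp_srm", O_WRONLY); + * write(fd, srm_buf, srm_len); + * close(fd); + * + * and the kernel may turn that one call into several sysfs writes internally.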
 * * Example 1: - * Good SRM size = 5096 - * first call to write 4096 -> PSP fails - * Second call to write 1000 -> PSP Pass -> SRM is set + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: - * Bad SRM size = 4096 - * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this - * is the last call) + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) * * Solution?: - * 1: Parse the SRM? -> It is signed so we don't know the EOF - * 2: We can have another sysfs that passes the size before calling set. -> simpler solution - * below + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below * * Easy Solution: * Always call get after Set to verify if set was successful. * * +----------------------+ * PSP will only update its srm if it is older than the one we are trying to load. * Always do set first, then get. - -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer - version and save it + * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer + * version and save it * - * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the - * same(newer) version back and save it + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same (newer) version back and save it * - * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is - * incorrect/corrupted and we should correct our SRM by getting it from PSP + * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP */ -static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint32_t srm_version = 0; + u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); @@ -639,19 +632,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi work->srm_version = srm_version; } - link_lock(work, false); return count; } -static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint8_t *srm = NULL; - uint32_t srm_version; - uint32_t srm_size; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); @@ -684,12 +677,12 @@ ret: /* From the hdcp spec (5.Renewability) SRM needs to be stored in non-volatile memory.
* * For example, - * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" - * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent - * across boot/reboots/suspend/resume/shutdown + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown * - * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need - * to make the SRM persistent. + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems. @@ -699,8 +692,8 @@ ret: * * Usermode can read/write to/from PSP using the sysfs interface * For example: - * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile - * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, @@ -709,10 +702,9 @@ static const struct bin_attribute data_attr = { .read = srm_data_read, }; - -struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) { - int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; @@ -721,14 +713,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; - hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); - if (hdcp_work->srm == NULL) + if (!hdcp_work->srm) goto fail_alloc_context; - hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); - if (hdcp_work->srm_temp == NULL) + if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; @@ -781,10 +775,5 @@ fail_alloc_context: kfree(hdcp_work); return NULL; - - - } - - diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index c6ce2b7123b7..4b230933b28e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -44,18 +44,39 @@ #include "dm_helpers.h" #include "ddc_service_types.h" -/* MST Dock */ -static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; +static u32 edid_extract_panel_id(struct edid *edid) +{ + return (u32)edid->mfg_id[0] << 24 | + (u32)edid->mfg_id[1] << 16 | + (u32)EDID_PRODUCT_ID(edid); +} -/* dm_helpers_parse_edid_caps - * - * Parse edid caps +static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) +{ + uint32_t panel_id = edid_extract_panel_id(edid); + + switch (panel_id) { + /* Workaround for 
some monitors that do not work well with FAMS */ + case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): + DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_fams = true; + break; + default: + return; + } +} + +/** + * dm_helpers_parse_edid_caps() - Parse edid caps + * + * @link: current detected link + * @edid: [in] pointer to edid - edid_caps: [in] pointer to edid caps - * @return - * void - * */ + * @edid_caps: [in] pointer to edid caps + * + * Return: void + */ enum dc_edid_status dm_helpers_parse_edid_caps( struct dc_link *link, const struct dc_edid *edid, @@ -96,7 +117,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( if (sad_count <= 0) return result; - edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; + edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT); for (i = 0; i < edid_caps->audio_mode_count; ++i) { struct cea_sad *sad = &sads[i]; @@ -118,6 +139,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; + apply_edid_quirks(edid_buf, edid_caps); + kfree(sads); kfree(sadb); @@ -232,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( /* Accessing the connector state is required for vcpi_slots allocation * and directly relies on behaviour in commit check * that blocks before commit guaranteeing that the state - * is not gonna be swapped while still in use in commit tail */ + * is not going to be swapped while still in use in commit tail + */ if (!aconnector || !aconnector->mst_root) return false; @@ -259,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( /* mst_mgr->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same - * sequence.
copy DRM MST allocation to dc + */ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); return true; @@ -403,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx, total = log_ctx->pos + n + 1; if (total > log_ctx->size) { - char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); + char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); if (buf) { memcpy(buf, log_ctx->buf, log_ctx->pos); @@ -610,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); if (ret < 0) { - DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); return false; } @@ -632,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); + DC_LOG_DC("%s: success = %d\n", __func__, success); return success; } @@ -641,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Start %s\n", __func__); // Step 2 data[0] = 'P'; @@ -699,9 +724,12 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Done %s\n", __func__); } +/* MST Dock */ +static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; + static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( struct drm_dp_aux *aux, const struct dc_stream_state *stream, @@ -885,10 +913,34 @@ enum dc_edid_status dm_helpers_read_local_edid( DRM_ERROR("EDID err: %d, on connector: %s", edid_status, aconnector->base.name); + if (link->aux_mode) { + union test_request test_request = {0}; + union test_response test_response = {0}; - /* DP Compliance Test 4.2.2.3 */ - if (link->aux_mode) - drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); + dm_helpers_dp_read_dpcd(ctx, + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + + if (!test_request.bits.EDID_READ) + return edid_status; + + test_response.bits.EDID_CHECKSUM_WRITE = 1; + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_EDID_CHECKSUM, + &sink->dc_edid.raw_edid[sink->dc_edid.length-1], + 1); + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + + } return edid_status; } @@ -945,9 +997,8 @@ void dm_helpers_override_panel_settings( struct dc_panel_config *panel_config) { // Feature DSC - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) panel_config->dsc.disable_dsc_edp = true; - } } void *dm_helpers_allocate_gpu_mem( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 19f543ba7205..51467f132c26 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work) /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. 
- * (The most common use is HPD interrupt) */ + * (The most common use is HPD interrupt) + */ } /* @@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, if (handler_removed == false) { /* Not necessarily an error - caller may not - * know the context. */ + * know the context. + */ return NULL; } @@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params, static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, irq_handler_idx handler_idx) { - if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); return false; } @@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, /* This pointer will be stored by code which requested interrupt * registration. * The same pointer will be needed in order to unregister the - * interrupt. */ + * interrupt. + */ DRM_DEBUG_KMS( "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", @@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, if (handler_list == NULL) { /* If we got here, it means we searched all irq contexts - * for this irq source, but the handler was not found. */ + * for this irq source, but the handler was not found. + */ DRM_ERROR( "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", ih, irq_source); @@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' - * (because no code can schedule a new one). */ + * (because no code can schedule a new one). + */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); @@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(hnd_list_l)) { - list_for_each_safe (entry, tmp, hnd_list_l) { + list_for_each_safe(entry, tmp, hnd_list_l) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, @@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, if (list_empty(handler_list)) return; - list_for_each_entry (handler_data, handler_list, list) { + list_for_each_entry(handler_data, handler_list, list) { if (queue_work(system_highpri_wq, &handler_data->work)) { work_queued = true; break; @@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, &adev->dm.irq_handler_list_high_tab[irq_source], list) { /* Call a subcomponent which registered for immediate - * interrupt notification */ + * interrupt notification + */ handler_data->handler(handler_data->handler_arg); } @@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, return 0; } -static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) { switch (type) { case AMDGPU_HPD_1: @@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); @@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, static inline int dm_irq_state(struct amdgpu_device *adev, struct 
amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state, const enum irq_type dal_irq_type, const char *func) @@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, true); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, true); @@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, false); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 810ab682f424..57230661132b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -45,8 +45,7 @@ #endif #include "dc/dcn20/dcn20_resource.h" -bool is_timing_changed(struct dc_stream_state *cur_stream, - struct dc_stream_state *new_stream); + #define PEAK_FACTOR_X1000 1006 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, @@ -297,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); if (!edid) { @@ -620,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, return connector; } +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type) +{ + uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; + uint8_t dret; + bool new_irq_handled = false; + int dpcd_addr; + uint8_t dpcd_bytes_to_read; + const uint8_t max_process_count = 30; + uint8_t process_count = 0; + u8 retry; + struct amdgpu_dm_connector *aconnector = + container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + + + const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); + + if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { + dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; + /* DPCD 0x200 - 0x201 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT; + } else { + dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; + /* DPCD 0x2002 - 0x2005 for downstream IRQ */ + 
dpcd_addr = DP_SINK_COUNT_ESI; + } + + mutex_lock(&aconnector->handle_mst_msg_ready); + + while (process_count < max_process_count) { + u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; + + process_count++; + + dret = drm_dp_dpcd_read( + &aconnector->dm_dp_aux.aux, + dpcd_addr, + esi, + dpcd_bytes_to_read); + + if (dret != dpcd_bytes_to_read) { + DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); + break; + } + + DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); + + switch (msg_rdy_type) { + case DOWN_REP_MSG_RDY_EVENT: + /* Only handle DOWN_REP_MSG_RDY case*/ + esi[1] &= DP_DOWN_REP_MSG_RDY; + break; + case UP_REQ_MSG_RDY_EVENT: + /* Only handle UP_REQ_MSG_RDY case*/ + esi[1] &= DP_UP_REQ_MSG_RDY; + break; + default: + /* Handle both cases*/ + esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); + break; + } + + if (!esi[1]) + break; + + /* handle MST irq */ + if (aconnector->mst_mgr.mst_state) + drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, + esi, + ack, + &new_irq_handled); + + if (new_irq_handled) { + /* ACK at DPCD to notify down stream */ + for (retry = 0; retry < 3; retry++) { + ssize_t wret; + + wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, + dpcd_addr + 1, + ack[1]); + if (wret == 1) + break; + } + + if (retry == 3) { + DRM_ERROR("Failed to ack MST event.\n"); + break; + } + + drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); + + new_irq_handled = false; + } else { + break; + } + } + + mutex_unlock(&aconnector->handle_mst_msg_ready); + + if (process_count == max_process_count) + DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); +} + +static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) +{ + dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT); +} + static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, + .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, @@ -718,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p &dsc_options, 0, params[i].timing, + dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), ¶ms[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; @@ -768,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, &dsc_options, - (int) kbps, param.timing, &dsc_config); + (int) kbps, param.timing, + dc_link_get_highest_encoding_format(param.aconnector->dc_link), + &dsc_config); return dsc_config.bits_per_pixel; } @@ -1006,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, ¶ms[count].bw_range)) - params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + &stream->timing, + dc_link_get_highest_encoding_format(dc_link), + ¶ms[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(dc_link)); count++; } @@ -1211,7 +1327,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (computed_streams[i]) continue; - if (!res_pool->funcs->remove_stream_from_ctx || + if (res_pool->funcs->remove_stream_from_ctx && res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return -EINVAL; 
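/*
 * Aside: the new dm_handle_mst_sideband_msg_ready_event() above is, at its
 * core, a bounded read/dispatch/ack loop over the DPCD ESI registers. A
 * stripped-down sketch of that pattern (locking, ack retries and error
 * reporting omitted; the helper name is a placeholder):
 */
#include <drm/display/drm_dp_mst_helper.h>

static void poll_and_ack_esi_sketch(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_aux *aux)
{
	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
	u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
	bool handled = false;
	int loops = 30; /* same bound as max_process_count above */

	while (loops--) {
		/* DPCD 0x2002-0x2005: sink count, IRQ vector, MSG_RDY flags */
		if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi,
				     sizeof(esi)) != sizeof(esi))
			break;

		if (!(esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY)))
			break; /* no sideband message pending */

		/* let the MST core consume the message; it fills in the
		 * bits that must be acknowledged
		 */
		drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
		if (!handled)
			break;

		/* clear the serviced MSG_RDY bits so the sink can raise the
		 * next event, then let the core send any queued request
		 */
		drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
		drm_dp_mst_hpd_irq_send_new_request(mgr);
		handled = false;
	}
}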
@@ -1422,7 +1538,7 @@ int pre_validate_dsc(struct drm_atomic_state *state, struct dc_stream_state *stream = dm_state->context->streams[i]; if (local_dc_state->streams[i] && - is_timing_changed(stream, local_dc_state->streams[i])) { + dc_is_timing_changed(stream, local_dc_state->streams[i])) { DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream); @@ -1467,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, bw_range); + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 1e4ede1e57ab..37c820ab0fdb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -49,6 +49,13 @@ #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 +enum mst_msg_ready_type { + NONE_MSG_RDY_EVENT = 0, + DOWN_REP_MSG_RDY_EVENT = 1, + UP_REQ_MSG_RDY_EVENT = 2, + DOWN_OR_UP_MSG_RDY_EVENT = 3 +}; + struct amdgpu_display_manager; struct amdgpu_dm_connector; @@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type); + struct dsc_mst_fairness_vars { int pbn; bool dsc_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 322668973747..cc74dd69acf2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_ARGB16161616F, }; uint32_t format = plane_state->fb->format->format; unsigned int i; @@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier) return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, int pkrs = 0; u32 gb_addr_config; u8 i = 0; - unsigned swizzle_r_x; + unsigned int swizzle_r_x; uint64_t modifier_r_x; uint64_t modifier_dcc_best; uint64_t modifier_dcc_4k; @@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane, * caps list. 
*/ - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: + if (plane->type == DRM_PLANE_TYPE_PRIMARY || + (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { if (num_formats >= max_formats) break; @@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane, formats[num_formats++] = DRM_FORMAT_XBGR16161616F; formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } - break; + } else { + switch (plane->type) { + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_OVERLAY: - for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = overlay_formats[i]; + } + break; - formats[num_formats++] = overlay_formats[i]; - } - break; + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_CURSOR: - for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = cursor_formats[i]; + } + break; - formats[num_formats++] = cursor_formats[i]; + default: + break; } - break; } return num_formats; @@ -1260,6 +1269,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, attributes.rotation_angle = 0; attributes.attribute_flags.value = 0; + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. + */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; if (crtc_state->stream) { @@ -1459,6 +1475,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_blend_mode_property(plane, blend_caps); } + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(plane, 0); + } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + unsigned int zpos = 1 + drm_plane_index(plane); + drm_plane_create_zpos_property(plane, zpos, 1, 254); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_zpos_immutable_property(plane, 255); + } + if (plane->type == DRM_PLANE_TYPE_PRIMARY && plane_cap && (plane_cap->pixel_format_support.nv12 || diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 75284e2cec74..848c5b4bb301 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type( if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { /* This clock is higher the validation clock. * Than means the previous one is the highest - * non-boosted one. */ + * non-boosted one. + */ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? 
i : 1; @@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes( * TODO: expand this to other ASICs */ if ((adev->asic_type >= CHIP_POLARIS10) && - (adev->asic_type <= CHIP_VEGAM) && - !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, - (void *)wm_with_clock_ranges)) - return true; + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + (void *)wm_with_clock_ranges)) + return true; return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index d647f68fd563..08ce3bb8f640 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -24,6 +24,7 @@ */ #include "amdgpu_dm_psr.h" +#include "dc_dmub_srv.h" #include "dc.h" #include "dm_helpers.h" #include "amdgpu_dm.h" @@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; - return true; + return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); } /* @@ -165,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ if (vsync_rate_hz != 0) { unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; + num_frames_static = (30000 / frame_time_microsec) + 1; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c new file mode 100644 index 000000000000..32d3086c4cb7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -0,0 +1,183 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_replay.h" +#include "dc.h" +#include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "dc/inc/link.h" + +/* + * link_supports_replay() - check if the link supports replay + * @link: link + * @aconnector: aconnector + * + */ +static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state); + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps; + + if (!state->freesync_capable) + return false; + + if (!aconnector->vsdb_info.replay_mode) + return false; + + // Check the eDP version + if (dpcd_caps->edp_rev < EDP_REVISION_13) + return false; + + if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP) + return false; + + // Check adaptive sync support cap + if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) + return false; + + return true; +} + +/* + * amdgpu_dm_setup_replay() - setup replay configuration + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config pr_config; + union replay_debug_flags *debug_flags = NULL; + + // For eDP, if Replay is supported, return true to skip checks + if (link->replay_settings.config.replay_supported) + return true; + + if (!dc_is_embedded_signal(link->connector_signal)) + return false; + + if (link->panel_config.psr.disallow_replay) + return false; + + if (!link_supports_replay(link, aconnector)) + return false; + + // Mark Replay is supported in link and update related attributes + pr_config.replay_supported = true; + pr_config.replay_power_opt_supported = 0; + pr_config.replay_enable_option |= pr_enable_option_static_screen; + pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? true : false; + + if (!pr_config.replay_timing_sync_supported) + pr_config.replay_enable_option &= ~pr_enable_option_general_ui; + + debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; + debug_flags->u32All = 0; + debug_flags->bitfields.visual_confirm = + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? 
true : false; + + link->replay_settings.replay_feature_enabled = true; + + init_replay_config(link, &pr_config); + + return true; +} + + +/* + * amdgpu_dm_replay_enable() - enable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) +{ + uint64_t state; + unsigned int retry_count; + bool replay_active = true; + const unsigned int max_retry = 1000; + bool force_static = true; + struct dc_link *link = NULL; + + + if (stream == NULL) + return false; + + link = stream->link; + + if (link == NULL) + return false; + + link->dc->link_srv->edp_setup_replay(link, stream); + + link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); + + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); + + if (wait == true) { + + for (retry_count = 0; retry_count <= max_retry; retry_count++) { + dc_link_get_replay_state(link, &state); + if (replay_active) { + if (state != REPLAY_STATE_0 && + (!force_static || state == REPLAY_STATE_3)) + break; + } else { + if (state == REPLAY_STATE_0) + break; + } + udelay(500); + } + + /* assert if max retry hit */ + if (retry_count >= max_retry) + ASSERT(0); + } else { + /* To-do: Add trace log */ + } + + return true; +} + +/* + * amdgpu_dm_replay_disable() - disable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) +{ + + if (stream->link) { + DRM_DEBUG_DRIVER("Disabling replay...\n"); + stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h new file mode 100644 index 000000000000..01cba3cd6246 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -0,0 +1,46 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_REPLAY_H_ +#define AMDGPU_DM_AMDGPU_DM_REPLAY_H_ + +#include "amdgpu.h" + +enum replay_enable_option { + pr_enable_option_static_screen = 0x1, + pr_enable_option_mpo_video = 0x2, + pr_enable_option_full_screen_video = 0x4, + pr_enable_option_general_ui = 0x8, + pr_enable_option_static_screen_coasting = 0x10000, + pr_enable_option_mpo_video_coasting = 0x20000, + pr_enable_option_full_screen_video_coasting = 0x40000, +}; + + +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable); +bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream); + +#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index c42aa947c969..172aa10a8800 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -33,6 +33,8 @@ #include <asm/cputable.h> #elif defined(CONFIG_ARM64) #include <asm/neon.h> +#elif defined(CONFIG_LOONGARCH) +#include <asm/fpu.h> #endif /** @@ -88,7 +90,7 @@ void dc_fpu_begin(const char *function_name, const int line) *pcpu += 1; if (*pcpu == 1) { -#if defined(CONFIG_X86) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) @@ -128,7 +130,7 @@ void dc_fpu_end(const char *function_name, const int line) pcpu = get_cpu_ptr(&fpu_recursion_depth); *pcpu -= 1; if (*pcpu <= 0) { -#if defined(CONFIG_X86) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); migrate_enable(); #elif defined(CONFIG_PPC64) diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 69ffd4424dc7..1b8c2aef4633 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID)) AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID) + diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 352e9afb85c6..e295a839ab47 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -24,7 +24,7 @@ */ #include "dm_services.h" -#include "conversion.h" +#include "basics/conversion.h" #define DIVIDER 10000 diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 84aeccf36b4b..6d2924114a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -50,12 +50,11 @@ bool dal_vector_construct( return true; } -static bool dal_vector_presized_costruct( - struct vector *vector, - struct dc_context *ctx, - uint32_t count, - void *initial_value, - uint32_t struct_size) +static bool dal_vector_presized_costruct(struct vector *vector, + struct dc_context *ctx, + uint32_t count, + void *initial_value, + uint32_t struct_size) { uint32_t i; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 27af9d3c2b73..6b3190447581 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -96,7 +96,7 @@ struct dc_bios *bios_parser_create( 
struct bp_init_data *init, enum dce_version dce_version) { - struct bios_parser *bp = NULL; + struct bios_parser *bp; bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL); if (!bp) @@ -2576,7 +2576,7 @@ static struct integrated_info *bios_parser_create_integrated_info( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); - struct integrated_info *info = NULL; + struct integrated_info *info; info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL); @@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info( return NULL; } -static enum bp_result update_slot_layout_info( - struct dc_bios *dcb, - unsigned int i, - struct slot_layout_info *slot_layout_info, - unsigned int record_offset) +static enum bp_result update_slot_layout_info(struct dc_bios *dcb, + unsigned int i, + struct slot_layout_info *slot_layout_info, + unsigned int record_offset) { unsigned int j; struct bios_parser *bp; @@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info( } -static enum bp_result get_bracket_layout_record( - struct dc_bios *dcb, - unsigned int bracket_layout_id, - struct slot_layout_info *slot_layout_info) +static enum bp_result get_bracket_layout_record(struct dc_bios *dcb, + unsigned int bracket_layout_id, + struct slot_layout_info *slot_layout_info) { unsigned int i; unsigned int record_offset; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index cce47d3f1a13..484d62bcf2c2 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object( } /* from graphics_object_id, find display path which includes the object_id */ -static struct atom_display_object_path_v3 *get_bios_object_from_path_v3( - struct bios_parser *bp, - struct graphics_object_id id) +static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp, + struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; @@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info( return BP_RESULT_OK; } -static struct atom_hpd_int_record *get_hpd_record_for_path_v3( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -774,20 +772,20 @@ static enum bp_result bios_parser_get_device_tag( return BP_RESULT_BADINPUT; switch (bp->object_info_tbl.revision.minor) { - case 4: - default: + case 4: + default: /* getBiosObject will return MXM object */ - object = get_bios_object(bp, connector_object_id); + object = get_bios_object(bp, connector_object_id); if (!object) { BREAK_TO_DEBUGGER(); /* Invalid object id */ return BP_RESULT_BADINPUT; } - info->acpi_device = 0; /* BIOS no longer provides this */ - info->dev_id = device_type_from_device_id(object->device_tag); - break; - case 5: + info->acpi_device = 0; /* BIOS no longer provides this */ + info->dev_id = device_type_from_device_id(object->device_tag); + break; + case 5: object_path_v3 = get_bios_object_from_path_v3(bp, connector_object_id); if (!object_path_v3) { @@ -1582,13 +1580,13 @@ static bool bios_parser_is_device_id_supported( uint32_t mask = get_support_mask_for_device_id(id); switch (bp->object_info_tbl.revision.minor) { - case 4: - default: - return 
(le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0; - break; - case 5: - return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; - break; + case 4: + default: + return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0; + break; + case 5: + return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; + break; } return false; @@ -1757,7 +1755,7 @@ static enum bp_result bios_parser_get_firmware_info( case 2: case 3: result = get_firmware_info_v3_2(bp, info); - break; + break; case 4: result = get_firmware_info_v3_4(bp, info); break; @@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record( return NULL; } -static struct atom_connector_caps_record *get_connector_caps_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2228,7 +2225,7 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_BADINPUT; switch (bp->object_info_tbl.revision.minor) { - case 4: + case 4: default: object = get_bios_object(bp, object_id); @@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_OK; } -static struct atom_connector_speed_record *get_connector_speed_cap_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -3090,7 +3086,7 @@ static struct integrated_info *bios_parser_create_integrated_info( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); - struct integrated_info *info = NULL; + struct integrated_info *info; info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL); @@ -3679,7 +3675,7 @@ struct dc_bios *firmware_parser_create( struct bp_init_data *init, enum dce_version dce_version) { - struct bios_parser *bp = NULL; + struct bios_parser *bp; bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL); if (!bp) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 1ef9e4053bb7..90a02d7bd3da 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -123,9 +123,7 @@ static void encoder_control_dmcub( sizeof(cmd.digx_encoder_control.header); cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result encoder_control_digx_v1_5( @@ -261,9 +259,7 @@ static void transmitter_control_dmcub( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_6( @@ -325,9 +321,7 @@ static void transmitter_control_dmcub_v1_7( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - 
dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_7( @@ -435,9 +429,7 @@ static void set_pixel_clock_dmcub( sizeof(cmd.set_pixel_clock.header); cmd.set_pixel_clock.pixel_clock.clk = *clk; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result set_pixel_clock_v7( @@ -804,9 +796,7 @@ static void enable_disp_power_gating_dmcub( sizeof(cmd.enable_disp_power_gating.header); cmd.enable_disp_power_gating.power_gating.pwr = *pwr; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_disp_power_gating_v2_1( @@ -1016,10 +1006,7 @@ static void enable_lvtma_control_dmcub( panel_instance; cmd.lvtma_control.data.bypass_panel_control_wait = bypass_panel_control_wait; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); - + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_lvtma_control( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 6127d6045336..dcedf9645161 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -117,6 +117,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m continue; clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active; dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL); + dc->link_srv->edp_set_replay_allow_active(edp_link, &allow_active, false, false, NULL); } } @@ -137,6 +138,8 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) continue; dc->link_srv->edp_set_psr_allow_active(edp_link, &clk_mgr->psr_allow_active_cache, false, false, NULL); + dc->link_srv->edp_set_replay_allow_active(edp_link, + &clk_mgr->psr_allow_active_cache, false, false, NULL); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 934e6423dc1a..1f36ad8a7de4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -111,12 +111,10 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; @@ -153,12 +151,10 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 
1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr->dfs_bypass_disp_clk = actual_clock; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 450eaead4f20..89b79dd39628 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -135,12 +135,10 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } return actual_dispclk_set_mhz * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 650f3b4b562e..5ee87965a078 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -531,6 +531,11 @@ void dcn20_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + int dprefclk_did; + int target_div; + uint32_t pll_req_reg; + struct fixed31_32 pll_req; + clk_mgr->base.ctx = ctx; clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; @@ -547,42 +552,34 @@ void dcn20_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.dentist_vco_freq_khz = 3850000; + /* DFS Slice 2 should be used for DPREFCLK */ + dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); + /* Convert DPREFCLK DFS Slice DID to actual divider */ + target_div = dentist_get_divider_from_did(dprefclk_did); + /* get FbMult value */ + pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); - } else { - /* DFS Slice 2 should be used for DPREFCLK */ - int dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); - /* Convert DPREFCLK DFS Slice DID to actual divider*/ - int target_div = dentist_get_divider_from_did(dprefclk_did); - - /* get FbMult value */ - uint32_t pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); - struct fixed31_32 pll_req; - - /* set up a fixed-point number - * this works because the int part is on the right edge of the register - * and the frac part is on the left edge - */ + /* set up a fixed-point number + * this 
works because the int part is on the right edge of the register + * and the frac part is on the left edge + */ - pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); - pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; + pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); + pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, 100000); + /* multiply by REFCLK period */ + pll_req = dc_fixpt_mul_int(pll_req, 100000); - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3850000; + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; - /* Calculate the DPREFCLK in kHz.*/ - clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - } + /* Calculate the DPREFCLK in kHz.*/ + clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. //Also there is no plan for now that DFS BYPASS will be used on NV10/12/14. @@ -590,4 +587,3 @@ void dcn20_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); } - diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 811720749faf..694fe4271b4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -190,23 +190,17 @@ void dcn201_clk_mgr_construct(struct dc_context *ctx, clk_mgr->dprefclk_ss_divider = 1000; clk_mgr->ss_on_dprefclk = false; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn201_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.dprefclk_khz = 600000; - clk_mgr->base.dentist_vco_freq_khz = 3000000; - } else { - clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); - clk_mgr->base.dprefclk_khz *= 100; + clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); + clk_mgr->base.dprefclk_khz *= 100; - if (clk_mgr->base.dprefclk_khz == 0) - clk_mgr->base.dprefclk_khz = 600000; + if (clk_mgr->base.dprefclk_khz == 0) + clk_mgr->base.dprefclk_khz = 600000; - REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); - clk_mgr->base.dentist_vco_freq_khz *= 100000; + REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); + clk_mgr->base.dentist_vco_freq_khz *= 100000; - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3000000; - } + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3000000; if (!debug->disable_dfs_bypass && bp->integrated_info) if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index bd9fd0b54f46..0c6a4ab72b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -705,6 +705,7 @@ void rn_clk_mgr_construct( struct dpm_clocks clock_table = { 0 }; enum pp_smu_status status = 0; int is_green_sardine = 0; + struct clk_log_info log_info = {0}; #if defined(CONFIG_DRM_AMD_DC_FP) is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); @@ -725,48 +726,41 @@ void rn_clk_mgr_construct( clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); + + /* SMU Version 55.51.0 and up no longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3600000; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); - - /* SMU Version 55.51.0 and up no longer have an issue - * that needs to limit minimum dispclk */ - if (clk_mgr->smu_ver >= SMU_VER_55_51_0) - debug->min_disp_clk_khz = 0; - - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3600000; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { - if (clk_mgr->periodic_retraining_disabled) { - rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; - } else { - if (is_green_sardine) - rn_bw_params.wm_table = lpddr4_wm_table_gs; - else - rn_bw_params.wm_table = lpddr4_wm_table_rn; - } + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + if (clk_mgr->periodic_retraining_disabled) { + rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; } else { if (is_green_sardine) - rn_bw_params.wm_table = ddr4_wm_table_gs; - else { - if (ctx->dc->config.is_single_rank_dimm) - rn_bw_params.wm_table = ddr4_1R_wm_table_rn; - else - rn_bw_params.wm_table = ddr4_wm_table_rn; - } + rn_bw_params.wm_table = lpddr4_wm_table_gs; + else + rn_bw_params.wm_table = lpddr4_wm_table_rn; + } + } else { + if (is_green_sardine) + rn_bw_params.wm_table = ddr4_wm_table_gs; + else { + if (ctx->dc->config.is_single_rank_dimm) + rn_bw_params.wm_table = ddr4_1R_wm_table_rn; + else + rn_bw_params.wm_table = ddr4_wm_table_rn; } - /* Saved clocks configured at boot for debug purposes */ - rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); @@ -786,9 +780,8 @@ void rn_clk_mgr_construct( } } - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { - /* enable powerfeatures when displaycount goes to 0 */ + /* enable powerfeatures when displaycount goes to 0 */ + if (clk_mgr->smu_ver >= 0x00371500) rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } } diff 
--git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 27fbe906682f..8c9d45e5b13b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -147,17 +147,14 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } // pmfw always set clock more than or equal requested clock - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); + ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); return actual_dispclk_set_mhz * 1000; } @@ -221,15 +218,13 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) { int actual_dppclk_set_mhz = -1; - struct dc *dc = clk_mgr->base.ctx->dc; actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, khz_to_mhz_ceil(requested_dpp_khz)); - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz)); + ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz)); return actual_dppclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 694a9d3d92ae..3271c8c7905d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -206,7 +206,6 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool update_uclk = false; bool p_state_change_support; - int total_plane_count; if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) return; @@ -247,8 +246,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); - p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); + p_state_change_support = new_clocks->p_state_change_support; // invalidate the current P-State forced min in certain dc_mode_softmax situations if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) { @@ -523,6 +521,8 @@ void dcn3_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + struct clk_state_registers_and_bypass s = { 0 }; + clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn3_funcs; clk_mgr->regs = &clk_mgr_regs; @@ -539,27 +539,19 @@ void dcn3_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.funcs = &dcn3_fpga_funcs; - 
clk_mgr->base.dentist_vco_freq_khz = 3650000; - - } else { - struct clk_state_registers_and_bypass s = { 0 }; + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3650000; - /* Convert dprefclk units from MHz to KHz */ - /* Value already divided by 10, some resolution lost */ + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3650000; + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ - /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ - //ASSERT(s.dprefclk != 0); - if (s.dprefclk != 0) - clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - } + /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ + //ASSERT(s.dprefclk != 0); + if (s.dprefclk != 0) + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; clk_mgr->dfs_bypass_enabled = false; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index 1fbf1c105dc1..bdbf18306698 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b /* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */ uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0); + smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n", + enable, cache_timer_delay, cache_timer_scale); + dcn30_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 01383aac6b41..a5489fe6875f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -117,7 +117,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, display_count = vg_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ - if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) { + if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; @@ -151,10 +151,8 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
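/*
 * Aside: with the FPGA paths gone, the dcn20/dcn3 constructors above always
 * derive the DENTIST VCO frequency from the CLK PLL_REQ register. The
 * FbMult integer field sits at the right edge of the register and the
 * fractional field at the left edge, so the raw bits can be assembled
 * directly into a 31.32 fixed-point multiplier and scaled by 100000 (the
 * 100 MHz REFCLK period factor) to yield kHz. A sketch of that computation
 * with DC's fixed31_32 helpers; the mask parameters stand in for the
 * per-ASIC FbMult_int/FbMult_frac masks:
 */
#include "fixed31_32.h"

static int vco_khz_from_pll_req_sketch(uint32_t pll_req_reg,
				       uint32_t int_mask, uint32_t frac_mask)
{
	struct fixed31_32 pll_req;

	/* integer FbMult bits are right-aligned in the register */
	pll_req = dc_fixpt_from_int(pll_req_reg & int_mask);

	/* fractional bits are left-aligned, i.e. already positioned at the
	 * top of the 32 fractional bits of the 31.32 value
	 */
	pll_req.value |= pll_req_reg & frac_mask;

	/* scale by the REFCLK period factor; the integer part is now the
	 * VCO frequency in kHz (callers fall back to a default if zero)
	 */
	pll_req = dc_fixpt_mul_int(pll_req, 100000);

	return dc_fixpt_floor(pll_req);
}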
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -664,6 +662,7 @@ void vg_clk_mgr_construct( struct dccg *dccg) { struct smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &vg_funcs; @@ -703,32 +702,25 @@ void vg_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - vg_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.base.dentist_vco_freq_khz = 3600000; - } else { - struct clk_log_info log_info = {0}; + clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); - clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 3600000; + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 3600000; - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - vg_bw_params.wm_table = lpddr5_wm_table; - } else { - vg_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + vg_bw_params.wm_table = lpddr5_wm_table; + } else { + vg_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); @@ -746,12 +738,6 @@ void vg_clk_mgr_construct( if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); -/* - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) { - enable powerfeatures when displaycount goes to 0 - dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } -*/ } void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index f9e2e0c3095e..3db4ef564b99 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa( stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; + + /* Checking stream / link detection ensuring that PHY is active*/ + if 
(dc_is_dp_signal(stream->signal) && !stream->dpms_off) + display_count++; + } for (i = 0; i < dc->link_count; i++) { @@ -205,10 +210,8 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -250,9 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) @@ -674,6 +675,7 @@ void dcn31_clk_mgr_construct( struct dccg *dccg) { struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn31_funcs; @@ -713,29 +715,22 @@ void dcn31_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn31_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn31_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn31_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn31_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 0827c7df2855..32279c5db724 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -130,7 +130,7 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (result == VBIOSSMC_Result_Failed) { if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) - DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + DC_LOG_DEBUG("Watermarks table not configured properly by SMU"); 
else ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 5cb44f838bde..7326b7565846 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -241,10 +241,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -286,9 +284,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) @@ -405,32 +401,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, } @@ -726,6 +722,7 @@ void dcn314_clk_mgr_construct( struct dccg *dccg) { struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn314_funcs; @@ -765,35 +762,27 @@ void dcn314_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) - dcn314_bw_params.wm_table = lpddr5_wm_table; - else - 
dcn314_bw_params.wm_table = ddr5_wm_table; + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) + dcn314_bw_params.wm_table = lpddr5_wm_table; + else + dcn314_bw_params.wm_table = ddr5_wm_table; - /* Saved clocks configured at boot for debug purposes */ - dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - - } + /* Saved clocks configured at boot for debug purposes */ + dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ - //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn314_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 0765334f0825..07baa10a8647 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -145,7 +145,7 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (result == VBIOSSMC_Result_Failed) { if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) - DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + DC_LOG_DEBUG("Watermarks table not configured properly by SMU"); else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS"); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b737cbc468f5..b2c4f97afc8b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -184,12 +184,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
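A recurring change in the clk_mgr update_clocks() paths in this patch is the collapse of the three-step dc_dmub_srv_cmd_queue() / dc_dmub_srv_cmd_execute() / dc_dmub_srv_wait_idle() sequence into a single dm_execute_dmub_cmd() call. A minimal sketch of what such a wrapper plausibly does, inferred only from the call sites visible here (the real helper lives in the DM layer and may differ):

	/* Sketch only: signature and wait-type values are taken from the
	 * call sites in this patch, not from the helper's definition.
	 */
	bool dm_execute_dmub_cmd(const struct dc_context *ctx,
				 union dmub_rb_cmd *cmd,
				 enum dm_dmub_wait_type wait_type)
	{
		dc_dmub_srv_cmd_queue(ctx->dmub_srv, cmd);	/* stage in ring buffer */
		dc_dmub_srv_cmd_execute(ctx->dmub_srv);		/* kick DMCUB */

		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT)	/* NO_WAIT callers skip this */
			dc_dmub_srv_wait_idle(ctx->dmub_srv);

		return true;
	}
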
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; - if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; - } + if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; + if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -234,9 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, @@ -602,6 +598,7 @@ void dcn315_clk_mgr_construct( struct dccg *dccg) { struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn315_funcs; @@ -641,26 +638,19 @@ void dcn315_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn315_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn315_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn315_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn315_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 925d6e13620e..1042cf1a3ab0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -32,29 +32,28 @@ #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 +#define SMU_REGISTER_WRITE_RETRY_COUNT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 
0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; #define regBIF_BX_PF2_RSMU_INDEX 0x0000 #define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 @@ -134,6 +133,8 @@ static int dcn315_smu_send_msg_with_param( unsigned int msg_id, unsigned int param) { uint32_t result; + uint32_t i = 0; + uint32_t read_back_data; result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); @@ -150,10 +151,19 @@ static int dcn315_smu_send_msg_with_param( /* Set the parameter register for the SMU message, unit is Mhz */ REG_WRITE(MP1_SMN_C2PMSG_37, param); - /* Trigger the message transaction by writing the message ID */ - generic_write_indirect_reg(CTX, - REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), - mmMP1_C2PMSG_3, msg_id); + for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) { + /* Trigger the message transaction by writing the message ID */ + generic_write_indirect_reg(CTX, + REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), + mmMP1_C2PMSG_3, msg_id); + read_back_data = generic_read_indirect_reg(CTX, + REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), + mmMP1_C2PMSG_3); + if (read_back_data == msg_id) + break; + udelay(2); + smu_print("SMU msg id write fail %x times. \n", i + 1); + } result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 93db4dbee713..09151cc56ce4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -45,24 +45,14 @@ #define MAX_INSTANCE 7 #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; -static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } }, - { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } }, - { { 0x00017000, 0x02402000, 0, 0, 0, 0 } }, - { { 0x00017200, 0x02402400, 0, 0, 0, 0 } }, - { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } }, - { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } }, - { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } }; - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -73,9 +63,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L -#define REG(reg_name) \ - (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - #define TO_CLK_MGR_DCN316(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn316, base) @@ -207,12 +194,10 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
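The dcn315_smu_send_msg_with_param() hunk above hardens the message trigger against a dropped write to the indirect RSMU register: the message ID is written, read back, and re-issued up to SMU_REGISTER_WRITE_RETRY_COUNT times. Reduced to a hypothetical helper (the name and bool return are illustrative; the accessors, register, and delay are as in the hunk, assuming that file's REG_NBIO()/CTX macros are in scope):

	/* Write-then-verify: return true once the read-back matches,
	 * false if all SMU_REGISTER_WRITE_RETRY_COUNT attempts fail.
	 */
	static bool smu_msg_id_write_verified(struct dc_context *ctx, uint32_t msg_id)
	{
		uint32_t i;

		for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
			generic_write_indirect_reg(ctx,
					REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
					mmMP1_C2PMSG_3, msg_id);
			if (generic_read_indirect_reg(ctx,
					REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
					mmMP1_C2PMSG_3) == msg_id)
				return true;
			udelay(2);	/* brief settle before re-issuing */
		}

		return false;
	}
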
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - if (new_clocks->dispclk_khz < 100000) - new_clocks->dispclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + if (new_clocks->dispclk_khz < 100000) + new_clocks->dispclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -254,9 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, @@ -581,36 +564,6 @@ static struct clk_mgr_funcs dcn316_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; -static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) -{ - /* get FbMult value */ - struct fixed31_32 pll_req; - unsigned int fbmult_frac_val = 0; - unsigned int fbmult_int_val = 0; - - /* - * Register value of fbmult is in 8.16 format, we are converting to 31.32 - * to leverage the fix point operations available in driver - */ - - REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ - REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ - - pll_req = dc_fixpt_from_int(fbmult_int_val); - - /* - * since fractional part is only 16 bit in register definition but is 32 bit - * in our fix point definiton, need to shift left by 16 to obtain correct value - */ - pll_req.value |= fbmult_frac_val << 16; - - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); - - /* integer part is now VCO frequency in kHz */ - return dc_fixpt_floor(pll_req); -} - void dcn316_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn316 *clk_mgr, @@ -618,6 +571,7 @@ void dcn316_clk_mgr_construct( struct dccg *dccg) { struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn316_funcs; @@ -657,35 +611,28 @@ void dcn316_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); - - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; + clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); - // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ + // Skip this for now as it did not work on DCN315, renable during bring up + //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + 
clk_mgr->base.base.dentist_vco_freq_khz = 2500000; + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn316_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn316_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn316_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn316_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c index 457a9254ae1c..3ed19197a755 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -34,23 +34,21 @@ #define MAX_INSTANCE 7 #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; #define REG(reg_name) \ (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 8d9444db092a..e9345f6554db 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -182,23 +182,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_entries_per_clk->num_dcfclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); /* SOCCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK, &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, &num_entries_per_clk->num_socclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); /* DTBCLK */ - if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) + if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) { dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_entries_per_clk->num_dtbclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = + dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); + } /* DISPCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK, 
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, &num_entries_per_clk->num_dispclk_levels); num_levels = num_entries_per_clk->num_dispclk_levels; + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); + //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x + if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; if (num_entries_per_clk->num_dcfclk_levels && num_entries_per_clk->num_dtbclk_levels && @@ -233,6 +242,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) DC_FP_END(); } +static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + int ref_dtbclk_khz) +{ + struct dccg *dccg = clk_mgr->dccg; + uint32_t tg_mask = 0; + int i; + + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dtbclk_dto_params dto_params = {0}; + + /* use mask to program DTO once per tg */ + if (pipe_ctx->stream_res.tg && + !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { + tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); + + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.ref_dtbclk_khz = ref_dtbclk_khz; + + dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); + //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); + } + } +} + /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming), * update DPPCLK to be the exact frequency that will be set after the DPPCLK * divider is updated. This will prevent rounding issues that could cause DPP @@ -262,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz, prev_dppclk_khz; + int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; @@ -320,7 +355,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -366,7 +401,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -433,10 +468,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, bool update_uclk = false, update_fclk = false; bool p_state_change_support; bool fclk_p_state_change_support; - int total_plane_count; - - if (dc->work_arounds.skip_clock_update) - return; if (clk_mgr_base->clks.dispclk_khz == 0 || (dc->debug.force_clock_mode & 0x1)) { @@ -462,10 +493,10 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support; - total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); - fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0); + fclk_p_state_change_support = new_clocks->fclk_p_state_change_support; - if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) { + if 
(should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.fclk) { clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support; /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */ @@ -479,12 +510,14 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ? new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000); - if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && + !dc->work_arounds.clock_update_disable_mask.dcfclk) { clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz)); } - if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && + !dc->work_arounds.clock_update_disable_mask.dcfclk_ds) { clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz)); } @@ -502,36 +535,53 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways); } - - p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); - if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + p_state_change_support = new_clocks->p_state_change_support; + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.uclk) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ - if (!clk_mgr_base->clks.p_state_change_support) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + if (!clk_mgr_base->clks.p_state_change_support) { + if (dc->clk_mgr->dc_mode_softmax_enabled) { + /* On DCN32x we will never have the functional UCLK min above the softmax + * since we calculate mode support based on softmax being the max UCLK + * frequency. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + } else { + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + } + } } + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, true); + else + dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, false); + /* Always update saved value, even if new value not set due to P-State switching unsupported. 
Also check safe_to_lower for FCLK */ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) { update_fclk = true; } - if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) { + if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk && + !dc->work_arounds.clock_update_disable_mask.fclk) { /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED); } /* Always update saved value, even if new value not set due to P-State switching unsupported */ - if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz) && + !dc->work_arounds.clock_update_disable_mask.uclk) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; update_uclk = true; } /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ if (clk_mgr_base->clks.p_state_change_support && - (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support)) + (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.uclk) dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && @@ -570,6 +620,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); } if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { @@ -756,7 +807,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); else dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + clk_mgr_base->bw_params->max_memclk_mhz); } else { dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz); @@ -771,8 +822,7 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base) if (!clk_mgr->smu_present) return; - dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->max_memclk_mhz); } /* Get current memclk states, update bounding box */ @@ -789,6 +839,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_UCLK, &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz; /* memclk must have at least one level */ num_entries_per_clk->num_memclk_levels = 
num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1; @@ -796,13 +848,15 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { num_levels = num_entries_per_clk->num_memclk_levels; } else { num_levels = num_entries_per_clk->num_fclk_levels; } - + clk_mgr_base->bw_params->max_memclk_mhz = + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz; clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; if (clk_mgr->dpm_present && !num_levels) @@ -855,6 +909,25 @@ static bool dcn32_is_smu_present(struct clk_mgr *clk_mgr_base) return clk_mgr->smu_present; } +static void dcn32_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} + +static void dcn32_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} static struct clk_mgr_funcs dcn32_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, @@ -865,6 +938,8 @@ static struct clk_mgr_funcs dcn32_funcs = { .notify_wm_ranges = dcn32_notify_wm_ranges, .set_hard_min_memclk = dcn32_set_hard_min_memclk, .set_hard_max_memclk = dcn32_set_hard_max_memclk, + .set_max_memclk = dcn32_set_max_memclk, + .set_min_memclk = dcn32_set_min_memclk, .get_memclk_states_from_smu = dcn32_get_memclk_states_from_smu, .are_clock_states_equal = dcn32_are_clock_states_equal, .enable_pme_wa = dcn32_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index fb524fe4ab26..700ce42036d7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -139,3 +139,10 @@ unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, ui return response; } + +void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable) +{ + smu_print("PMFW to wait for DMCUB ack for MCLK : %d\n", enable); + + dcn32_smu_send_msg_with_param(clk_mgr, 0x14, enable ? 
1 : 0, NULL); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index a68038a41972..a34c258c19dc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -43,5 +43,6 @@ void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); +void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); #endif /* __DCN32_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7cde67b7f0c3..d08e60dff46d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -515,8 +515,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, cmd.secure_display.roi_info.y_end = rect->y + rect->height; } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static inline void @@ -587,18 +586,15 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream, bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous) { - int i; struct pipe_ctx *pipe; struct crc_params param; struct timing_generator *tg; - for (i = 0; i < MAX_PIPES; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) - break; - } + pipe = resource_get_otg_master_for_stream( + &dc->current_state->res_ctx, stream); + /* Stream not found */ - if (i == MAX_PIPES) + if (pipe == NULL) return false; /* By default, capture the full frame */ @@ -858,7 +854,6 @@ static bool dc_construct_ctx(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; - enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); if (!dc_ctx) @@ -876,8 +871,7 @@ static bool dc_construct_ctx(struct dc *dc, /* Create logger */ - dc_version = resource_parse_asic_id(init_params->asic_id); - dc_ctx->dce_version = dc_version; + dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { @@ -1050,8 +1044,10 @@ static void disable_all_writeback_pipes_for_stream( stream->writeback_info[i].wb_enabled = false; } -static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, - struct dc_stream_state *stream, bool lock) +static void apply_ctx_interdependent_lock(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream, + bool lock) { int i; @@ -1065,7 +1061,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex // Copied conditions that were previously in dce110_apply_ctx_for_surface if (stream == pipe_ctx->stream) { - if (!pipe_ctx->top_pipe && + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); } @@ -1120,6 +1116,33 @@ static void phantom_pipe_blank( hws->funcs.wait_for_blank_complete(opp); } 
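Several dc.c hunks in this patch replace open-coded scans for the pipe that owns a stream's timing generator with resource_get_otg_master_for_stream(), as in dc_stream_configure_crc() above and commit_planes_for_stream() below. Judging from the loop it replaces, the helper is presumably equivalent to this sketch; the real version may instead build on resource_is_pipe_type(pipe, OTG_MASTER), which this patch also starts using:

	struct pipe_ctx *resource_get_otg_master_for_stream(
			struct resource_context *res_ctx,
			struct dc_stream_state *stream)
	{
		int i;

		for (i = 0; i < MAX_PIPES; i++) {
			struct pipe_ctx *pipe = &res_ctx->pipe_ctx[i];

			/* the OTG master heads both the MPC blending chain
			 * and the ODM chain for its stream
			 */
			if (pipe->stream == stream &&
			    !pipe->top_pipe && !pipe->prev_odm_pipe)
				return pipe;
		}

		return NULL;	/* stream has no enabled pipe in this context */
	}
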
+static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (dc->ctx->dce_version >= DCN_VERSION_1_0) { + memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) + get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else { + if (dc->ctx->dce_version < DCN_VERSION_2_0) + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); + } + if (dc->ctx->dce_version >= DCN_VERSION_2_0) { + if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) + get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) + get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) + get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -1190,6 +1213,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); @@ -1236,6 +1262,9 @@ static void disable_vbios_mode_if_required( if (stream == NULL) continue; + if (stream->apply_seamless_boot_optimization) + continue; + // only looking for first odm pipe if (pipe->prev_odm_pipe) continue; @@ -1602,6 +1631,9 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; } + if (dc->debug.force_odm_combine) + return false; + /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; @@ -1893,6 +1925,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_trigger_sync(dc, context); + /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ + for (i = 0; i < context->stream_count; i++) { + uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; + + context->streams[i]->update_flags.raw = 0xFFFFFFFF; + context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; + } + /* Program all planes within new context*/ if (dc->hwss.program_front_end_for_ctx) { dc->hwss.interdependent_update_lock(dc, context, true); @@ -1971,6 +2011,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; + /* Clear update flags that were set earlier to avoid redundant programming */ + for (i = 0; i < context->stream_count; i++) { + context->streams[i]->update_flags.raw = 0x0; + } + old_state = dc->current_state; dc->current_state = context; @@ -2031,12 +2076,12 @@ enum dc_status 
dc_commit_streams(struct dc *dc, } } - /* Check for case where we are going from odm 2:1 to max - * pipe scenario. For these cases, we will call - * commit_minimal_transition_state() to exit out of odm 2:1 - * first before processing new streams + /* ODM Combine 2:1 power optimization is only applied for single stream + * scenario, it uses extra pipes than needed to reduce power consumption + * We need to switch off this feature to make room for new streams. */ - if (stream_count == dc->res_pool->pipe_count) { + if (stream_count > dc->current_state->stream_count && + dc->current_state->stream_count == 1) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->next_odm_pipe) @@ -2450,9 +2495,7 @@ static enum surface_update_type get_scaling_info_update_type( if (!u->scaling_info) return UPDATE_TYPE_FAST; - if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width - || u->scaling_info->clip_rect.height != u->surface->clip_rect.height - || u->scaling_info->dst_rect.width != u->surface->dst_rect.width + if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height || u->scaling_info->scaling_quality.integer_scaling != u->surface->scaling_quality.integer_scaling @@ -2504,9 +2547,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - if (u->flip_addr) - update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; @@ -2565,15 +2605,19 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change - || update_flags->bits.lut_3d - || update_flags->bits.gamma_change - || update_flags->bits.gamut_remap_change) { + if (update_flags->bits.lut_3d) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } + if (dc->debug.enable_legacy_fast_update && + (update_flags->bits.gamma_change || + update_flags->bits.gamut_remap_change || + update_flags->bits.input_csc_change || + update_flags->bits.coeff_reduction_change)) { + type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, type); + } return overall_type; } @@ -2606,7 +2650,7 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (stream_update->out_transfer_func) + if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) @@ -2626,14 +2670,23 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; - if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) - su_flags->bits.crtc_timing_adjust = 1; + + if (stream_update->stream && stream_update->stream->freesync_on_desktop && + (stream_update->vrr_infopacket || stream_update->allow_freesync || + stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) + su_flags->bits.fams_changed = 1; if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + /* Output transfer function changes do 
not require bandwidth recalculation, + * so don't trigger a full update + */ + if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + su_flags->bits.out_tf = 1; } for (i = 0 ; i < surface_count; i++) { @@ -2646,96 +2699,6 @@ static enum surface_update_type check_update_surfaces_for_stream( return overall_type; } -static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) -{ - int view_height, view_width, clip_x, clip_y, clip_width, clip_height; - - view_height = src.height; - view_width = src.width; - - clip_x = clip_rect.x; - clip_y = clip_rect.y; - - clip_width = clip_rect.width; - clip_height = clip_rect.height; - - /* check for centered video accounting for off by 1 scaling truncation */ - if ((view_height - clip_y - clip_height <= clip_y + 1) && - (view_width - clip_x - clip_width <= clip_x + 1) && - (view_height - clip_y - clip_height >= clip_y - 1) && - (view_width - clip_x - clip_width >= clip_x - 1)) { - - /* when OS scales up/down to letter box, it may end up - * with few blank pixels on the border due to truncating. - * Add offset margin to account for this - */ - if (clip_x <= 4 || clip_y <= 4) - return true; - } - - return false; -} - -static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, - struct dc_surface_update *srf_updates, int surface_count, - enum surface_update_type update_type) -{ - enum surface_update_type new_update_type = update_type; - int i, j; - struct pipe_ctx *pipe = NULL; - struct dc_stream_state *stream; - - /* Check that we are in windowed MPO with ODM - * - look for MPO pipe by scanning pipes for first pipe matching - * surface that has moved ( position change ) - * - MPO pipe will have top pipe - * - check that top pipe has ODM pointer - */ - if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { - for (i = 0; i < surface_count; i++) { - if (srf_updates[i].surface && srf_updates[i].scaling_info - && srf_updates[i].surface->update_flags.bits.position_change) { - - for (j = 0; j < dc->res_pool->pipe_count; j++) { - if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { - pipe = &dc->current_state->res_ctx.pipe_ctx[j]; - stream = pipe->stream; - break; - } - } - - if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream - && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { - struct rect old_clip_rect, new_clip_rect; - bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; - bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; - - old_clip_rect = srf_updates[i].surface->clip_rect; - new_clip_rect = srf_updates[i].scaling_info->clip_rect; - - old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); - old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); - old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; - - new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); - new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); - new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; - - if (old_clip_rect_left && new_clip_rect_middle) - new_update_type = UPDATE_TYPE_FULL; - else if (old_clip_rect_middle && new_clip_rect_right) - new_update_type = UPDATE_TYPE_FULL; - else if (old_clip_rect_right && new_clip_rect_middle) - new_update_type = UPDATE_TYPE_FULL; - else if 
(old_clip_rect_middle && new_clip_rect_left) - new_update_type = UPDATE_TYPE_FULL; - } - } - } - } - return new_update_type; -} - /* * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) * @@ -2767,10 +2730,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( updates[i].surface->update_flags.raw = 0xFFFFFFFF; } - if (type == UPDATE_TYPE_MED) - type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, - updates, surface_count, type); - if (type == UPDATE_TYPE_FAST) { // If there's an available clock comparator, we use that. if (dc->clk_mgr->funcs->are_clock_states_equal) { @@ -2986,6 +2945,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_variable) stream->vrr_active_variable = *update->vrr_active_variable; + if (update->vrr_active_fixed) + stream->vrr_active_fixed = *update->vrr_active_fixed; + if (update->crtc_timing_adjust) stream->adjust = *update->crtc_timing_adjust; @@ -3202,7 +3164,7 @@ static void commit_planes_do_stream_update(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); @@ -3290,6 +3252,13 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.prepare_bandwidth(dc, dc->current_state); dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } + } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space + && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { + /* + * Workaround for firmware issue in some receivers where they don't pick up + * correct output color space unless DP link is disabled/re-enabled + */ + dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { @@ -3320,6 +3289,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s && stream->ctx->dce_version >= DCN_VERSION_3_1) return true; + if (stream->link->replay_settings.config.replay_supported) + return true; + return false; } @@ -3330,7 +3302,6 @@ void dc_dmub_update_dirty_rect(struct dc *dc, struct dc_state *context) { union dmub_rb_cmd cmd; - struct dc_context *dc_ctx = dc->ctx; struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; unsigned int i, j; unsigned int panel_inst = 0; @@ -3371,13 +3342,99 @@ void dc_dmub_update_dirty_rect(struct dc *dc, update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; - dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } } -static void commit_planes_for_stream(struct dc *dc, +static void build_dmub_update_dirty_rect( + struct dc *dc, + int surface_count, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + union dmub_rb_cmd cmd; + struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; + unsigned int i, j; + unsigned int panel_inst = 0; + + if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) + return; + + if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + return; + + memset(&cmd, 0x0, 
sizeof(cmd)); + cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; + cmd.update_dirty_rect.header.sub_type = 0; + cmd.update_dirty_rect.header.payload_bytes = + sizeof(cmd.update_dirty_rect) - + sizeof(cmd.update_dirty_rect.header); + update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; + + if (!srf_updates[i].surface || !flip_addr) + continue; + /* Do not send in immediate flip mode */ + if (srf_updates[i].surface->flip_immediate) + continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; + memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, + sizeof(flip_addr->dirty_rects)); + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->stream != stream) + continue; + if (pipe_ctx->plane_state != plane_state) + continue; + update_dirty_rect->panel_inst = panel_inst; + update_dirty_rect->pipe_idx = j; + dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; + dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + (*dmub_cmd_count)++; + } + } +} + + +/** + * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB + * + * @dc: Current DC state + * @srf_updates: Array of surface updates + * @surface_count: Number of surfaces that have an updated + * @stream: Corresponding stream to be updated in the current flip + * @context: New DC state to be programmed + * + * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB + * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array + * + * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required + * to build an array of commands and have them sent while the OTG lock is acquired. 
+ * + * Return: void + */ +static void build_dmub_cmd_list(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + // Initialize cmd count to 0 + *dmub_cmd_count = 0; + build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); +} + +static void commit_planes_for_stream_fast(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -3387,47 +3444,126 @@ static void commit_planes_for_stream(struct dc *dc, { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; - bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); - bool subvp_prev_use = false; - bool subvp_curr_use = false; - - // Once we apply the new subvp context to hardware it won't be in the - // dc->current_state anymore, so we have to cache it before we apply - // the new SubVP context - subvp_prev_use = false; + dc_z10_restore(dc); + top_pipe_to_program = resource_get_otg_master_for_stream( + &context->res_ctx, + stream); - dc_z10_restore(dc); + if (dc->debug.visual_confirm) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (update_type == UPDATE_TYPE_FULL) { - /* wait for all double-buffer activity to clear on all pipes */ - int pipe_idx; + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + } - for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (!pipe_ctx->stream) + if (!pipe_ctx->plane_state) continue; - - if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) - pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; + } } } - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. 
- */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; + build_dmub_cmd_list(dc, + srf_updates, + surface_count, + stream, + context, + context->dc_dmub_cmd, + &(context->dmub_cmd_count)); + hwss_build_fast_sequence(dc, + context->dc_dmub_cmd, + context->dmub_cmd_count, + context->block_sequence, + &(context->block_sequence_steps), + top_pipe_to_program); + hwss_execute_sequence(dc, + context->block_sequence, + context->block_sequence_steps); + /* Clear update flags so next flip doesn't have redundant programming + * (if there's no stream update, the update flags are not cleared). + * Surface updates are cleared unconditionally at the beginning of each flip, + * so no need to clear here. + */ + if (top_pipe_to_program->stream) + top_pipe_to_program->stream->update_flags.raw = 0; +} - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; +static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) +{ +/* + * This function calls HWSS to wait for any potentially double buffered + * operations to complete. It should be invoked as a pre-amble prior + * to full update programming before asserting any HW locks. + */ + int pipe_idx; + int opp_inst; + int opp_count = dc->res_pool->pipe_count; + struct hubp *hubp; + int mpcc_inst; + const struct pipe_ctx *pipe_ctx; + + for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { + pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) + pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + + hubp = pipe_ctx->plane_res.hubp; + if (!hubp) + continue; + + mpcc_inst = hubp->inst; + // MPCC inst is equal to pipe index in practice + for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { + if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; + break; + } } } +} + +static void commit_planes_for_stream(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + bool subvp_prev_use = false; + bool subvp_curr_use = false; + + // Once we apply the new subvp context to hardware it won't be in the + // dc->current_state anymore, so we have to cache it before we apply + // the new SubVP context + subvp_prev_use = false; + dc_z10_restore(dc); + if (update_type == UPDATE_TYPE_FULL) + wait_for_outstanding_hw_updates(dc, context); if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -3441,16 +3577,9 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - - if (!pipe_ctx->top_pipe && - !pipe_ctx->prev_odm_pipe && - pipe_ctx->stream && - pipe_ctx->stream == stream) { - top_pipe_to_program = pipe_ctx; - } - } + top_pipe_to_program = resource_get_otg_master_for_stream( + &context->res_ctx, + stream); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe 
+
+static void commit_planes_for_stream(struct dc *dc,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_state *stream,
+		struct dc_stream_update *stream_update,
+		enum surface_update_type update_type,
+		struct dc_state *context)
+{
+	int i, j;
+	struct pipe_ctx *top_pipe_to_program = NULL;
+	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+	bool subvp_prev_use = false;
+	bool subvp_curr_use = false;
+
+	// Once we apply the new subvp context to hardware it won't be in the
+	// dc->current_state anymore, so we have to cache it before we apply
+	// the new SubVP context
+	subvp_prev_use = false;
+	dc_z10_restore(dc);
+	if (update_type == UPDATE_TYPE_FULL)
+		wait_for_outstanding_hw_updates(dc, context);

 	if (update_type == UPDATE_TYPE_FULL) {
 		dc_allow_idle_optimizations(dc, false);
@@ -3441,16 +3577,9 @@ static void commit_planes_for_stream(struct dc *dc,
 		context_clock_trace(dc, context);
 	}

-	for (j = 0; j < dc->res_pool->pipe_count; j++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
-		if (!pipe_ctx->top_pipe &&
-			!pipe_ctx->prev_odm_pipe &&
-			pipe_ctx->stream &&
-			pipe_ctx->stream == stream) {
-			top_pipe_to_program = pipe_ctx;
-		}
-	}
+	top_pipe_to_program = resource_get_otg_master_for_stream(
+			&context->res_ctx,
+			stream);

 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3470,6 +3599,14 @@ static void commit_planes_for_stream(struct dc *dc,
 		}
 	}

+	if (dc->debug.visual_confirm)
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe->stream && pipe->plane_state)
+				dc_update_viusal_confirm_color(dc, context, pipe);
+		}
+
 	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
 		struct pipe_ctx *mpcc_pipe;
 		struct pipe_ctx *odm_pipe;
@@ -3553,43 +3690,40 @@ static void commit_planes_for_stream(struct dc *dc,
 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

-			if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP &&
+			if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
+					dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
 					pipe_ctx->stream && pipe_ctx->plane_state) {
-				/* Only update visual confirm for SUBVP here.
+				/* Only update visual confirm for SUBVP and Mclk switching here.
 				 * The bar appears on all pipes, so we need to update the bar on all displays,
 				 * so the information doesn't get stale.
 				 */
-				struct mpcc_blnd_cfg blnd_cfg = { 0 };
-
-				dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color,
+				dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
 						pipe_ctx->plane_res.hubp->inst);
 			}
 		}
 	}

-	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-		for (i = 0; i < surface_count; i++) {
-			struct dc_plane_state *plane_state = srf_updates[i].surface;
-			/*set logical flag for lock/unlock use*/
-			for (j = 0; j < dc->res_pool->pipe_count; j++) {
-				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-				if (!pipe_ctx->plane_state)
-					continue;
-				if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
-					continue;
-				pipe_ctx->plane_state->triplebuffer_flips = false;
-				if (update_type == UPDATE_TYPE_FAST &&
-					dc->hwss.program_triplebuffer != NULL &&
-					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
-						/*triple buffer for VUpdate only*/
-						pipe_ctx->plane_state->triplebuffer_flips = true;
-				}
-			}
-			if (update_type == UPDATE_TYPE_FULL) {
-				/* force vsync flip when reconfiguring pipes to prevent underflow */
-				plane_state->flip_immediate = false;
+	for (i = 0; i < surface_count; i++) {
+		struct dc_plane_state *plane_state = srf_updates[i].surface;
+		/*set logical flag for lock/unlock use*/
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+			if (!pipe_ctx->plane_state)
+				continue;
+			if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+				continue;
+			pipe_ctx->plane_state->triplebuffer_flips = false;
+			if (update_type == UPDATE_TYPE_FAST &&
+					dc->hwss.program_triplebuffer != NULL &&
+					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+				/*triple buffer for VUpdate only*/
+				pipe_ctx->plane_state->triplebuffer_flips = true;
 			}
 		}
+		if (update_type == UPDATE_TYPE_FULL) {
+			/* force vsync flip when reconfiguring pipes to prevent underflow */
+			plane_state->flip_immediate = false;
+		}
 	}

 	// Update Type FULL, Surface updates
@@ -3886,9 +4020,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
 		struct dc_state *transition_base_context)
 {
 	struct dc_state *transition_context = dc_create_state(dc);
-	enum pipe_split_policy tmp_mpc_policy;
-	bool temp_dynamic_odm_policy;
-	bool temp_subvp_policy;
+	enum pipe_split_policy tmp_mpc_policy = 0;
+	bool temp_dynamic_odm_policy = 0;
+	bool temp_subvp_policy = 0;
 	enum dc_status ret = DC_ERROR_UNEXPECTED;
 	unsigned int i, j;
	unsigned int pipe_in_use = 0;
@@ -4006,6 +4140,161 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	return true;
 }

+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have an update
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+		struct dc_state *context,
+		int surface_count,
+		struct dc_stream_state *stream)
+{
+	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+		/* Optimize seamless boot flag keeps clocks and watermarks high until
+		 * first flip. After first flip, optimization is required to lower
+		 * bandwidth. Important to note that it is expected UEFI will
+		 * only light up a single display on POST, therefore we only expect
+		 * one stream with seamless boot flag set.
+		 */
+		if (stream->apply_seamless_boot_optimization) {
+			stream->apply_seamless_boot_optimization = false;
+
+			if (get_seamless_boot_stream_count(context) == 0)
+				dc->optimized_required = true;
+		}
+	}
+}
+
+static void populate_fast_updates(struct dc_fast_update *fast_update,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_update *stream_update)
+{
+	int i = 0;
+
+	if (stream_update) {
+		fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+		fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+	}
+
+	for (i = 0; i < surface_count; i++) {
+		fast_update[i].flip_addr = srf_updates[i].flip_addr;
+		fast_update[i].gamma = srf_updates[i].gamma;
+		fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+		fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+		fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+	}
+}
+
+static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+	int i;
+
+	if (fast_update[0].out_transfer_func ||
+			fast_update[0].output_csc_transform)
+		return true;
+
+	for (i = 0; i < surface_count; i++) {
+		if (fast_update[i].flip_addr ||
+				fast_update[i].gamma ||
+				fast_update[i].gamut_remap_matrix ||
+				fast_update[i].input_csc_color_matrix ||
+				fast_update[i].coeff_reduction_factor)
+			return true;
+	}
+
+	return false;
+}
+
+static bool full_update_required(struct dc *dc,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_update *stream_update,
+		struct dc_stream_state *stream)
+{
+
+	int i;
+	struct dc_stream_status *stream_status;
+	const struct dc_state *context = dc->current_state;
+
+	for (i = 0; i < surface_count; i++) {
+		if (srf_updates &&
+				(srf_updates[i].plane_info ||
+				srf_updates[i].scaling_info ||
+				(srf_updates[i].hdr_mult.value &&
+				srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
+				srf_updates[i].in_transfer_func ||
+				srf_updates[i].func_shaper ||
+				srf_updates[i].lut3d_func ||
+				srf_updates[i].blend_tf ||
+				srf_updates[i].surface->force_full_update ||
+				(srf_updates[i].flip_addr &&
+				srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+				!is_surface_in_context(context, srf_updates[i].surface)))
+			return true;
+	}
+
+
if (stream_update && + (((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || + stream_update->integer_scaling_update) || + stream_update->hdr_static_metadata || + stream_update->abm_level || + stream_update->periodic_interrupt || + stream_update->vrr_infopacket || + stream_update->vsc_infopacket || + stream_update->vsp_infopacket || + stream_update->hfvsif_infopacket || + stream_update->vtem_infopacket || + stream_update->adaptive_sync_infopacket || + stream_update->dpms_off || + stream_update->allow_freesync || + stream_update->vrr_active_variable || + stream_update->vrr_active_fixed || + stream_update->gamut_remap || + stream_update->output_color_space || + stream_update->dither_option || + stream_update->wb_update || + stream_update->dsc_config || + stream_update->mst_bw_update || + stream_update->func_shaper || + stream_update->lut3d_func || + stream_update->pending_test_pattern || + stream_update->crtc_timing_adjust)) + return true; + + if (stream) { + stream_status = dc_stream_get_status(stream); + if (stream_status == NULL || stream_status->plane_count != surface_count) + return true; + } + if (dc->idle_optimizations_allowed) + return true; + + return false; +} + +static bool fast_update_only(struct dc *dc, + struct dc_fast_update *fast_update, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_update *stream_update, + struct dc_stream_state *stream) +{ + return fast_updates_exist(fast_update, surface_count) + && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4015,14 +4304,16 @@ bool dc_update_planes_and_stream(struct dc *dc, enum surface_update_type update_type; int i; struct mall_temp_config mall_temp_config; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can * cause underflow. Apply stream configuration with minimal pipe * split first to avoid unsupported transitions for active pipes. 
*/ - bool force_minimal_pipe_splitting; - bool is_plane_addition; + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( dc, stream, @@ -4072,14 +4363,26 @@ bool dc_update_planes_and_stream(struct dc *dc, update_type = UPDATE_TYPE_FULL; } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); + update_seamless_boot_flags(dc, context, surface_count, stream); + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && + !dc->debug.enable_legacy_fast_update) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } if (dc->current_state != context) { @@ -4119,7 +4422,9 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; int i, j; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -4204,7 +4509,18 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - commit_planes_for_stream( + update_seamless_boot_flags(dc, context, surface_count, stream); + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && + !dc->debug.enable_legacy_fast_update) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( dc, srf_updates, surface_count, @@ -4212,6 +4528,7 @@ void dc_commit_updates_for_stream(struct dc *dc, stream_update, update_type, context); + } /*update current_State*/ if (dc->current_state != context) { @@ -4298,9 +4615,6 @@ void dc_set_power_state( dc_z10_restore(dc); - if (dc->ctx->dmub_srv) - dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); - dc->hwss.init_hw(dc); if (dc->hwss.init_sys_ctx != NULL && @@ -4507,15 +4821,17 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl */ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) { - uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; - unsigned int softMax, maxDPM, funcMin; + unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; bool p_state_change_support; - if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + if (!dc->config.dc_mode_clk_limit_support) return; softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; - maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; + for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { + if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) + maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; + } funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; p_state_change_support = dc->clk_mgr->clks.p_state_change_support; @@ -4640,7 +4956,6 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, { uint8_t action; union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; ASSERT(payload->length <= 16); @@ -4688,9 +5003,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, 
); } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -4734,7 +5047,6 @@ bool dc_process_dmub_set_config_async(struct dc *dc, struct dmub_notification *notify) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; bool is_cmd_complete = true; /* prepare SET_CONFIG command */ @@ -4745,7 +5057,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc, cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { /* command is not processed by dmub */ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; return is_cmd_complete; @@ -4780,7 +5092,6 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint8_t *mst_slots_in_use) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; /* prepare MST_ALLOC_SLOTS command */ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; @@ -4789,7 +5100,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) /* command is not processed by dmub */ return DC_ERROR_UNEXPECTED; @@ -4823,19 +5134,28 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; cmd.dpia_hpd_int_enable.enable = hpd_int_enable; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } /** + * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging + * + * @dc: [in] dc structure + * + * + */ +void dc_print_dmub_diagnostic_data(const struct dc *dc) +{ + dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); +} + +/** * dc_disable_accelerated_mode - disable accelerated mode * @dc: dc structure */ @@ -4866,6 +5186,9 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (link->psr_settings.psr_feature_enabled) return; + if (link->replay_settings.replay_feature_enabled) + return; + /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -4895,20 +5218,69 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } -/** - * dc_extended_blank_supported - Decide whether extended blank is supported - * - * @dc: [in] Current DC state - * - * Extended blank is a freesync optimization feature to be enabled in the - * future. During the extra vblank period gained from freesync, we have the - * ability to enter z9/z10. 
+/***************************************************************************** + * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause + * ABM + * @dc: dc structure + * @stream: stream where vsync int state changed + * @pData: abm hw states * - * Return: - * Indicate whether extended blank is supported (%true or %false) - */ -bool dc_extended_blank_supported(struct dc *dc) + ****************************************************************************/ +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData) { - return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 - && dc->caps.zstate_support && dc->caps.is_apu; + int i; + int edp_num; + struct pipe_ctx *pipe = NULL; + struct dc_link *link = stream->sink->link; + struct dc_link *edp_links[MAX_NUM_EDP]; + + + /*find primary pipe associated with stream*/ + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) + break; + } + + if (i == MAX_PIPES) { + ASSERT(0); + return false; + } + + dc_get_edp_links(dc, edp_links, &edp_num); + + /* Determine panel inst */ + for (i = 0; i < edp_num; i++) + if (edp_links[i] == link) + break; + + if (i == edp_num) + return false; + + if (pipe->stream_res.abm && + pipe->stream_res.abm->funcs->save_restore) + return pipe->stream_res.abm->funcs->save_restore( + pipe->stream_res.abm, + i, + pData); + return false; +} + +void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) +{ + unsigned int i; + bool subvp_in_use = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) { + subvp_in_use = true; + break; + } + } + properties->cursor_size_limit = subvp_in_use ? 
64 : dc->caps.max_cursor_size; } + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 2acbf692193f..f99ec1b0efaf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -27,6 +27,8 @@ #include "core_types.h" #include "timing_generator.h" #include "hw_sequencer.h" +#include "hw_sequencer_private.h" +#include "basics/dc_common.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -185,6 +187,7 @@ static bool is_ycbcr709_limited_type( ret = true; return ret; } + static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space) { enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE; @@ -421,6 +424,7 @@ void get_hdr_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -428,15 +432,17 @@ void get_subvp_visual_confirm_color( bool enable_subvp = false; int i; - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx) + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { /* SubVP enable - red */ + color->color_g_y = 0; + color->color_b_cb = 0; color->color_r_cr = color_value; enable_subvp = true; @@ -448,12 +454,374 @@ void get_subvp_visual_confirm_color( if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { color->color_r_cr = 0; - if (pipe_ctx->stream->ignore_msa_timing_param == 1) + if (pipe_ctx->stream->allow_freesync == 1) { /* SubVP enable and DRR on - green */ + color->color_b_cb = 0; color->color_g_y = color_value; - else + } else { /* SubVP enable and No DRR - blue */ + color->color_g_y = 0; + color->color_b_cb = color_value; + } + } +} + +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx) +{ + struct dc_plane_state *plane = pipe_ctx->plane_state; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dce_hwseq *hws = dc->hwseq; + struct pipe_ctx *current_pipe = NULL; + struct pipe_ctx *current_mpc_pipe = NULL; + unsigned int i = 0; + + *num_steps = 0; // Initialize to 0 + + if (!plane || !stream) + return; + + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = true; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + + for (i = 0; i < dmub_cmd_count; i++) { + block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx; + block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = 
&(dc_dmub_cmd[i].dmub_cmd); + block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type; + block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + while (current_mpc_pipe) { + if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; + block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*num_steps)++; + } + if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; + block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; + block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*num_steps)++; + } + if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { + block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; + block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*num_steps)++; + } + + if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { + block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; + block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { + block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { + block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_SETUP_DPP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { + block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*num_steps)++; + } + if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { + block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream; + block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + if (current_mpc_pipe->stream->update_flags.bits.out_csc) { + block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc; + block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = 
current_mpc_pipe->plane_res.hubp->inst; + block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.power_on = true; + block_sequence[*num_steps].func = MPC_POWER_ON_MPC_MEM_PWR; + (*num_steps)++; + + if (current_mpc_pipe->stream->csc_color_matrix.enable_adjustment == true) { + block_sequence[*num_steps].params.set_output_csc_params.mpc = dc->res_pool->mpc; + block_sequence[*num_steps].params.set_output_csc_params.opp_id = current_mpc_pipe->stream_res.opp->inst; + block_sequence[*num_steps].params.set_output_csc_params.regval = current_mpc_pipe->stream->csc_color_matrix.matrix; + block_sequence[*num_steps].params.set_output_csc_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + block_sequence[*num_steps].func = MPC_SET_OUTPUT_CSC; + (*num_steps)++; + } else { + block_sequence[*num_steps].params.set_ocsc_default_params.mpc = dc->res_pool->mpc; + block_sequence[*num_steps].params.set_ocsc_default_params.opp_id = current_mpc_pipe->stream_res.opp->inst; + block_sequence[*num_steps].params.set_ocsc_default_params.color_space = current_mpc_pipe->stream->output_color_space; + block_sequence[*num_steps].params.set_ocsc_default_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + block_sequence[*num_steps].func = MPC_SET_OCSC_DEFAULT; + (*num_steps)++; + } + } + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } + + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = false; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + + while (current_mpc_pipe) { + if (!current_mpc_pipe->bottom_pipe && !current_mpc_pipe->next_odm_pipe && + current_mpc_pipe->stream && current_mpc_pipe->plane_state && + current_mpc_pipe->plane_state->update_flags.bits.addr_update && + !current_mpc_pipe->plane_state->skip_manual_trigger) { + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*num_steps)++; + } + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } +} + +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps) +{ + unsigned int i; + union block_sequence_params *params; + struct dce_hwseq *hws = dc->hwseq; + + for (i = 0; i < num_steps; i++) { + params = &(block_sequence[i].params); + switch (block_sequence[i].func) { + + case DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST: + dc->hwss.subvp_pipe_control_lock_fast(params); + break; + case OPTC_PIPE_CONTROL_LOCK: + dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc, + params->pipe_control_lock_params.pipe_ctx, + params->pipe_control_lock_params.lock); + break; + case HUBP_SET_FLIP_CONTROL_GSL: + dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, + 
params->set_flip_control_gsl_params.flip_immediate); + break; + case HUBP_PROGRAM_TRIPLEBUFFER: + dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, + params->program_triplebuffer_params.pipe_ctx, + params->program_triplebuffer_params.enableTripleBuffer); + break; + case HUBP_UPDATE_PLANE_ADDR: + dc->hwss.update_plane_addr(params->update_plane_addr_params.dc, + params->update_plane_addr_params.pipe_ctx); + break; + case DPP_SET_INPUT_TRANSFER_FUNC: + hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc, + params->set_input_transfer_func_params.pipe_ctx, + params->set_input_transfer_func_params.plane_state); + break; + case DPP_PROGRAM_GAMUT_REMAP: + dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx); + break; + case DPP_SETUP_DPP: + hwss_setup_dpp(params); + break; + case DPP_PROGRAM_BIAS_AND_SCALE: + hwss_program_bias_and_scale(params); + break; + case OPTC_PROGRAM_MANUAL_TRIGGER: + hwss_program_manual_trigger(params); + break; + case DPP_SET_OUTPUT_TRANSFER_FUNC: + hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc, + params->set_output_transfer_func_params.pipe_ctx, + params->set_output_transfer_func_params.stream); + break; + case MPC_UPDATE_VISUAL_CONFIRM: + dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc, + params->update_visual_confirm_params.pipe_ctx, + params->update_visual_confirm_params.mpcc_id); + break; + case MPC_POWER_ON_MPC_MEM_PWR: + hwss_power_on_mpc_mem_pwr(params); + break; + case MPC_SET_OUTPUT_CSC: + hwss_set_output_csc(params); + break; + case MPC_SET_OCSC_DEFAULT: + hwss_set_ocsc_default(params); + break; + case DMUB_SEND_DMCUB_CMD: + hwss_send_dmcub_cmd(params); + break; + default: + ASSERT(false); + break; + } + } +} + +void hwss_send_dmcub_cmd(union block_sequence_params *params) +{ + struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; + union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; + enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; + + dm_execute_dmub_cmd(ctx, cmd, wait_type); +} + +void hwss_program_manual_trigger(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; + + if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); +} + +void hwss_setup_dpp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + if (dpp && dpp->funcs->dpp_setup) { + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + } +} + +void hwss_program_bias_and_scale(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dc_bias_and_scale bns_params = {0}; + + //TODO :for CNVC set scale and bias registers if necessary + build_prescale_params(&bns_params, plane_state); + if (dpp->funcs->dpp_program_bias_and_scale) + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); +} + +void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) +{ + struct mpc *mpc = params->power_on_mpc_mem_pwr_params.mpc; + int mpcc_id 
= params->power_on_mpc_mem_pwr_params.mpcc_id; + bool power_on = params->power_on_mpc_mem_pwr_params.power_on; + + if (mpc->funcs->power_on_mpc_mem_pwr) + mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, power_on); +} + +void hwss_set_output_csc(union block_sequence_params *params) +{ + struct mpc *mpc = params->set_output_csc_params.mpc; + int opp_id = params->set_output_csc_params.opp_id; + const uint16_t *matrix = params->set_output_csc_params.regval; + enum mpc_output_csc_mode ocsc_mode = params->set_output_csc_params.ocsc_mode; + + if (mpc->funcs->set_output_csc != NULL) + mpc->funcs->set_output_csc(mpc, + opp_id, + matrix, + ocsc_mode); +} + +void hwss_set_ocsc_default(union block_sequence_params *params) +{ + struct mpc *mpc = params->set_ocsc_default_params.mpc; + int opp_id = params->set_ocsc_default_params.opp_id; + enum dc_color_space colorspace = params->set_ocsc_default_params.color_space; + enum mpc_output_csc_mode ocsc_mode = params->set_ocsc_default_params.ocsc_mode; + + if (mpc->funcs->set_ocsc_default != NULL) + mpc->funcs->set_ocsc_default(mpc, + opp_id, + colorspace, + ocsc_mode); +} + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + color->color_r_cr = color_value; + color->color_g_y = color_value; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + } + } else { + /* In Vactive - pink */ + color->color_r_cr = color_value; color->color_b_cb = color_value; + } + /* SubVP */ + get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 30c0644d4418..be5a6d008b29 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -169,11 +169,23 @@ static void add_link_enc_assignment( /* Return first available DIG link encoder. */ static enum engine_id find_first_avail_link_enc( const struct dc_context *ctx, - const struct dc_state *state) + const struct dc_state *state, + enum engine_id eng_id_requested) { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; + if (eng_id_requested != ENGINE_ID_UNKNOWN) { + + for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; + if (eng_id == eng_id_requested) + return eng_id; + } + } + + eng_id = ENGINE_ID_UNKNOWN; + for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; if (eng_id != ENGINE_ID_UNKNOWN) @@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign( struct dc_stream_state *streams[], uint8_t stream_count) { - enum engine_id eng_id = ENGINE_ID_UNKNOWN; + enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN; int i; int j; @@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign( * assigned to that endpoint. 
*/ link_enc = get_link_enc_used_by_link(state, stream->link); - if (link_enc == NULL) - eng_id = find_first_avail_link_enc(stream->ctx, state); + if (link_enc == NULL) { + + if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN) + eng_id_req = stream->link->dpia_preferred_eng_id; + + eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req); + } else eng_id = link_enc->preferred_engine; @@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign( DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", - assignment.ep_id.link_id.enum_id - 1, + assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? + assignment.ep_id.link_id.enum_id : + assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } for (i = 0; i < MAX_PIPES; i++) { @@ -413,7 +433,9 @@ void link_enc_cfg_link_encs_assign( DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", - assignment.ep_id.link_id.enum_id - 1, + assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? + assignment.ep_id.link_id.enum_id : + assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } @@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc( if (stream) link = stream->link; - // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); return link; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 18e098568cb4..ed94187c2afa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link) return link->dc->link_srv->dp_get_verified_link_cap(link); } +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link) +{ + if (dc_is_dp_signal(link->connector_signal)) { + if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE && + link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE) + return DC_LINK_ENCODING_HDMI_TMDS; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_8b_10b_ENCODING) + return DC_LINK_ENCODING_DP_8b_10b; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_128b_132b_ENCODING) + return DC_LINK_ENCODING_DP_128b_132b; + } else if (dc_is_hdmi_signal(link->connector_signal)) { + } + + return DC_LINK_ENCODING_UNSPECIFIED; +} + bool dc_link_is_dp_sink_present(struct dc_link *link) { return link->dc->link_srv->dp_is_sink_present(link); @@ -449,6 +467,11 @@ bool dc_link_setup_psr(struct dc_link *link, return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } +bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) +{ + return link->dc->link_srv->edp_get_replay_state(link, state); +} + bool dc_link_wait_for_t12(struct dc_link *link) { return link->dc->link_srv->edp_wait_for_t12(link); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index fe1551393b26..f7b51aca6020 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -45,6 +45,8 @@ #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" #include "link/hwss/link_hwss_hpo_dp.h" +#include 
"link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h" +#include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -69,8 +71,20 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" +#define VISUAL_CONFIRM_BASE_DEFAULT 3 +#define VISUAL_CONFIRM_BASE_MIN 1 +#define VISUAL_CONFIRM_BASE_MAX 10 +/* we choose 240 because it is a common denominator of common v addressable + * such as 2160, 1440, 1200, 960. So we take 1/240 portion of v addressable as + * the visual confirm dpp offset height. So visual confirm height can stay + * relatively the same independent from timing used. + */ +#define VISUAL_CONFIRM_DPP_OFFSET_DENO 240 + #define DC_LOGGER_INIT(logger) +#define UNABLE_TO_SPLIT -1 + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -718,10 +732,10 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } -int get_num_mpc_splits(struct pipe_ctx *pipe) +int resource_get_num_mpc_splits(const struct pipe_ctx *pipe) { int mpc_split_count = 0; - struct pipe_ctx *other_pipe = pipe->bottom_pipe; + const struct pipe_ctx *other_pipe = pipe->bottom_pipe; while (other_pipe && other_pipe->plane_state == pipe->plane_state) { mpc_split_count++; @@ -736,48 +750,46 @@ int get_num_mpc_splits(struct pipe_ctx *pipe) return mpc_split_count; } -int get_num_odm_splits(struct pipe_ctx *pipe) +int resource_get_num_odm_splits(const struct pipe_ctx *pipe) { int odm_split_count = 0; - struct pipe_ctx *next_pipe = pipe->next_odm_pipe; - while (next_pipe) { - odm_split_count++; - next_pipe = next_pipe->next_odm_pipe; - } - pipe = pipe->prev_odm_pipe; - while (pipe) { + + pipe = resource_get_otg_master(pipe); + + while (pipe->next_odm_pipe) { odm_split_count++; - pipe = pipe->prev_odm_pipe; + pipe = pipe->next_odm_pipe; } return odm_split_count; } -static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx) +static int get_odm_split_index(struct pipe_ctx *pipe_ctx) { - *split_count = get_num_odm_splits(pipe_ctx); - *split_idx = 0; - if (*split_count == 0) { - /*Check for mpc split*/ - struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; - - *split_count = get_num_mpc_splits(pipe_ctx); - while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { - (*split_idx)++; - split_pipe = split_pipe->top_pipe; - } + int index = 0; - /* MPO window on right side of ODM split */ - if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) - (*split_idx)++; - } else { - /*Get odm split index*/ - struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; + pipe_ctx = resource_get_opp_head(pipe_ctx); + if (!pipe_ctx) + return 0; - while (split_pipe) { - (*split_idx)++; - split_pipe = split_pipe->prev_odm_pipe; - } + while (pipe_ctx->prev_odm_pipe) { + index++; + pipe_ctx = pipe_ctx->prev_odm_pipe; } + + return index; +} + +static int get_mpc_split_index(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + int index = 0; + + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + index++; + split_pipe = split_pipe->top_pipe; + } + + return index; } /* @@ -799,82 +811,366 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) } } -static void calculate_recout(struct pipe_ctx *pipe_ctx) +static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { - const struct dc_plane_state *plane_state = 
pipe_ctx->plane_state;
-	const struct dc_stream_state *stream = pipe_ctx->stream;
-	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-	struct rect surf_clip = plane_state->clip_rect;
-	bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-	int split_count, split_idx;
+	struct rect rec;
+	int r0_x_end = r0->x + r0->width;
+	int r1_x_end = r1->x + r1->width;
+	int r0_y_end = r0->y + r0->height;
+	int r1_y_end = r1->y + r1->height;
+
+	rec.x = r0->x > r1->x ? r0->x : r1->x;
+	rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
+	rec.y = r0->y > r1->y ? r0->y : r1->y;
+	rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
+
+	/* in case that there is no intersection */
+	if (rec.width < 0 || rec.height < 0)
+		memset(&rec, 0, sizeof(rec));
+
+	return rec;
+}

-	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
-	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
-		split_idx = 0;
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+	struct rect rec_out = *rec_in;
+
+	rec_out.x += x;
+	rec_out.y += y;
+
+	return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	int odm_slice_count = resource_get_num_odm_splits(pipe_ctx) + 1;
+	int odm_slice_idx = get_odm_split_index(pipe_ctx);
+	bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+	int h_active = stream->timing.h_addressable +
+			stream->timing.h_border_left +
+			stream->timing.h_border_right;
+	int odm_slice_width = h_active / odm_slice_count;
+	struct rect odm_rec;
+
+	odm_rec.x = odm_slice_width * odm_slice_idx;
+	odm_rec.width = is_last_odm_slice ?
+			/* last slice width is the remainder of h_active */
+			h_active - odm_slice_width * (odm_slice_count - 1) :
+			/* odm slice width is the floor of h_active / count */
+			odm_slice_width;
+	odm_rec.y = 0;
+	odm_rec.height = stream->timing.v_addressable +
+			stream->timing.v_border_bottom +
+			stream->timing.v_border_top;
+
+	return odm_rec;
+}
+
+static struct rect calculate_plane_rec_in_timing_active(
+		struct pipe_ctx *pipe_ctx,
+		const struct rect *rec_in)
+{
+	/*
+	 * The following diagram shows an example where we map a 1920x1200
+	 * desktop to a 2560x1440 timing with a plane rect in the middle
+	 * of the screen. To map a plane rect from Stream Source to Timing
+	 * Active space, we first multiply stream scaling ratios (i.e. 2304/1920
+	 * horizontal and 1440/1200 vertical) to the plane's x and y, then
+	 * we add stream destination offsets (i.e. 128 horizontal, 0 vertical).
+	 * This will give us a plane rect's position in Timing Active. However
+	 * we have to remove the fractional part. The rule is that we find left/right
+	 * and top/bottom positions and round the value to the adjacent integer.
+	 *
+	 * Stream Source Space
+	 * ------------
+	 *        __________________________________________________
+	 *       |Stream Source (1920 x 1200)             ^         |
+	 *       |                                        y         |
+	 *       |         <------- w --------|>                    |
+	 *       |          __________________V                     |
+	 *       |<-- x -->|Plane//////////////|           ^        |
+	 *       |         |(pre scale)////////|           |        |
+	 *       |         |///////////////////|           |        |
+	 *       |         |///////////////////|           h        |
+	 *       |         |///////////////////|           |        |
+	 *       |         |///////////////////|           |        |
+	 *       |         |///////////////////|           V        |
+	 *       |                                                  |
+	 *       |                                                  |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space
+	 * ---------------------------------
+	 *
+	 *       Timing Active (2560 x 1440)
+	 *        __________________________________________________
+	 *       |*****|  Stream Destination (2304 x 1440)    |*****|
+	 *       |*****|                                      |*****|
+	 *       |<128>|                                      |*****|
+	 *       |*****|     __________________               |*****|
+	 *       |*****|    |Plane/////////////|              |*****|
+	 *       |*****|    |(post scale)//////|              |*****|
+	 *       |*****|    |//////////////////|              |*****|
+	 *       |*****|    |//////////////////|              |*****|
+	 *       |*****|    |//////////////////|              |*****|
+	 *       |*****|    |//////////////////|              |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|______________________________________|*****|
+	 *
+	 * So the resulting formulas are shown below:
+	 *
+	 * recout_x = 128 + round(plane_x * 2304 / 1920)
+	 * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+	 * recout_y = 0 + round(plane_y * 1440 / 1200)
+	 * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+	 *
+	 * NOTE: fixed point division is not error free. To reduce errors
+	 * introduced by fixed point division, we divide only after
+	 * multiplication is complete.
+	 */
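[Editorial aside: the formulas above can be checked with ordinary integer arithmetic. The sketch below is not driver code; it stands in for dc_fixpt_from_fraction/dc_fixpt_round by multiplying first and rounding once, and uses the 2304/1920 ratio and 128-pixel offset from the example diagram.]

    #include <stdint.h>
    #include <stdio.h>

    static int32_t map_round(int64_t coord, int64_t dst, int64_t src)
    {
        /* round(coord * dst / src): multiply first, divide only once */
        return (int32_t)((coord * dst + src / 2) / src);
    }

    int main(void)
    {
        int plane_x = 640, plane_w = 640;       /* plane rect in Stream Source */
        int dst_x = 128, dst_w = 2304, src_w = 1920;

        int recout_x = dst_x + map_round(plane_x, dst_w, src_w);
        int recout_w = dst_x + map_round(plane_x + plane_w, dst_w, src_w) - recout_x;

        /* prints: recout_x=896 recout_w=768 */
        printf("recout_x=%d recout_w=%d\n", recout_x, recout_w);
        return 0;
    }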
-	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-		/* MPO window on right side of ODM split */
-		data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
-				stream->dst.width / stream->src.width;
-	} else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
-		data->recout.x = stream->dst.x;
-		if (stream->src.x < surf_clip.x)
-			data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
-						/ stream->src.width;
-	} else
-		data->recout.x = 0;
-
-	if (stream->src.x > surf_clip.x)
-		surf_clip.width -= stream->src.x - surf_clip.x;
-	data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
-	if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
-		data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
-	data->recout.y = stream->dst.y;
-	if (stream->src.y < surf_clip.y)
-		data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
-						/ stream->src.height;
-	else if (stream->src.y > surf_clip.y)
-		surf_clip.height -= stream->src.y - surf_clip.y;
-
-	data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
-	if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
-		data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
-	/* Handle h & v split */
-	if (split_tb) {
-		ASSERT(data->recout.height % 2 == 0);
-		data->recout.height /= 2;
-	} else if (split_count) {
-		if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-			/* extra pixels in the division remainder need to go to pipes after
-			 * the extra pixel index minus one(epimo) defined here as:
-			 */
-			int epimo = split_count - data->recout.width % (split_count + 1);
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct rect rec_out = {0};
+	struct fixed31_32 temp;
-			data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
-			if (split_idx > epimo)
-				data->recout.x += split_idx - epimo - 1;
-			ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
-			data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
-		} else {
-			/* odm */
-			if (split_idx == split_count) {
-				/* rightmost pipe is the remainder recout */
-				data->recout.width -= data->h_active * split_count - data->recout.x;
-
-				/* ODM combine cases with MPO we can get negative widths */
-				if (data->recout.width < 0)
-					data->recout.width = 0;
-
-				data->recout.x = 0;
-			} else
-				data->recout.width = data->h_active - data->recout.x;
-		}
+	temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+			stream->src.width);
+	rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+	temp = dc_fixpt_from_fraction(
+			(rec_in->x + rec_in->width) * stream->dst.width,
+			stream->src.width);
+	rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+	temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+			stream->src.height);
+	rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+	temp = dc_fixpt_from_fraction(
+			(rec_in->y + rec_in->height) * stream->dst.height,
+			stream->src.height);
+	rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+	return rec_out;
+}
+
+static struct rect calculate_mpc_slice_in_timing_active(
+		struct pipe_ctx *pipe_ctx,
+		struct rect *plane_clip_rec)
+{
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	int mpc_slice_count = resource_get_num_mpc_splits(pipe_ctx) + 1;
+	int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+	int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+	struct rect mpc_rec;
+
+	mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+	mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+	mpc_rec.height = plane_clip_rec->height;
+	mpc_rec.y = plane_clip_rec->y;
+	ASSERT(mpc_slice_count == 1 ||
+			stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+			mpc_rec.width % 2 == 0);
+
+	/* extra pixels in the division remainder need to go to pipes after
+	 * the extra pixel index minus one (epimo) defined here as:
+	 */
+	if (mpc_slice_idx > epimo) {
+		mpc_rec.x += mpc_slice_idx - epimo - 1;
+		mpc_rec.width += 1;
+	}
+
+	if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+		ASSERT(mpc_rec.height % 2 == 0);
+		mpc_rec.height /= 2;
+	}
+	return mpc_rec;
+}
+
+static void adjust_recout_for_visual_confirm(struct rect *recout,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
+	int dpp_offset, base_offset;
+
+	if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+		return;
+
+	dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+	dpp_offset *= pipe_ctx->plane_res.dpp->inst;
+
+	if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
+			dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+		base_offset = dc->debug.visual_confirm_rect_height;
+	else
+		base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
+	recout->height -= base_offset;
+	recout->height -= dpp_offset;
+}
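[Editorial aside: the epimo ("extra pixel index minus one") rule in calculate_mpc_slice_in_timing_active above hands the width % count leftover pixels to the trailing slices while keeping all slices contiguous. A standalone sketch with made-up numbers, a 1003-pixel clip split across 4 MPC slices:]

    #include <stdio.h>

    int main(void)
    {
        int width = 1003, slice_count = 4;
        int base = width / slice_count;                     /* 250 */
        int epimo = slice_count - width % slice_count - 1;  /* 4 - 3 - 1 = 0 */
        int i, total = 0;

        for (i = 0; i < slice_count; i++) {
            /* slices past epimo shift right and grow by one pixel */
            int x = base * i + (i > epimo ? i - epimo - 1 : 0);
            int w = base + (i > epimo ? 1 : 0);

            /* prints: x=0 w=250, x=250 w=251, x=501 w=251, x=752 w=251 */
            printf("slice %d: x=%d width=%d\n", i, x, w);
            total += w;
        }
        printf("total=%d\n", total); /* 1003, no pixel lost */
        return 0;
    }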
+
+/*
+ * The function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rec of the overlapping area among the MPC slice of the
+ * plane clip, the ODM slice associated with the pipe context and the stream
+ * destination rec.
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
+{
+	/*
+	 * A plane clip represents the desired plane size and position in Stream
+	 * Source Space. Stream Source is the destination where all planes are
+	 * blended (i.e. positioned, scaled and overlaid). It is a canvas where
+	 * all planes associated with the current stream are drawn together.
+	 * After Stream Source is completed, we will further scale and
+	 * reposition the entire canvas of the stream source to Stream
+	 * Destination in Timing Active Space. This could be due to display
+	 * overscan adjustment where we will need to rescale and reposition all
+	 * the planes so they can fit into a TV with overscan, or to downscale/
+	 * upscale features such as GPU scaling or VSR.
+	 *
+	 * This two-step blending is a virtual procedure in software. In
+	 * hardware there is no such thing as Stream Source; all planes are
+	 * blended once in Timing Active Space. Software virtualizes a Stream
+	 * Source space to decouple the math complexity so scaling param
+	 * calculation focuses on one step at a time.
+	 *
+	 * In the following two diagrams, the user applied 10% overscan adjustment
+	 * so the Stream Source needs to be scaled down a little before mapping
+	 * to Timing Active Space. As a result the Plane Clip is also scaled
+	 * down by the same ratio, and the Plane Clip position (i.e. x and y) with
+	 * respect to Stream Source is also scaled down. To map it in Timing
+	 * Active Space additional x and y offsets from Stream Destination are
+	 * added to Plane Clip as well.
+	 *
+	 * Stream Source Space
+	 * ------------
+	 *        __________________________________________________
+	 *       |Stream Source (3840 x 2160)             ^         |
+	 *       |                                        y         |
+	 *       |                                        |         |
+	 *       |          __________________V                     |
+	 *       |<-- x -->|Plane Clip/////////|                    |
+	 *       |         |(pre scale)////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |                                                  |
+	 *       |                                                  |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space (3840 x 2160)
+	 * ---------------------------------
+	 *
+	 *       Timing Active
+	 *        __________________________________________________
+	 *       | y_____________________________________________   |
+	 *       |x |Stream Destination (3456 x 1944)            |  |
+	 *       |  |                                            |  |
+	 *       |  |         __________________                 |  |
+	 *       |  |        |Plane Clip////////|                |  |
+	 *       |  |        |(post scale)//////|                |  |
+	 *       |  |        |//////////////////|                |  |
+	 *       |  |        |//////////////////|                |  |
+	 *       |  |        |//////////////////|                |  |
+	 *       |  |        |//////////////////|                |  |
+	 *       |  |                                            |  |
+	 *       |  |                                            |  |
+	 *       |  |____________________________________________|  |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * In Timing Active Space a plane clip could be further sliced into
+	 * pieces called MPC slices. Each Pipe Context is responsible for
+	 * processing only one MPC slice so the plane processing workload can be
+	 * distributed to multiple DPP Pipes. MPC slices could be blended
+	 * together into a single ODM slice. Each ODM slice is responsible for
+	 * processing a portion of Timing Active divided horizontally so the
+	 * output pixel processing workload can be distributed to multiple OPP
+	 * pipes. All ODM slices are mapped together in the ODM block so MPC
+	 * slices belonging to different ODM slices can be pieced together to
+	 * form a single image in Timing Active. MPC slices must belong to a
+	 * single ODM slice. If an MPC slice goes across an ODM slice boundary,
+	 * it needs to be divided into two MPC slices, one for each ODM slice.
+	 *
+	 * In the following diagram the output pixel processing workload is
+	 * divided horizontally into two ODM slices, one for each OPP blend tree.
+ * OPP0 blend tree is responsible for processing left half of Timing + * Active, while OPP2 blend tree is responsible for processing right + * half. + * + * The plane has two MPC slices. However since the right MPC slice goes + * across ODM boundary, two DPP pipes are needed one for each OPP blend + * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree). + * + * Assuming that we have a Pipe Context associated with OPP0 and DPP1 + * working on processing the plane in the diagram. We want to know the + * width and height of the shaded rectangle and its relative position + * with respect to the ODM slice0. This is called the recout of the pipe + * context. + * + * Planes can be at arbitrary size and position and there could be an + * arbitrary number of MPC and ODM slices. The algorithm needs to take + * all scenarios into account. + * + * Timing Active Space (3840 x 2160) + * --------------------------------- + * + * Timing Active + * __________________________________________________ + * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | + * | y | | + * | | <- w -> | + * | _____V________|____ | + * | |DPP0 ^ |DPP1 |DPP2| | + * |<------ x |-----|->|/////| | | + * | | | |/////| | | + * | | h |/////| | | + * | | | |/////| | | + * | |_____V__|/////|____| | + * | | | + * | | | + * | | | + * |_________________________|________________________| + * + * + */ + struct rect plane_clip; + struct rect mpc_slice_of_plane_clip; + struct rect odm_slice; + struct rect overlapping_area; + + plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx, + &pipe_ctx->plane_state->clip_rect); + /* guard plane clip from drawing beyond stream dst here */ + plane_clip = intersect_rec(&plane_clip, + &pipe_ctx->stream->dst); + mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active( + pipe_ctx, &plane_clip); + odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); + overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice); + if (overlapping_area.height > 0 && + overlapping_area.width > 0) { + /* shift the overlapping area so it is with respect to current + * ODM slice's position + */ + pipe_ctx->plane_res.scl_data.recout = shift_rec( + &overlapping_area, + -odm_slice.x, -odm_slice.y); + adjust_recout_for_visual_confirm( + &pipe_ctx->plane_res.scl_data.recout, + pipe_ctx); + } else { + /* if there is no overlap, zero recout */ + memset(&pipe_ctx->plane_res.scl_data.recout, 0, + sizeof(struct rect)); } + } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -996,33 +1292,30 @@ static void calculate_init_and_vp( static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; + struct rect recout_dst_in_active_timing; + struct rect recout_clip_in_active_timing; + struct rect recout_clip_in_recout_dst; + struct rect overlap_in_active_timing; + struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - /* - * recout full is what the recout would have been if we didnt clip - * the source plane at all. 
We only care about left(ro_lb) and top(ro_tb) - * offsets of recout within recout full because those are the directions - * we scan from and therefore the only ones that affect inits. - */ - recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) - * stream->dst.width / stream->src.width; - recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) - * stream->dst.height / stream->src.height; - if (pipe_ctx->prev_odm_pipe && split_idx) - ro_lb = data->h_active * split_idx - recout_full_x; - else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe) - ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x; + recout_clip_in_active_timing = shift_rec( + &data->recout, odm_slice.x, odm_slice.y); + recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( + pipe_ctx, &plane_state->dst_rect); + overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing, + &recout_dst_in_active_timing); + if (overlap_in_active_timing.width > 0 && + overlap_in_active_timing.height > 0) + recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing, + -recout_dst_in_active_timing.x, + -recout_dst_in_active_timing.y); else - ro_lb = data->recout.x - recout_full_x; - ro_tb = data->recout.y - recout_full_y; - ASSERT(ro_lb >= 0 && ro_tb >= 0); + memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect)); /* * Work in recout rotation since that requires less transformations @@ -1041,7 +1334,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width, data->taps.h_taps, @@ -1051,7 +1344,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.width); calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width / vpc_div, data->taps.h_taps_c, @@ -1061,7 +1354,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport_c.width); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height, data->taps.v_taps, @@ -1071,7 +1364,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.height); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height / vpc_div, data->taps.v_taps_c, @@ -1096,6 +1389,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1120,30 +1414,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dst.y += timing->v_border_top; /* Calculate H and V active size */ - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + - timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + - timing->v_border_top + timing->v_border_bottom; - if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) { - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; - - DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1, - pipe_ctx->prev_odm_pipe ? 
pipe_ctx->prev_odm_pipe->pipe_idx : -1);
-	}
-	/* ODM + windows MPO, where window is on either right or left ODM half */
-	else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {
-
-		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;
-
-		DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n",
-				__func__,
-				pipe_ctx->pipe_idx,
-				pipe_ctx->top_pipe->pipe_idx,
-				pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
-				pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
-	}
+	pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
+	pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+
 	/* depends on h_active */
 	calculate_recout(pipe_ctx);
 	/* depends on pixel format */
@@ -1225,17 +1498,12 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
 	}
 
-	if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
-		if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
-				pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
-			res = false;
-	} else {
-		/* Clamp minimum viewport size */
-		if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
-			pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
-		if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
-			pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
-	}
+	/* Clamp minimum viewport size */
+	if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+		pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+	if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+		pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+
 
 	DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d  Recout: height:%d width:%d x:%d y:%d  HACTIVE:%d VACTIVE:%d\n"
 			"src_rect: height:%d width:%d x:%d y:%d  dst_rect: height:%d width:%d x:%d y:%d  clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1287,7 +1555,7 @@ enum dc_status resource_build_scaling_params_for_context(
 	return DC_OK;
 }
 
-struct pipe_ctx *find_idle_secondary_pipe(
+struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
 		const struct pipe_ctx *primary_pipe)
@@ -1347,73 +1615,182 @@ struct pipe_ctx *find_idle_secondary_pipe(
 	return secondary_pipe;
 }
 
-struct pipe_ctx *resource_get_head_pipe_for_stream(
-		struct resource_context *res_ctx,
-		struct dc_stream_state *stream)
+int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct pipe_ctx *cur_opp_head)
+{
+	const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe;
+	struct pipe_ctx *new_pipe;
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+
+	while (cur_sec_dpp) {
+		/* find a free pipe used in the current opp blend tree; this
+		 * avoids an MPO pipe switching to a different opp blending
+		 * tree
+		 */
+		new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx];
+		if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = cur_sec_dpp->pipe_idx;
+			break;
+		}
+		cur_sec_dpp = cur_sec_dpp->bottom_pipe;
+	}
+
+	return free_pipe_idx;
+}
+
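+/* Illustrative sketch (hypothetical composition; the actual call sites
+ * live in the per-ASIC resource pool implementations and may differ):
+ * a pool's acquire_free_pipe_as_secondary_dpp_pipe hook can chain the
+ * free pipe finders in order of preference, e.g.
+ *
+ *	free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ *			&cur_ctx->res_ctx, &new_ctx->res_ctx, cur_opp_head);
+ *	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ *		free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+ *				&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ *	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ *		free_pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
+ */
+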
+int recource_find_free_pipe_not_used_in_cur_res_ctx(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct resource_pool *pool)
 {
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+	const struct pipe_ctx *new_pipe, *cur_pipe;
 	int i;
 
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (res_ctx->pipe_ctx[i].stream == stream
-				&& !res_ctx->pipe_ctx[i].top_pipe
-				&& !res_ctx->pipe_ctx[i].prev_odm_pipe)
-			return &res_ctx->pipe_ctx[i];
+	for (i = 0; i < pool->pipe_count; i++) {
+		cur_pipe = &cur_res_ctx->pipe_ctx[i];
+		new_pipe = &new_res_ctx->pipe_ctx[i];
+
+		if (resource_is_pipe_type(cur_pipe, FREE_PIPE) &&
+				resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
+		}
 	}
-	return NULL;
+
+	return free_pipe_idx;
 }
 
-static struct pipe_ctx *resource_get_tail_pipe(
-		struct resource_context *res_ctx,
-		struct pipe_ctx *head_pipe)
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct resource_pool *pool)
 {
-	struct pipe_ctx *tail_pipe;
-
-	tail_pipe = head_pipe->bottom_pipe;
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+	const struct pipe_ctx *new_pipe, *cur_pipe;
+	int i;
 
-	while (tail_pipe) {
-		head_pipe = tail_pipe;
-		tail_pipe = tail_pipe->bottom_pipe;
+	for (i = 0; i < pool->pipe_count; i++) {
+		cur_pipe = &cur_res_ctx->pipe_ctx[i];
+		new_pipe = &new_res_ctx->pipe_ctx[i];
+
+		if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+				!resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+				resource_is_for_mpcc_combine(cur_pipe) &&
+				resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
+		}
 	}
 
-	return head_pipe;
+	return free_pipe_idx;
 }
 
-/*
- * A free_pipe for a stream is defined here as a pipe
- * that has no surface attached yet
- */
-static struct pipe_ctx *acquire_free_pipe_for_head(
-		struct dc_state *context,
-		const struct resource_pool *pool,
-		struct pipe_ctx *head_pipe)
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+		const struct resource_pool *pool)
 {
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+	const struct pipe_ctx *new_pipe;
 	int i;
-	struct resource_context *res_ctx = &context->res_ctx;
 
-	if (!head_pipe->plane_state)
-		return head_pipe;
+	for (i = 0; i < pool->pipe_count; i++) {
+		new_pipe = &new_res_ctx->pipe_ctx[i];
 
-	/* Re-use pipe already acquired for this stream if available*/
-	for (i = pool->pipe_count - 1; i >= 0; i--) {
-		if (res_ctx->pipe_ctx[i].stream == head_pipe->stream &&
-				!res_ctx->pipe_ctx[i].plane_state) {
-			return &res_ctx->pipe_ctx[i];
+		if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
 		}
 	}
 
-	/*
-	 * At this point we have no re-useable pipe for this stream and we need
-	 * to acquire an idle one to satisfy the request
+	return free_pipe_idx;
+}
+
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
+{
+#ifdef DBG
+	if (pipe_ctx->stream == NULL) {
+		/* a free pipe with dangling states */
+		ASSERT(!pipe_ctx->plane_state);
+		ASSERT(!pipe_ctx->prev_odm_pipe);
+		ASSERT(!pipe_ctx->next_odm_pipe);
+		ASSERT(!pipe_ctx->top_pipe);
+		ASSERT(!pipe_ctx->bottom_pipe);
+	} else if (pipe_ctx->top_pipe) {
+		/* a secondary DPP pipe must be assigned to a plane */
+		ASSERT(pipe_ctx->plane_state);
+	}
+	/* Add more checks here to prevent corrupted pipe ctx. It is very hard
+	 * to debug this issue afterwards because we can't pinpoint the code
+	 * location causing inconsistent pipe context states.
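+	 * For example, a pipe with no stream but a dangling plane_state
+	 * would trip the first ASSERT above, flagging the corruption right
+	 * where the inconsistent state is first observed rather than at a
+	 * later programming step.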
*/ +#endif + switch (type) { + case OTG_MASTER: + return !pipe_ctx->prev_odm_pipe && + !pipe_ctx->top_pipe && + pipe_ctx->stream; + case OPP_HEAD: + return !pipe_ctx->top_pipe && pipe_ctx->stream; + case DPP_PIPE: + return pipe_ctx->plane_state && pipe_ctx->stream; + case FREE_PIPE: + return !pipe_ctx->plane_state && !pipe_ctx->stream; + default: + return false; + } +} - if (!pool->funcs->acquire_idle_pipe_for_layer) { - if (!pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer) - return NULL; - else - return pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer(context, pool, head_pipe->stream, head_pipe); +bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx) +{ + return resource_get_num_mpc_splits(pipe_ctx) > 0; +} + +struct pipe_ctx *resource_get_otg_master_for_stream( + struct resource_context *res_ctx, + struct dc_stream_state *stream) +{ + int i; + + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx->pipe_ctx[i].stream == stream && + resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER)) + return &res_ctx->pipe_ctx[i]; } + return NULL; +} - return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); +struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx); + + while (otg_master->prev_odm_pipe) + otg_master = otg_master->prev_odm_pipe; + return otg_master; +} + +struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx; + + ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE)); + while (opp_head->top_pipe) + opp_head = opp_head->top_pipe; + return opp_head; +} + +static struct pipe_ctx *get_tail_pipe( + struct pipe_ctx *head_pipe) +{ + struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe; + + while (tail_pipe) { + head_pipe = tail_pipe; + tail_pipe = tail_pipe->bottom_pipe; + } + + return head_pipe; } static int acquire_first_split_pipe( @@ -1446,275 +1823,126 @@ static int acquire_first_split_pipe( split_pipe->stream = stream; return i; - } else if (split_pipe->prev_odm_pipe && - split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { - split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; - if (split_pipe->next_odm_pipe) - split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; - - if (split_pipe->prev_odm_pipe->plane_state) - resource_build_scaling_params(split_pipe->prev_odm_pipe); - - memset(split_pipe, 0, sizeof(*split_pipe)); - split_pipe->stream_res.tg = pool->timing_generators[i]; - split_pipe->plane_res.hubp = pool->hubps[i]; - split_pipe->plane_res.ipp = pool->ipps[i]; - split_pipe->plane_res.dpp = pool->dpps[i]; - split_pipe->stream_res.opp = pool->opps[i]; - split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; - split_pipe->pipe_idx = i; - - split_pipe->stream = stream; - return i; } } - return -1; + return UNABLE_TO_SPLIT; } -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, +static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state, struct dc_state *context) { - int i; - struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe; - struct dc_stream_status *stream_status = NULL; - struct pipe_ctx *prev_right_head = NULL; - struct pipe_ctx *free_right_pipe = NULL; - struct pipe_ctx *prev_left_head = NULL; + struct pipe_ctx *opp_head_pipe = otg_master_pipe; - DC_LOGGER_INIT(stream->ctx->logger); - for (i = 0; i < 
context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; + while (opp_head_pipe) { + if (opp_head_pipe->plane_state) { + ASSERT(0); + return false; } - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to attach surface!\n"); - return false; + opp_head_pipe->plane_state = plane_state; + opp_head_pipe = opp_head_pipe->next_odm_pipe; } + return true; +} - if (stream_status->plane_count == MAX_SURFACE_NUM) { - dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", - plane_state, MAX_SURFACE_NUM); - return false; +static void insert_secondary_dpp_pipe_with_plane(struct pipe_ctx *opp_head_pipe, + struct pipe_ctx *sec_pipe, struct dc_plane_state *plane_state) +{ + struct pipe_ctx *tail_pipe = get_tail_pipe(opp_head_pipe); + + tail_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = tail_pipe; + if (tail_pipe->prev_odm_pipe) { + ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe); + sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; + tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe; } + sec_pipe->plane_state = plane_state; +} - head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream); +/* for each opp head pipe of an otg master pipe, acquire a secondary dpp pipe + * and add the plane. So the plane is added to all MPC blend trees associated + * with the otg master pipe. + */ +static bool acquire_secondary_dpp_pipes_and_add_plane( + struct pipe_ctx *otg_master_pipe, + struct dc_plane_state *plane_state, + struct dc_state *new_ctx, + struct dc_state *cur_ctx, + struct resource_pool *pool) +{ + struct pipe_ctx *opp_head_pipe, *sec_pipe; - if (!head_pipe) { - dm_error("Head pipe not found for stream_state %p !\n", stream); + if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) return false; - } - /* retain new surface, but only once per stream */ - dc_plane_state_retain(plane_state); - - while (head_pipe) { - free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); + opp_head_pipe = otg_master_pipe; + while (opp_head_pipe) { + sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe( + cur_ctx, + new_ctx, + pool, + opp_head_pipe); + if (!sec_pipe) { + /* try tearing down MPCC combine */ + int pipe_idx = acquire_first_split_pipe( + &new_ctx->res_ctx, pool, + otg_master_pipe->stream); - if (!free_pipe) { - int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) - free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; + sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; } - if (!free_pipe) { - dc_plane_state_release(plane_state); + if (!sec_pipe) return false; - } - - free_pipe->plane_state = plane_state; - - if (head_pipe != free_pipe) { - tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe); - ASSERT(tail_pipe); - - /* ODM + window MPO, where MPO window is on right half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) && - tail_pipe->next_odm_pipe) { - - /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on - * the right side, then we will invalidate a 2nd one on the right side - */ - if (head_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { - dc_plane_state_release(plane_state); - return false; - } - - DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n", - __func__, - free_pipe->pipe_idx, - tail_pipe->next_odm_pipe ? 
tail_pipe->next_odm_pipe->pipe_idx : -1); - - /* - * We want to avoid the case where the right side already has a pipe assigned to - * it and is different from free_pipe ( which would cause trigger a pipe - * reallocation ). - * Check the old context to see if the right side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the right side already has a pipe, use that pipe instead if its available - */ - - /* - * We also want to avoid the case where with three plane ( 2 MPO videos ), we have - * both videos on the left side so one of the videos is invalidated. Then we - * move the invalidated video back to the right side. If the order of the plane - * states is such that the right MPO plane is processed first, the free pipe - * selected by the head will be the left MPO pipe. But since there was no right - * MPO pipe, it will assign the free pipe to the right MPO pipe instead and - * a pipe reallocation will occur. - * Check the old context to see if the left side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the left side is already using this pipe, then pick another pipe for right - */ - - prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx]; - if ((prev_right_head->bottom_pipe) && - (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe); - } else { - prev_left_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->pipe_idx]; - if ((prev_left_head->bottom_pipe) && - (free_pipe->pipe_idx == prev_left_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - } - } - if (free_right_pipe) { - free_pipe->stream = NULL; - memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource)); - memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource)); - free_pipe->plane_state = NULL; - free_pipe->pipe_idx = 0; - free_right_pipe->plane_state = plane_state; - free_pipe = free_right_pipe; - } - - free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg; - free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm; - free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio; - free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source; - - free_pipe->top_pipe = tail_pipe->next_odm_pipe; - tail_pipe->next_odm_pipe->bottom_pipe = free_pipe; - } else if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) - && head_pipe->next_odm_pipe) { - - /* For ODM + window MPO, support 3 plane ( 2 MPO ) case. - * Here we have a desktop ODM + left window MPO and a new MPO window appears - * on the right side only. It fails the first case, because tail_pipe is the - * left window MPO, so it has no next_odm_pipe. So in this scenario, we check - * for head_pipe->next_odm_pipe instead - */ - DC_LOG_SCALER("%s - ODM + win MPO (left) + win MPO (right). free_pipe:%d head_pipe->next_odm:%d\n", - __func__, - free_pipe->pipe_idx, - head_pipe->next_odm_pipe ? head_pipe->next_odm_pipe->pipe_idx : -1); - - /* - * We want to avoid the case where the right side already has a pipe assigned to - * it and is different from free_pipe ( which would cause trigger a pipe - * reallocation ). 
- * Check the old context to see if the right side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the right side already has a pipe, use that pipe instead if its available - */ - prev_right_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->next_odm_pipe->pipe_idx]; - if ((prev_right_head->bottom_pipe) && - (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe->next_odm_pipe); - if (free_right_pipe) { - free_pipe->stream = NULL; - memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource)); - memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource)); - free_pipe->plane_state = NULL; - free_pipe->pipe_idx = 0; - free_right_pipe->plane_state = plane_state; - free_pipe = free_right_pipe; - } - } - - free_pipe->stream_res.tg = head_pipe->next_odm_pipe->stream_res.tg; - free_pipe->stream_res.abm = head_pipe->next_odm_pipe->stream_res.abm; - free_pipe->stream_res.opp = head_pipe->next_odm_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = head_pipe->next_odm_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = head_pipe->next_odm_pipe->stream_res.audio; - free_pipe->clock_source = head_pipe->next_odm_pipe->clock_source; - - free_pipe->top_pipe = head_pipe->next_odm_pipe; - head_pipe->next_odm_pipe->bottom_pipe = free_pipe; - } else { - - /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on - * the left side, then we will invalidate a 2nd one on the left side - */ - if (head_pipe->next_odm_pipe && tail_pipe->top_pipe) { - dc_plane_state_release(plane_state); - return false; - } - - free_pipe->stream_res.tg = tail_pipe->stream_res.tg; - free_pipe->stream_res.abm = tail_pipe->stream_res.abm; - free_pipe->stream_res.opp = tail_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = tail_pipe->stream_res.audio; - free_pipe->clock_source = tail_pipe->clock_source; - - free_pipe->top_pipe = tail_pipe; - tail_pipe->bottom_pipe = free_pipe; - - /* Connect MPO pipes together if MPO window is in the centre */ - if (!(free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= - free_pipe->stream->src.x + free_pipe->stream->src.width/2))) { - if (!free_pipe->next_odm_pipe && - tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { - free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; - tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; - } - if (!free_pipe->prev_odm_pipe && - tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { - free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; - tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe; - } - } - } - } + insert_secondary_dpp_pipe_with_plane(opp_head_pipe, sec_pipe, + plane_state); + opp_head_pipe = opp_head_pipe->next_odm_pipe; + } + return true; +} - /* ODM + window MPO, where MPO window is on left half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= - free_pipe->stream->src.x + free_pipe->stream->src.width/2)) { - DC_LOG_SCALER("%s - ODM + window MPO(left). 
free_pipe:%d\n", - __func__, - free_pipe->pipe_idx); - break; - } - /* ODM + window MPO, where MPO window is on right half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) { - DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d\n", - __func__, - free_pipe->pipe_idx); - break; - } +bool dc_add_plane_to_context( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *context) +{ + struct resource_pool *pool = dc->res_pool; + struct pipe_ctx *otg_master_pipe; + struct dc_stream_status *stream_status = NULL; + bool added = false; - head_pipe = head_pipe->next_odm_pipe; + stream_status = dc_stream_get_status_from_state(context, stream); + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to attach surface!\n"); + goto out; + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { + dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", + plane_state, MAX_SURFACE_NUM); + goto out; } - /* assign new surfaces*/ - stream_status->plane_states[stream_status->plane_count] = plane_state; - stream_status->plane_count++; + otg_master_pipe = resource_get_otg_master_for_stream( + &context->res_ctx, stream); + if (otg_master_pipe->plane_state == NULL) + added = add_plane_to_opp_head_pipes(otg_master_pipe, + plane_state, context); + else + added = acquire_secondary_dpp_pipes_and_add_plane( + otg_master_pipe, plane_state, context, + dc->current_state, pool); + if (added) { + stream_status->plane_states[stream_status->plane_count] = + plane_state; + stream_status->plane_count++; + dc_plane_state_retain(plane_state); + } - return true; +out: + return added; } bool dc_remove_plane_from_context( @@ -1878,7 +2106,7 @@ bool dc_add_all_planes_for_stream( return add_all_planes_for_stream(dc, stream, &set, 1, context); } -bool is_timing_changed(struct dc_stream_state *cur_stream, +bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { if (cur_stream == NULL) @@ -1903,7 +2131,7 @@ static bool are_stream_backends_same( if (stream_a == NULL || stream_b == NULL) return false; - if (is_timing_changed(stream_a, stream_b)) + if (dc_is_timing_changed(stream_a, stream_b)) return false; if (stream_a->signal != stream_b->signal) @@ -2218,7 +2446,7 @@ enum dc_status dc_remove_stream_from_ctx( { int i; struct dc_context *dc_ctx = dc->ctx; - struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream); + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(&new_ctx->res_ctx, stream); struct pipe_ctx *odm_pipe; if (!del_pipe) { @@ -3034,23 +3262,29 @@ static void set_avi_info_frame( hdmi_info.bits.S0_S1 = scan_type; /* C0, C1 : Colorimetry */ - if (color_space == COLOR_SPACE_YCBCR709 || - color_space == COLOR_SPACE_YCBCR709_LIMITED) + switch (color_space) { + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; - else if (color_space == COLOR_SPACE_YCBCR601 || - color_space == COLOR_SPACE_YCBCR601_LIMITED) + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601; - else { - hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; - } - if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE || - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE || - color_space == COLOR_SPACE_2020_YCBCR) { + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + case 
COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_2020_YCBCR: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; - } else if (color_space == COLOR_SPACE_ADOBERGB) { + break; + case COLOR_SPACE_ADOBERGB: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; + break; + case COLOR_SPACE_SRGB: + default: + hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; + break; } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && @@ -3466,7 +3700,7 @@ enum dc_status resource_map_clock_resources( { /* acquire new resources */ const struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream( + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) @@ -3528,7 +3762,7 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc) return true; - if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) + if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) return true; if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off) @@ -3856,10 +4090,7 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx_old->stream) - continue; - - if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER)) continue; if (!pipe_ctx->stream || @@ -3983,11 +4214,13 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link, * with an hpo encoder. Or we can return a very dummy one that doesn't * do work for all functions */ - return get_hpo_dp_link_hwss(); + return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ? + get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss()); else if (can_use_dpia_link_hwss(link, link_res)) return get_dpia_link_hwss(); else if (can_use_dio_link_hwss(link, link_res)) - return get_dio_link_hwss(); + return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ? 
+			get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss();
 	else
 		return get_virtual_link_hwss();
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 72b261ad9587..01fe2d2fd241 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -71,8 +71,7 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
 
 	/* Copy audio modes */
 	/* TODO - Remove this translation */
-	for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
-	{
+	for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) {
 		stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
 		stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
 		stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
@@ -276,8 +275,8 @@ static void program_cursor_attributes(
 	}
 
 	dc->hwss.set_cursor_attribute(pipe_ctx);
-
-	dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
+	if (dc->ctx->dmub_srv)
+		dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
 	if (dc->hwss.set_cursor_sdr_white_level)
 		dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
 }
@@ -306,6 +305,32 @@ bool dc_optimize_timing_for_fsft(
 }
 #endif
 
+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+	uint32_t refresh_rate;
+	struct dc *dc = stream->ctx->dc;
+
+	refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+		stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+	refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+	refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
+
+	/* If there's any stream that fits the SubVP high refresh criteria,
+	 * we must return true. This is because cursor updates are asynchronous
+	 * with full updates, so we could transition into a SubVP config and
+	 * remain in HW cursor mode if there's no cursor update which will
+	 * then cause corruption.
+	 */
+	if ((refresh_rate >= 120 && refresh_rate <= 175 &&
+			stream->timing.v_addressable >= 1440 &&
+			stream->timing.v_addressable <= 2160) &&
+			(dc->current_state->stream_count > 1 ||
+			(dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+		return true;
+
+	return false;
+}
+
 /*
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
@@ -334,12 +359,13 @@ bool dc_stream_set_cursor_attributes(
 
 	/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
 	 * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
-	 * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
-	 * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
-	 *
-	 * [< 120hz is a requirement for SubVP configs]
+	 * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+	 * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+	 * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
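+	 *
+	 * (Illustrative numbers, not taken from the code: a 3840 x 2160
+	 * stream at 144hz has v_addressable within [1440, 2160] and a
+	 * refresh rate within [120, 175], so with a second stream present
+	 * it is a SubVP high refresh candidate and a cursor larger than
+	 * 64 x 64 x 4 falls back to SW cursor below.)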
 	 */
 	if (dc->debug.allow_sw_cursor_fallback &&
 	    attributes->height * attributes->width * 4 > 16384) {
+		if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+			return false;
 		if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
 				((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
 			return false;
@@ -396,8 +422,8 @@ static void program_cursor_position(
 	}
 
 	dc->hwss.set_cursor_position(pipe_ctx);
-
-	dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
+	if (dc->ctx->dmub_srv)
+		dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
 	}
 
 	if (pipe_to_program)
@@ -490,25 +516,6 @@ bool dc_stream_add_writeback(struct dc *dc,
 		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
 		dwb->otg_inst = stream_status->primary_otg_inst;
 	}
-	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
-		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
-			dm_error("DC: update_bandwidth failed!\n");
-			return false;
-		}
-
-		/* enable writeback */
-		if (dc->hwss.enable_writeback) {
-			struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
-
-			if (dwb->funcs->is_enabled(dwb)) {
-				/* writeback pipe already enabled, only need to update */
-				dc->hwss.update_writeback(dc, wb_info, dc->current_state);
-			} else {
-				/* Enable writeback pipe from scratch*/
-				dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
-			}
-		}
-	}
 
 	return true;
 }
@@ -553,17 +560,6 @@ bool dc_stream_remove_writeback(struct dc *dc,
 	}
 	stream->num_wb_info = j;
 
-	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
-		/* recalculate and apply DML parameters */
-		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
-			dm_error("DC: update_bandwidth failed!\n");
-			return false;
-		}
-
-		/* disable writeback */
-		if (dc->hwss.disable_writeback)
-			dc->hwss.disable_writeback(dc, dwb_pipe_inst);
-	}
 	return true;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 30f0ba05a6e6..31e3183497a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012-14 Advanced Micro Devices, Inc.
+ * Copyright 2012-2023 Advanced Micro Devices, Inc.
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,12 +40,14 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" +struct abm_save_restore; + /* forward declaration */ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.230" +#define DC_VER "3.2.247" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -60,7 +62,9 @@ struct dc_versions { }; enum dp_protocol_version { - DP_VERSION_1_4, + DP_VERSION_1_4 = 0, + DP_VERSION_2_1, + DP_VERSION_UNKNOWN, }; enum dc_plane_type { @@ -209,6 +213,8 @@ struct dc_color_caps { struct dc_dmub_caps { bool psr; bool mclk_sw; + bool subvp_psr; + bool gecc_enable; }; struct dc_caps { @@ -262,6 +268,7 @@ struct dc_caps { uint16_t subvp_pstate_allow_width_us; uint16_t subvp_vertical_int_margin_us; bool seamless_odm; + uint32_t max_v_total; uint8_t subvp_drr_vblank_start_margin_us; }; @@ -270,8 +277,13 @@ struct dc_bug_wa { bool dedcn20_305_wa; bool skip_clock_update; bool lt_early_cr_pattern; + struct { + uint8_t uclk : 1; + uint8_t fclk : 1; + uint8_t dcfclk : 1; + uint8_t dcfclk_ds: 1; + } clock_update_disable_mask; }; - struct dc_dcc_surface_param { struct dc_size surface_size; enum surface_pixel_format format; @@ -406,7 +418,7 @@ struct dc_config { uint8_t force_bios_fixed_vs; int sdpif_request_limit_words_per_umc; bool use_old_fixed_vs_sequence; - bool disable_subvp_drr; + bool dc_mode_clk_limit_support; }; enum visual_confirm { @@ -418,7 +430,9 @@ enum visual_confirm { VISUAL_CONFIRM_SWAPCHAIN = 6, VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, + VISUAL_CONFIRM_REPLAY = 12, VISUAL_CONFIRM_SUBVP = 14, + VISUAL_CONFIRM_MCLK_SWITCH = 16, }; enum dc_psr_power_opts { @@ -495,7 +509,7 @@ enum dcn_zstate_support_state { DCN_ZSTATE_SUPPORT_DISALLOW, }; -/** +/* * struct dc_clocks - DC pipe clocks * * For any clocks that may differ per pipe only the max is stored in this @@ -698,6 +712,8 @@ struct dc_virtual_addr_space_config { struct dc_bounding_box_overrides { int sr_exit_time_ns; int sr_enter_plus_exit_time_ns; + int sr_exit_z8_time_ns; + int sr_enter_plus_exit_z8_time_ns; int urgent_latency_ns; int percent_of_ideal_drambw; int dram_clock_change_latency_ns; @@ -715,7 +731,7 @@ struct resource_pool; struct dce_hwseq; struct link_service; -/** +/* * struct dc_debug_options - DC debug struct * * This struct provides a simple mechanism for developers to change some @@ -743,7 +759,7 @@ struct dc_debug_options { bool use_max_lb; enum dcc_option disable_dcc; - /** + /* * @pipe_split_policy: Define which pipe split policy is used by the * display core. 
*/ @@ -767,6 +783,8 @@ struct dc_debug_options { int sr_enter_plus_exit_time_dpm0_ns; int sr_exit_time_ns; int sr_enter_plus_exit_time_ns; + int sr_exit_z8_time_ns; + int sr_enter_plus_exit_z8_time_ns; int urgent_latency_ns; uint32_t underflow_assert_delay_us; int percent_of_ideal_drambw; @@ -835,6 +853,7 @@ struct dc_debug_options { /* Enable dmub aux for legacy ddc */ bool enable_dmub_aux_for_legacy_ddc; bool disable_fams; + bool disable_fams_gaming; /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; bool enable_driver_sequence_debug; @@ -845,6 +864,7 @@ struct dc_debug_options { bool psr_skip_crtc_disable; union dpia_debug_options dpia_debug; bool disable_fixed_vs_aux_timeout_wa; + uint32_t fixed_vs_aux_delay_config_wa; bool force_disable_subvp; bool force_subvp_mclk_switch; bool allow_sw_cursor_fallback; @@ -855,7 +875,6 @@ struct dc_debug_options { bool force_usr_allow; /* uses value at boot and disables switch */ bool disable_dtb_ref_clk_switch; - uint32_t fixed_vs_aux_delay_config_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; uint32_t mst_start_top_delay; @@ -879,9 +898,26 @@ struct dc_debug_options { uint32_t fpo_vactive_margin_us; bool disable_fpo_vactive; bool disable_boot_optimizations; + bool override_odm_optimization; + bool minimize_dispclk_using_odm; + bool disable_subvp_high_refresh; + bool disable_dp_plus_plus_wa; + uint32_t fpo_vactive_min_active_margin_us; + uint32_t fpo_vactive_max_blank_us; + bool enable_legacy_fast_update; + bool disable_dc_mode_overwrite; + bool replay_skip_crtc_disabled; }; struct gpu_info_soc_bounding_box_v1_0; + +/* Generic structure that can be used to query properties of DC. More fields + * can be added as required. + */ +struct dc_current_properties { + unsigned int cursor_size_limit; +}; + struct dc { struct dc_debug_options debug; struct dc_versions versions; @@ -1242,6 +1278,16 @@ struct dc_scaling_info { struct scaling_taps scaling_quality; }; +struct dc_fast_update { + const struct dc_flip_addrs *flip_addr; + const struct dc_gamma *gamma; + const struct colorspace_transform *gamut_remap_matrix; + const struct dc_csc_transform *input_csc_color_matrix; + const struct fixed31_32 *coeff_reduction_factor; + struct dc_transfer_func *out_transfer_func; + struct dc_csc_transform *output_csc_transform; +}; + struct dc_surface_update { struct dc_plane_state *surface; @@ -1301,7 +1347,7 @@ struct dc_validation_set { struct dc_stream_state *stream; /** - * @plane_state: Surface state + * @plane_states: Surface state */ struct dc_plane_state *plane_states[MAX_SURFACES]; @@ -1376,10 +1422,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +void dc_set_disable_128b_132b_stream_overhead(bool disable); + /* The function returns minimum bandwidth required to drive a given timing * return - minimum required timing bandwidth in kbps. */ -uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing); +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding); /* Link Interfaces */ /* @@ -1446,8 +1496,10 @@ struct dc_link { * object creation. 
*/ enum engine_id eng_id; + enum engine_id dpia_preferred_eng_id; bool test_pattern_enabled; + enum dp_test_pattern current_test_pattern; union compliance_test_state compliance_test_state; void *priv; @@ -1481,8 +1533,11 @@ struct dc_link { enum edp_revision edp_revision; union dpcd_sink_ext_caps dpcd_sink_ext_caps; + struct backlight_settings backlight_settings; struct psr_settings psr_settings; + struct replay_settings replay_settings; + /* Drive settings read from integrated info table */ struct dc_lane_settings bios_forced_drive_settings; @@ -1502,6 +1557,8 @@ struct dc_link { /* Forced DPIA into TBT3 compatibility mode. */ bool dpia_forced_tbt3_mode; bool dongle_mode_timing_override; + bool blank_stream_on_ocs_change; + bool read_dpcd204h_on_irq_hpd; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1814,6 +1871,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format( */ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link); +/* Get the highest encoding format that the link supports; highest meaning the + * encoding format which supports the maximum bandwidth. + * + * @link - a link with DP RX connection + * return - highest encoding format link supports. + */ +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link); + /* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected * to a link with dp connector signal type. * @link - a link with dp connector signal type @@ -1948,6 +2013,8 @@ bool dc_link_setup_psr(struct dc_link *dc_link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context); +bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state); + /* On eDP links this function call will stall until T12 has elapsed. * If the panel is not in power off state, this function will return * immediately. 
@@ -2126,8 +2193,6 @@ struct dc_sink_init_data { bool converter_disable_audio; }; -bool dc_extended_blank_supported(struct dc *dc); - struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); /* Newer interfaces */ @@ -2197,6 +2262,11 @@ void dc_z10_save_init(struct dc *dc); bool dc_is_dmub_outbox_supported(struct dc *dc); bool dc_enable_dmub_notifications(struct dc *dc); +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData); + void dc_enable_dmub_outbox(struct dc *dc); bool dc_process_dmub_aux_transfer_async(struct dc *dc, @@ -2220,10 +2290,17 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable); +void dc_print_dmub_diagnostic_data(const struct dc *dc); + +void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties); + /* DSC Interfaces */ #include "dc_dsc.h" /* Disable acc mode Interfaces */ void dc_disable_accelerated_mode(struct dc *dc); +bool dc_is_timing_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index a9b9490a532c..4c5ef3ef8dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -31,6 +31,7 @@ #include "core_types.h" #include "../basics/conversion.h" #include "cursor_reg_cache.h" +#include "resource.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -65,47 +66,6 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) } } -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd) -{ - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; - - status = dmub_srv_cmd_queue(dmub, cmd); - if (status == DMUB_STATUS_OK) - return; - - if (status != DMUB_STATUS_QUEUE_FULL) - goto error; - - /* Execute and wait for queue to become empty again. */ - dc_dmub_srv_cmd_execute(dc_dmub_srv); - dc_dmub_srv_wait_idle(dc_dmub_srv); - - /* Requeue the command. 
*/
-	status = dmub_srv_cmd_queue(dmub, cmd);
-	if (status == DMUB_STATUS_OK)
-		return;
-
-error:
-	DC_ERROR("Error queuing DMUB command: status=%d\n", status);
-	dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
-}
-
-void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
-{
-	struct dmub_srv *dmub = dc_dmub_srv->dmub;
-	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
-	enum dmub_status status;
-
-	status = dmub_srv_cmd_execute(dmub);
-	if (status != DMUB_STATUS_OK) {
-		DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
-	}
-}
-
 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
 {
 	struct dmub_srv *dmub = dc_dmub_srv->dmub;
@@ -159,50 +119,89 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
 	}
 }
 
-bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
+bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
+}
+
+bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
 {
+	struct dc_context *dc_ctx;
 	struct dmub_srv *dmub;
 	enum dmub_status status;
+	int i;
 
 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 		return false;
 
+	dc_ctx = dc_dmub_srv->ctx;
 	dmub = dc_dmub_srv->dmub;
 
-	status = dmub_srv_cmd_with_reply_data(dmub, cmd);
+	for (i = 0 ; i < count; i++) {
+		// Queue command
+		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+
+		if (status == DMUB_STATUS_QUEUE_FULL) {
+			/* Execute and wait for queue to become empty again. */
+			dmub_srv_cmd_execute(dmub);
+			dmub_srv_wait_for_idle(dmub, 100000);
+
+			/* Requeue the command. */
+			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+		}
+
+		if (status != DMUB_STATUS_OK) {
+			DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			return false;
+		}
+	}
+
+	status = dmub_srv_cmd_execute(dmub);
 	if (status != DMUB_STATUS_OK) {
-		DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+		DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
 		return false;
 	}
 
+	// Wait for DMUB to process command
+	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
+		status = dmub_srv_wait_for_idle(dmub, 100000);
+
+		if (status != DMUB_STATUS_OK) {
+			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			return false;
+		}
+
+		// Copy data back from ring buffer into command
+		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+	}
+
 	return true;
 }
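+
+/* Illustrative caller-side sketch (hypothetical snippet; real call
+ * sites use the dm_execute_dmub_cmd()/dm_execute_dmub_cmd_list()
+ * wrappers seen later in this file):
+ *
+ *	union dmub_rb_cmd cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	// fill in cmd.<command>.header and payload here
+ *	if (dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd,
+ *			DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ *		; // on success, reply data has been copied back into cmd
+ */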
 
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
 {
-	struct dmub_srv *dmub = dc_dmub_srv->dmub;
-	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	struct dmub_srv *dmub;
+	struct dc_context *dc_ctx;
+	union dmub_fw_boot_status boot_status;
 	enum dmub_status status;
 
-	for (;;) {
-		/* Wait up to a second for PHY init. */
-		status = dmub_srv_wait_for_phy_init(dmub, 1000000);
-		if (status == DMUB_STATUS_OK)
-			/* Initialization OK */
-			break;
-
-		DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
-		ASSERT(0);
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
 
-		if (status != DMUB_STATUS_TIMEOUT)
-			/*
-			 * Server likely initialized or we don't have
-			 * DMCUB HW support - this won't end.
-			 */
-			break;
+	dmub = dc_dmub_srv->dmub;
+	dc_ctx = dc_dmub_srv->ctx;
 
-		/* Continue spinning so we don't hang the ASIC. */
+	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
+	if (status != DMUB_STATUS_OK) {
+		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
+		return false;
 	}
+
+	return boot_status.bits.optimized_init_done;
 }
 
 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
@@ -267,9 +266,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
 	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
 	// Send the command to the DMCUB.
-	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
-	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -283,9 +280,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
 	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
 	// Send the command to the DMCUB.
-	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
-	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -362,7 +357,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-		if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream && pipe->stream->fpo_in_use) {
+		if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
 
@@ -378,21 +373,17 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
 
 	// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; - if (!dmub) { + if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) return; - } memset(&cmd, 0, sizeof(cmd)); @@ -402,15 +393,10 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) cmd.query_feature_caps.header.ret_status = 1; cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); - /* Send command to fw */ - status = dmub_srv_cmd_with_reply_data(dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - /* If command was processed, copy feature caps to dmub srv */ - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_feature_caps.header.ret_status == 0) { - memcpy(&dmub->feature_caps, + memcpy(&dc_dmub_srv->dmub->feature_caps, &cmd.query_feature_caps.query_feature_caps_data, sizeof(struct dmub_feature_caps)); } @@ -419,7 +405,6 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; unsigned int panel_inst = 0; dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); @@ -433,13 +418,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; - // Send command to fw - status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - // If command was processed, copy feature caps to dmub srv - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.visual_confirm_color.header.ret_status == 0) { memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, &cmd.visual_confirm_color.visual_confirm_color_data, @@ -552,7 +532,8 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; // Find the SubVP pipe @@ -749,12 +730,10 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) - continue; - /* For SubVP pipe count, only count the top most (ODM / MPC) pipe */ - if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } @@ -771,12 +750,14 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, * Any ODM or MPC splits being used in SubVP will be handled internally in * populate_subvp_cmd_pipe_info */ - if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream && 
- !pipe->top_pipe && !pipe->prev_odm_pipe && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); - } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE && - !pipe->top_pipe && !pipe->prev_odm_pipe) { + } else if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->stream->mall_stream_config.type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); @@ -797,9 +778,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF; } - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) @@ -823,74 +803,40 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) return; } - DC_LOG_DEBUG( - "DMCUB STATE\n" - " dmcub_version : %08x\n" - " scratch [0] : %08x\n" - " scratch [1] : %08x\n" - " scratch [2] : %08x\n" - " scratch [3] : %08x\n" - " scratch [4] : %08x\n" - " scratch [5] : %08x\n" - " scratch [6] : %08x\n" - " scratch [7] : %08x\n" - " scratch [8] : %08x\n" - " scratch [9] : %08x\n" - " scratch [10] : %08x\n" - " scratch [11] : %08x\n" - " scratch [12] : %08x\n" - " scratch [13] : %08x\n" - " scratch [14] : %08x\n" - " scratch [15] : %08x\n" - " pc : %08x\n" - " unk_fault_addr : %08x\n" - " inst_fault_addr : %08x\n" - " data_fault_addr : %08x\n" - " inbox1_rptr : %08x\n" - " inbox1_wptr : %08x\n" - " inbox1_size : %08x\n" - " inbox0_rptr : %08x\n" - " inbox0_wptr : %08x\n" - " inbox0_size : %08x\n" - " is_enabled : %d\n" - " is_soft_reset : %d\n" - " is_secure_reset : %d\n" - " is_traceport_en : %d\n" - " is_cw0_en : %d\n" - " is_cw6_en : %d\n", - diag_data.dmcub_version, - diag_data.scratch[0], - diag_data.scratch[1], - diag_data.scratch[2], - diag_data.scratch[3], - diag_data.scratch[4], - diag_data.scratch[5], - diag_data.scratch[6], - diag_data.scratch[7], - diag_data.scratch[8], - diag_data.scratch[9], - diag_data.scratch[10], - diag_data.scratch[11], - diag_data.scratch[12], - diag_data.scratch[13], - diag_data.scratch[14], - diag_data.scratch[15], - diag_data.pc, - diag_data.undefined_address_fault_addr, - diag_data.inst_fetch_fault_addr, - diag_data.data_write_fault_addr, - diag_data.inbox1_rptr, - diag_data.inbox1_wptr, - diag_data.inbox1_size, - diag_data.inbox0_rptr, - diag_data.inbox0_wptr, - diag_data.inbox0_size, - diag_data.is_dmcub_enabled, - diag_data.is_dmcub_soft_reset, - diag_data.is_dmcub_secure_reset, - diag_data.is_traceport_en, - diag_data.is_cw0_enabled, - diag_data.is_cw6_enabled); + DC_LOG_DEBUG("DMCUB STATE:"); + DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); + DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); + DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); + DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]); + DC_LOG_DEBUG(" scratch [3] : %08x", 
diag_data.scratch[3]); + DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); + DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); + DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); + DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); + DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); + DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); + DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); + DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); + DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); + DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); + DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); + DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); + DC_LOG_DEBUG(" pc : %08x", diag_data.pc); + DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); + DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); + DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); + DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); + DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); + DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); + DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); + DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); + DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); + DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); + DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); + DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); + DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); + DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); + DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); } static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) @@ -953,6 +899,9 @@ static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1) return true; + if (pipe_ctx->stream->link->replay_settings.config.replay_supported) + return true; + return false; } @@ -982,14 +931,6 @@ static void dc_build_cursor_update_payload0( payload->panel_inst = panel_inst; } -static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv, - union dmub_rb_cmd *cmd) -{ - dc_dmub_srv_cmd_queue(dmub_srv, cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); -} - static void dc_build_cursor_position_update_payload0( struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, const struct hubp *hubp, const struct dpp *dpp) @@ -1032,9 +973,11 @@ static void dc_build_cursor_attribute_update_payload1( void dc_send_update_cursor_info_to_dmu( struct pipe_ctx *pCtx, uint8_t pipe_idx) { - union dmub_rb_cmd cmd = { 0 }; - union dmub_cmd_update_cursor_info_data *update_cursor_info = - &cmd.update_cursor_info.update_cursor_info_data; + union dmub_rb_cmd cmd[2]; + union dmub_cmd_update_cursor_info_data *update_cursor_info_0 = + &cmd[0].update_cursor_info.update_cursor_info_data; + + memset(cmd, 0, sizeof(cmd)); if (!dc_dmub_should_update_cursor_data(pCtx)) return; @@ -1051,31 +994,64 @@ void dc_send_update_cursor_info_to_dmu( { /* Build Payload#0 Header */ - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = - sizeof(cmd.update_cursor_info.update_cursor_info_data); - cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */ + 
cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[0].update_cursor_info.header.payload_bytes = + sizeof(cmd[0].update_cursor_info.update_cursor_info_data); + cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd /* Prepare Payload */ - dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0); + dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0); - dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx, + dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); - /* Send update_curosr_info to queue */ - dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd); - } + } { /* Build Payload#1 Header */ - memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data)); - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); - cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */ + cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); + cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command. dc_build_cursor_attribute_update_payload1( - &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, + &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); /* Combine 2nd cmds update_cursor_info to DMU */ - dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd); + dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); } } + +bool dc_dmub_check_min_version(struct dmub_srv *srv) +{ + if (!srv->hw_funcs.is_psrsu_supported) + return true; + return srv->hw_funcs.is_psrsu_supported(srv); +} + +void dc_dmub_srv_enable_dpia_trace(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + struct dmub_srv *dmub; + enum dmub_status status; + static const uint32_t timeout_us = 30; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) { + DC_LOG_ERROR("%s: invalid parameters.", __func__); + return; + } + + dmub = dc_dmub_srv->dmub; + + status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us); + if (status != DMUB_STATUS_OK) { + DC_LOG_ERROR("timeout setting trace buffer mask word\n"); + return; + } + + status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us); + if (status != DMUB_STATUS_OK) { + DC_LOG_ERROR("timeout updating trace buffer mask word\n"); + return; + } + + DC_LOG_DEBUG("Enabled DPIA trace\n"); +}
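
The recurring change in the hunks above collapses the old dc_dmub_srv_cmd_queue() / dc_dmub_srv_cmd_execute() / dc_dmub_srv_wait_idle() triple into a single DM-layer call that takes an explicit wait policy. A minimal sketch of the resulting calling convention follows; dm_execute_dmub_cmd(), dm_execute_dmub_cmd_list() and the DM_DMUB_WAIT_TYPE_* values are taken from this patch, while the example function, its command type and the cmd_common/ret_status field accesses are illustrative assumptions, not code from this commit.

/*
 * Illustrative sketch, not part of this commit: one helper exercising the
 * three wait policies that replace the old queue/execute/wait sequence.
 */
static void example_dmub_submit(struct dc_context *ctx)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_common.header.type = DMUB_CMD__ABM;	/* hypothetical command */
	cmd.cmd_common.header.payload_bytes = 0;

	/* Queue only; do not flush the ring or wait for DMCUB. */
	dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);

	/* Queue, kick the inbox, then block until DMCUB is idle again. */
	dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	/*
	 * As above, but copy the processed command back so the caller can
	 * inspect reply data, as dc_dmub_srv_query_caps_cmd() now does.
	 */
	if (dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.cmd_common.header.ret_status == 0) {
		/* reply payload in cmd is valid here */
	}
}

Centralizing submission this way also lets dm_execute_dmub_cmd_list() batch multi_cmd_pending sequences, as dc_send_update_cursor_info_to_dmu() does above with its two-entry cmd[] array, and gives the DM layer a single place to honor debug.dmcub_emulation.
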
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index d34f5563df2e..bb3fe162dd93 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -26,7 +26,7 @@ #ifndef _DMUB_DC_SRV_H_ #define _DMUB_DC_SRV_H_ -#include "os_types.h" +#include "dm_services_types.h" #include "dmub/dmub_srv.h" struct dmub_srv; @@ -52,16 +52,13 @@ struct dc_dmub_srv { void *dm; }; -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd); - -void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); - void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); -void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); + +bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); -bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd); +bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask); @@ -77,7 +74,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst); bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context); -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub); +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx); void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); @@ -89,4 +86,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx); +bool dc_dmub_check_min_version(struct dmub_srv *srv); + +void dc_dmub_srv_enable_dpia_trace(const struct dc *dc); + #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 49aab1924665..cfaa39c5dd16 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -61,7 +61,7 @@ enum dc_link_rate { */ LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane - LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane + LINK_RATE_UHBR20 = 2000, // UHBR20 - 20.0 Gbps/Lane }; enum dc_link_spread { @@ -566,6 +566,12 @@ struct dpcd_amd_device_id { uint8_t dal_version_byte2; }; +struct target_luminance_value { + uint8_t byte0; + uint8_t byte1; + uint8_t byte2; +}; + struct dpcd_source_backlight_set { struct { uint8_t byte0; @@ -1111,6 +1117,11 @@ struct edp_psr_info { uint8_t force_psrsu_cap; }; +struct replay_info { + uint8_t pixel_deviation_per_line; + uint8_t max_deviation_line; +}; + struct dprx_states { bool cable_id_written; }; @@ -1225,10 +1236,13 @@ struct dpcd_caps { union dp_main_line_channel_coding_cap channel_coding_cap; union dp_sink_video_fallback_formats fallback_formats; union dp_fec_capability1 fec_cap1; + bool panel_luminance_control; union dp_cable_id cable_id; uint8_t 
edp_rev; union edp_alpm_caps alpm_caps; struct edp_psr_info psr_info; + + struct replay_info pr_info; }; union dpcd_sink_ext_caps { @@ -1269,6 +1283,28 @@ union dpcd_psr_configuration { unsigned char raw; }; +union replay_enable_and_configuration { + struct { + unsigned char FREESYNC_PANEL_REPLAY_MODE :1; + unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1; + unsigned char STATE_TRANSITION_ERROR_DETECTION :1; + unsigned char RESERVED0 :1; + unsigned char RESERVED1 :4; + } bits; + unsigned char raw; +}; + +union dpcd_replay_configuration { + struct { + unsigned char STATE_TRANSITION_ERROR_STATUS : 1; + unsigned char DESYNC_ERROR_STATUS : 1; + unsigned char SINK_DEVICE_REPLAY_STATUS : 3; + unsigned char SINK_FRAME_LOCKED : 2; + unsigned char RESERVED : 1; + } bits; + unsigned char raw; +}; + union dpcd_alpm_configuration { struct { unsigned char ENABLE : 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 0e92a322c2ed..fe3078b8789e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -58,6 +58,7 @@ struct dc_dsc_config_options { uint32_t dsc_min_slice_height_override; uint32_t max_target_bpp_limit_override_x16; uint32_t slice_height_granularity; + uint32_t dsc_force_odm_hslice_override; }; bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, @@ -72,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( @@ -80,6 +82,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index f43cce16bb6c..3907eeff560c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -41,19 +41,13 @@ static inline void submit_dmub_read_modify_write( const struct dc_context *ctx) { struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; - bool gather = false; offload->should_burst_write = (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); cmd_buf->header.payload_bytes = sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -66,17 +60,11 @@ static inline void submit_dmub_burst_write( const struct dc_context *ctx) { struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; - bool gather = false; cmd_buf->header.payload_bytes = sizeof(uint32_t) * offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, 
DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -88,17 +76,11 @@ static inline void submit_dmub_reg_wait( const struct dc_context *ctx) { struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; - bool gather = false; - - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; } struct dc_reg_value_masks { @@ -151,7 +133,6 @@ static void dmub_flush_buffer_execute( const struct dc_context *ctx) { submit_dmub_read_modify_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static void dmub_flush_burst_write_buffer_execute( @@ -159,7 +140,6 @@ static void dmub_flush_burst_write_buffer_execute( const struct dc_context *ctx) { submit_dmub_burst_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, @@ -484,8 +464,7 @@ void generic_reg_wait(const struct dc_context *ctx, field_value = get_reg_field_value_ex(reg_val, mask, shift); if (field_value == condition_value) { - if (i * delay_between_poll_us > 1000 && - !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + if (i * delay_between_poll_us > 1000) DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); @@ -497,8 +476,7 @@ void generic_reg_wait(const struct dc_context *ctx, delay_between_poll_us, time_out_num_tries, func_name, line); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - BREAK_TO_DEBUGGER(); + BREAK_TO_DEBUGGER(); } void generic_write_indirect_reg(const struct dc_context *ctx, @@ -691,8 +669,6 @@ void reg_sequence_start_execute(const struct dc_context *ctx) default: return; } - - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } } @@ -712,3 +688,59 @@ void reg_sequence_wait_done(const struct dc_context *ctx) dc_dmub_srv_wait_idle(ctx->dmub_srv); } } + +char *dce_version_to_string(const int version) +{ + switch (version) { + case DCE_VERSION_8_0: + return "DCE 8.0"; + case DCE_VERSION_8_1: + return "DCE 8.1"; + case DCE_VERSION_8_3: + return "DCE 8.3"; + case DCE_VERSION_10_0: + return "DCE 10.0"; + case DCE_VERSION_11_0: + return "DCE 11.0"; + case DCE_VERSION_11_2: + return "DCE 11.2"; + case DCE_VERSION_11_22: + return "DCE 11.22"; + case DCE_VERSION_12_0: + return "DCE 12.0"; + case DCE_VERSION_12_1: + return "DCE 12.1"; + case DCN_VERSION_1_0: + return "DCN 1.0"; + case DCN_VERSION_1_01: + return "DCN 1.0.1"; + case DCN_VERSION_2_0: + return "DCN 2.0"; + case DCN_VERSION_2_1: + return "DCN 2.1"; + case DCN_VERSION_2_01: + return "DCN 2.0.1"; + case DCN_VERSION_3_0: + return "DCN 3.0"; + case DCN_VERSION_3_01: + return "DCN 3.0.1"; + case DCN_VERSION_3_02: + return "DCN 3.0.2"; + case DCN_VERSION_3_03: + return "DCN 3.0.3"; + case DCN_VERSION_3_1: + return "DCN 3.1"; + case DCN_VERSION_3_14: + return "DCN 3.1.4"; + case DCN_VERSION_3_15: + return "DCN 3.1.5"; + case DCN_VERSION_3_16: + return "DCN 3.1.6"; + case DCN_VERSION_3_2: + return "DCN 3.2"; + case DCN_VERSION_3_21: + return "DCN 3.2.1"; + default: + return "Unknown"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 25284006019c..3697ea1d14c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h 
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -131,6 +131,7 @@ union stream_update_flags { uint32_t dsc_changed : 1; uint32_t mst_bw : 1; uint32_t crtc_timing_adjust : 1; + uint32_t fams_changed : 1; } bits; uint32_t raw; @@ -171,6 +172,10 @@ struct mall_temp_config { bool is_phantom_plane[MAX_PIPES]; }; +struct dc_stream_debug_options { + char force_odm_combine_segments; +}; + struct dc_stream_state { // sink is deprecated, new code should not reference // this pointer @@ -181,6 +186,7 @@ struct dc_stream_state { * a stream via the volatile dc_state rather than the static dc_link. */ struct link_encoder *link_enc; + struct dc_stream_debug_options debug; struct dc_panel_patch sink_patches; union display_content_support content_support; struct dc_crtc_timing timing; @@ -227,6 +233,7 @@ struct dc_stream_state { */ bool vrr_active_variable; bool freesync_on_desktop; + bool vrr_active_fixed; bool converter_disable_audio; uint8_t qs_bit; @@ -295,6 +302,7 @@ struct dc_stream_state { bool vblank_synchronized; bool fpo_in_use; struct mall_stream_config mall_stream_config; + bool skip_edp_power_down; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -320,6 +328,7 @@ struct dc_stream_update { bool integer_scaling_update; bool *allow_freesync; bool *vrr_active_variable; + bool *vrr_active_fixed; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 45ab48fe5d00..445ad79001ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -69,13 +69,6 @@ enum dce_environment { DCE_ENV_VIRTUAL_HW }; -/* Note: use these macro definitions instead of direct comparison! */ -#define IS_FPGA_MAXIMUS_DC(dce_environment) \ - (dce_environment == DCE_ENV_FPGA_MAXIMUS) - -#define IS_DIAG_DC(dce_environment) \ - (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG)) - struct dc_perf_trace { unsigned long read_count; unsigned long write_count; @@ -83,7 +76,7 @@ struct dc_perf_trace { unsigned long last_entry_write; }; -#define MAX_SURFACE_NUM 4 +#define MAX_SURFACE_NUM 6 #define NUM_PIXEL_FORMATS 10 enum tiling_mode { @@ -603,6 +596,7 @@ enum dc_psr_state { PSR_STATE4b_FULL_FRAME, PSR_STATE4c_FULL_FRAME, PSR_STATE4_FULL_FRAME_POWERUP, + PSR_STATE4_FULL_FRAME_HW_LOCK, PSR_STATE5, PSR_STATE5a, PSR_STATE5b, @@ -884,7 +878,7 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_0_mps; /* In MPs */ uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; - bool is_dp; + bool is_dp; /* Decoded format */ }; struct dc_golden_table { @@ -907,6 +901,14 @@ enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_AGP }; +enum dc_link_encoding_format { + DC_LINK_ENCODING_UNSPECIFIED = 0, + DC_LINK_ENCODING_DP_8b_10b, + DC_LINK_ENCODING_DP_128b_132b, + DC_LINK_ENCODING_HDMI_TMDS, + DC_LINK_ENCODING_HDMI_FRL +}; + enum dc_psr_version { DC_PSR_VERSION_1 = 0, DC_PSR_VERSION_SU_1 = 1, @@ -1000,6 +1002,10 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; +struct backlight_settings { + uint32_t backlight_millinits; +}; + /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink @@ -1019,6 +1025,45 @@ struct psr_settings { unsigned int psr_power_opt; }; +enum replay_coasting_vtotal_type { + PR_COASTING_TYPE_NOM = 0, + PR_COASTING_TYPE_STATIC, + PR_COASTING_TYPE_FULL_SCREEN_VIDEO, + 
PR_COASTING_TYPE_TEST_HARNESS, + PR_COASTING_TYPE_NUM, +}; + +union replay_error_status { + struct { + unsigned char STATE_TRANSITION_ERROR :1; + unsigned char LINK_CRC_ERROR :1; + unsigned char DESYNC_ERROR :1; + unsigned char RESERVED :5; + } bits; + unsigned char raw; +}; + +struct replay_config { + bool replay_supported; // Replay feature is supported + unsigned int replay_power_opt_supported; // Power opt flags that are supported + bool replay_smu_opt_supported; // SMU optimization is supported + unsigned int replay_enable_option; // Replay enablement option + uint32_t debug_flags; // Replay debug flags + bool replay_timing_sync_supported; // Replay desync is supported + union replay_error_status replay_error_status; // Replay error status +}; + +/* Replay feature flags */ +struct replay_settings { + struct replay_config config; // Replay configuration + bool replay_feature_enabled; // Replay feature is ready for activating + bool replay_allow_active; // Replay is currently active + unsigned int replay_power_opt_active; // Power opt flags that are activated currently + bool replay_smu_opt_enable; // SMU optimization is enabled + uint16_t coasting_vtotal; // Current Coasting vtotal + uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table +}; + /* To split out "global" and "per-panel" config settings. * Add a struct dc_panel_config under dc_link */ @@ -1045,9 +1090,11 @@ struct dc_panel_config { struct psr { bool disable_psr; bool disallow_psrsu; + bool disallow_replay; bool rc_disable; bool rc_allow_static_screen; bool rc_allow_fullscreen_VPB; + unsigned int replay_enable_option; } psr; /* ABM */ struct varib { diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 0d7db132a20f..15b64c26d5a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -29,8 +29,8 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ -dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \ -dmub_hw_lock_mgr.o dmub_outbox.o +dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \ +dmub_hw_lock_mgr.o dmub_outbox.o dmub_replay.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index e6c06325742a..168cb7094c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -266,7 +266,24 @@ type MASTER_COMM_INTERRUPT; \ type MASTER_COMM_CMD_REG_BYTE0; \ type MASTER_COMM_CMD_REG_BYTE1; \ - type MASTER_COMM_CMD_REG_BYTE2 + type MASTER_COMM_CMD_REG_BYTE2; \ + type ABM1_HG_BIN_33_40_SHIFT_INDEX; \ + type ABM1_HG_BIN_33_64_SHIFT_FLAG; \ + type ABM1_HG_BIN_41_48_SHIFT_INDEX; \ + type ABM1_HG_BIN_49_56_SHIFT_INDEX; \ + type ABM1_HG_BIN_57_64_SHIFT_INDEX; \ + type ABM1_HG_RESULT_DATA; \ + type ABM1_HG_RESULT_INDEX; \ + type ABM1_ACE_SLOPE_DATA; \ + type ABM1_ACE_OFFSET_DATA; \ + type ABM1_ACE_OFFSET_SLOPE_INDEX; \ + type ABM1_ACE_THRES_INDEX; \ + type ABM1_ACE_IGNORE_MASTER_LOCK_EN; \ + type ABM1_ACE_READBACK_DB_REG_VALUE_EN; \ + type ABM1_ACE_DBUF_REG_UPDATE_PENDING; \ + type ABM1_ACE_LOCK; \ + type ABM1_ACE_THRES_DATA_1; \ + type ABM1_ACE_THRES_DATA_2 struct dce_abm_shift { ABM_REG_FIELD_LIST(uint8_t); @@ -288,6 +305,16 @@ struct 
dce_abm_registers { uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES; uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS; uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0; + uint32_t DC_ABM1_ACE_OFFSET_SLOPE_DATA; + uint32_t DC_ABM1_ACE_PWL_CNTL; + uint32_t DC_ABM1_HG_BIN_33_40_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_33_64_SHIFT_FLAG; + uint32_t DC_ABM1_HG_BIN_41_48_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_49_56_SHIFT_INDEX; + uint32_t DC_ABM1_HG_BIN_57_64_SHIFT_INDEX; + uint32_t DC_ABM1_HG_RESULT_DATA; + uint32_t DC_ABM1_HG_RESULT_INDEX; + uint32_t DC_ABM1_ACE_THRES_DATA; uint32_t DC_ABM1_ACE_THRES_12; uint32_t MASTER_COMM_CNTL_REG; uint32_t MASTER_COMM_CMD_REG; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 07359eb89efc..e7acd6eec1fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -640,7 +640,7 @@ static void dce11_pplib_apply_display_requirements( * on power saving. * */ - pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ? pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 462c7a3ec3cc..ed8936405dfa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -920,25 +920,6 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } /* First disable SS * ATOMBIOS will enable by default SS on PLL for DP, * do not disable it here @@ -1015,25 +996,6 @@ static bool dcn31_program_pix_clk( REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); } else { - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) REG_UPDATE(PIXEL_RATE_CNTL[inst], diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index e74266cc0098..b87bfecb7755 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu) } static bool 
dce_dmcu_load_iram(struct dmcu *dmcu, - unsigned int start_offset, - const char *src, - unsigned int bytes) + unsigned int start_offset, + const char *src, + unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; @@ -1093,11 +1093,9 @@ static void dcn21_dmcu_construct( dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); - dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); - dmcu_dce->base.psp_version = psp_version; - } + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); + dmcu_dce->base.psp_version = psp_version; } struct dmcu *dce_dmcu_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index a3fee929cd12..86233f94db4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -98,6 +98,29 @@ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5) +#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1),\ + SRII(PIXEL_RATE_CNTL, blk, 2),\ + SRII(PIXEL_RATE_CNTL, blk, 3), \ + SRII(PIXEL_RATE_CNTL, blk, 4) + +#define HWSEQ_PHYPLL_REG_LIST_302(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4) + +#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1) + +#define HWSEQ_PHYPLL_REG_LIST_303(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) + + #define HWSEQ_PHYPLL_REG_LIST_201(blk) \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) @@ -387,7 +410,11 @@ SR(MPC_CRC_RESULT_C), \ SR(MPC_CRC_RESULT_AR), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_CLOCK_CONTROL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL) #define HWSEQ_DCN301_REG_LIST()\ SR(REFCLK_CNTL), \ @@ -508,8 +535,11 @@ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ + HWSEQ_PIXEL_RATE_REG_LIST_302(OTG), \ + HWSEQ_PHYPLL_REG_LIST_302(OTG), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_CLOCK_CONTROL) #define HWSEQ_DCN303_REG_LIST() \ HWSEQ_DCN_REG_LIST(), \ @@ -540,28 +570,6 @@ SR(AZALIA_CONTROLLER_CLOCK_GATING), \ SR(HPO_TOP_CLOCK_CONTROL) -#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \ - SRII(PIXEL_RATE_CNTL, blk, 0), \ - SRII(PIXEL_RATE_CNTL, blk, 1),\ - SRII(PIXEL_RATE_CNTL, blk, 2),\ - SRII(PIXEL_RATE_CNTL, blk, 3), \ - SRII(PIXEL_RATE_CNTL, blk, 4) - -#define HWSEQ_PHYPLL_REG_LIST_302(blk) \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4) - -#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \ - SRII(PIXEL_RATE_CNTL, blk, 0), \ - SRII(PIXEL_RATE_CNTL, blk, 1) - -#define HWSEQ_PHYPLL_REG_LIST_303(blk) \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) - struct dce_hwseq_registers { uint32_t DCFE_CLOCK_CONTROL[6]; uint32_t 
DCFEV_CLOCK_CONTROL; @@ -663,14 +671,15 @@ struct dce_hwseq_registers { uint32_t MC_VM_XGMI_LFB_CNTL; uint32_t AZALIA_AUDIO_DTO; uint32_t AZALIA_CONTROLLER_CLOCK_GATING; + /* MMHUB VM */ + uint32_t MC_VM_FB_LOCATION_BASE; + uint32_t MC_VM_FB_LOCATION_TOP; + uint32_t MC_VM_FB_OFFSET; + uint32_t MMHUBBUB_MEM_PWR_CNTL; uint32_t HPO_TOP_CLOCK_CONTROL; uint32_t ODM_MEM_PWR_CTRL3; uint32_t DMU_MEM_PWR_CNTL; - uint32_t MMHUBBUB_MEM_PWR_CNTL; uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; - uint32_t MC_VM_FB_LOCATION_BASE; - uint32_t MC_VM_FB_LOCATION_TOP; - uint32_t MC_VM_FB_OFFSET; uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ @@ -915,6 +924,7 @@ struct dce_hwseq_registers { #define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ @@ -1012,7 +1022,8 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh) + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh) #define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 6d1b01c267b7..4f552c3e7663 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( return dce_i2c_hw; } -static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) +static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) { enum i2c_channel_operation_result result; uint32_t i = 0; @@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw( return period_timeout * num_of_clock_stretches; } -static bool dce_i2c_hw_engine_submit_payload( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_payload *payload, - bool middle_of_transaction, - uint32_t speed) +static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, + struct i2c_payload *payload, + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index f1aeb6d1967c..e188447c8156 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine( return true; } + bool dce_i2c_engine_acquire_sw( struct dce_i2c_sw *dce_i2c_sw, struct ddc *ddc_handle) @@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw( return result; } - - - -static void dce_i2c_sw_engine_submit_channel_request( - struct dce_i2c_sw *engine, - struct i2c_request_transaction_data *req) +static void 
dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine, + struct i2c_request_transaction_data *req) { struct ddc *ddc = engine->ddc; uint16_t clock_delay_div_4 = engine->clock_delay >> 2; @@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request( I2C_CHANNEL_OPERATION_FAILED; } -static bool dce_i2c_sw_engine_submit_payload( - struct dce_i2c_sw *engine, - struct i2c_payload *payload, - bool middle_of_transaction) +static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, + struct i2c_payload *payload, + bool middle_of_transaction) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index fa314493ffc5..136bd93c3b65 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -941,9 +941,7 @@ bool dce110_link_encoder_validate_output_with_stream( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: - is_valid = - (stream->timing. - pixel_encoding == PIXEL_ENCODING_RGB) ? true : false; + is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; break; case SIGNAL_TYPE_VIRTUAL: is_valid = true; @@ -1645,7 +1643,7 @@ void dce110_link_encoder_enable_hpd(struct link_encoder *enc) uint32_t hpd_enable = 0; uint32_t value = dm_read_reg(ctx, addr); - get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN); + hpd_enable = get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN); if (hpd_enable == 0) set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 9fc48208c2e4..d3e6544022b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -24,212 +24,167 @@ */ #include "dmub_abm.h" -#include "dce_abm.h" +#include "dmub_abm_lcd.h" #include "dc.h" -#include "dc_dmub_srv.h" -#include "dmub/dmub_srv.h" #include "core_types.h" -#include "dm_services.h" -#include "reg_helper.h" -#include "fixed31_32.h" - -#include "atom.h" +#include "dmub_cmd.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) -#define REG(reg) \ - (dce_abm->regs->reg) +#define ABM_FEATURE_NO_SUPPORT 0 +#define ABM_LCD_SUPPORT 1 -#undef FN -#define FN(reg_name, field_name) \ - dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name +static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst) +{ + struct dc_context *dc = abm->ctx; + struct dc_link *edp_links[MAX_NUM_EDP]; + int i; + int edp_num; + unsigned int ret = ABM_FEATURE_NO_SUPPORT; -#define CTX \ - dce_abm->base.ctx + dc_get_edp_links(dc->dc, edp_links, &edp_num); -#define DISABLE_ABM_IMMEDIATELY 255 + for (i = 0; i < edp_num; i++) { + if (panel_inst == i) + break; + } + if (i < edp_num) { + ret = ABM_LCD_SUPPORT; + } + return ret; +} -static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) +static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) { - union dmub_rb_cmd cmd; - uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 
1 : 0; - uint32_t edp_id_count = dc->dc_edp_id_count; - int i; - uint8_t panel_mask = 0; - - for (i = 0; i < edp_id_count; i++) - panel_mask |= 0x01 << i; - - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; - cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; - cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); - - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dmub_abm_init(abm, backlight); } -static void dmub_abm_init(struct abm *abm, uint32_t backlight) +static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) { - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); - REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); - - REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, - ABM1_HG_NUM_OF_BINS_SEL, 0, - ABM1_HG_VMAX_SEL, 1, - ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); - - REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, - ABM1_IPCSC_COEFF_SEL_R, 2, - ABM1_IPCSC_COEFF_SEL_G, 4, - ABM1_IPCSC_COEFF_SEL_B, 2); + return dmub_abm_get_current_backlight(abm); +} - REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, - BL1_PWM_CURRENT_ABM_LEVEL, backlight); +static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) +{ + return dmub_abm_get_target_backlight(abm); +} - REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, - BL1_PWM_TARGET_ABM_LEVEL, backlight); +static bool dmub_abm_set_level_ex(struct abm *abm, uint32_t level) +{ + bool ret = false; + unsigned int feature_support, i; + uint8_t panel_mask0 = 0; - REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + for (i = 0; i < MAX_NUM_EDP; i++) { + feature_support = abm_feature_support(abm, i); - REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, - ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, - ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); + if (feature_support == ABM_LCD_SUPPORT) + panel_mask0 |= (0x01 << i); + } - REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, - ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, - ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, - ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); + if (panel_mask0) + ret = dmub_abm_set_level(abm, level, panel_mask0); - dmub_abm_enable_fractional_pwm(abm->ctx); + return ret; } -static unsigned int dmub_abm_get_current_backlight(struct abm *abm) +static bool dmub_abm_init_config_ex(struct abm *abm, + const char *src, + unsigned int bytes, + unsigned int inst) { - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); + unsigned int feature_support; - /* return backlight in hardware format which is unsigned 17 bits, with - * 1 bit integer and 16 bit fractional - */ - return backlight; -} + feature_support = abm_feature_support(abm, inst); -static unsigned int dmub_abm_get_target_backlight(struct abm *abm) -{ - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); + if (feature_support == ABM_LCD_SUPPORT) + dmub_abm_init_config(abm, src, bytes, inst); - /* return backlight in hardware format which is unsigned 17 bits, with - * 1 bit integer and 16 bit fractional - */ - return backlight; + return true; 
} -static bool dmub_abm_set_level(struct abm *abm, uint32_t level) +static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) { - union dmub_rb_cmd cmd; - struct dc_context *dc = abm->ctx; - struct dc_link *edp_links[MAX_NUM_EDP]; - int i; - int edp_num; - uint8_t panel_mask = 0; - - dc_get_edp_links(dc->dc, edp_links, &edp_num); - - for (i = 0; i < edp_num; i++) { - if (edp_links[i]->link_status.link_active) - panel_mask |= (0x01 << i); - } + bool ret = false; + unsigned int feature_support; - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_level.header.type = DMUB_CMD__ABM; - cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; - cmd.abm_set_level.abm_set_level_data.level = level; - cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; - cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); + feature_support = abm_feature_support(abm, panel_inst); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_pause(abm, pause, panel_inst, stream_inst); - return true; + return ret; } -static bool dmub_abm_init_config(struct abm *abm, - const char *src, - unsigned int bytes, - unsigned int inst) +/***************************************************************************** + * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's + * Varibright states for LCD only. OLED is TBD + * @abm: used to check get dc context + * @panel_inst: panel instance index + * @pData: contains command to pause/un-pause abm and abm parameters + * + * + ***************************************************************************/ +static bool dmub_abm_save_restore_ex( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData) { - union dmub_rb_cmd cmd; + bool ret = false; + unsigned int feature_support; struct dc_context *dc = abm->ctx; - uint8_t panel_mask = 0x01 << inst; - // TODO: Optimize by only reading back final 4 bytes - dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + feature_support = abm_feature_support(abm, panel_inst); + + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_save_restore(dc, panel_inst, pData); - // Copy iramtable into cw7 - memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); + return ret; +} - memset(&cmd, 0, sizeof(cmd)); - // Fw will copy from cw7 to fw_state - cmd.abm_init_config.header.type = DMUB_CMD__ABM; - cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; - cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; - cmd.abm_init_config.abm_init_config_data.bytes = bytes; - cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; +static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +{ + bool ret = false; + unsigned int feature_support; - cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); + feature_support = abm_feature_support(abm, panel_inst); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_pipe(abm, otg_inst, 
option, panel_inst); - return true; + return ret; } -static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) +static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int controller_id, + unsigned int panel_inst) { - union dmub_rb_cmd cmd; - struct dc_context *dc = abm->ctx; - uint8_t panel_mask = 0x01 << panel_inst; + bool ret = false; + unsigned int feature_support; - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_pause.header.type = DMUB_CMD__ABM; - cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; - cmd.abm_pause.abm_pause_data.enable = pause; - cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; - cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); + feature_support = abm_feature_support(abm, panel_inst); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_backlight_level(abm, backlight_pwm_u16_16, frame_ramp, panel_inst); - return true; + return ret; } static const struct abm_funcs abm_funcs = { - .abm_init = dmub_abm_init, - .set_abm_level = dmub_abm_set_level, - .get_current_backlight = dmub_abm_get_current_backlight, - .get_target_backlight = dmub_abm_get_target_backlight, - .init_abm_config = dmub_abm_init_config, - .set_abm_pause = dmub_abm_set_pause, + .abm_init = dmub_abm_init_ex, + .set_abm_level = dmub_abm_set_level_ex, + .get_current_backlight = dmub_abm_get_current_backlight_ex, + .get_target_backlight = dmub_abm_get_target_backlight_ex, + .init_abm_config = dmub_abm_init_config_ex, + .set_abm_pause = dmub_abm_set_pause_ex, + .save_restore = dmub_abm_save_restore_ex, + .set_pipe_ex = dmub_abm_set_pipe_ex, + .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; static void dmub_abm_construct( @@ -256,16 +211,19 @@ struct abm *dmub_abm_create( const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { - struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); + if (ctx->dc->caps.dmcub_support) { + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); - if (abm_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } + if (abm_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } - dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); + dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); - return &abm_dce->base; + return &abm_dce->base; + } + return NULL; } void dmub_abm_destroy(struct abm **abm) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c new file mode 100644 index 000000000000..592a8f7a1c6d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -0,0 +1,298 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_abm.h" +#include "dmub_abm_lcd.h" +#include "dce_abm.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "dmub/dmub_srv.h" +#include "core_types.h" +#include "dm_services.h" +#include "reg_helper.h" +#include "fixed31_32.h" + +#ifdef _WIN32 +#include "atombios.h" +#else +#include "atom.h" +#endif + +#define TO_DMUB_ABM(abm)\ + container_of(abm, struct dce_abm, base) + +#define REG(reg) \ + (dce_abm->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name + +#define CTX \ + dce_abm->base.ctx + +#define DISABLE_ABM_IMMEDIATELY 255 + + + +static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) +{ + union dmub_rb_cmd cmd; + uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0; + uint32_t edp_id_count = dc->dc_edp_id_count; + int i; + uint8_t panel_mask = 0; + + for (i = 0; i < edp_id_count; i++) + panel_mask |= 0x01 << i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; + cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; + cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void dmub_abm_init(struct abm *abm, uint32_t backlight) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); + REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); + + REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, + ABM1_HG_NUM_OF_BINS_SEL, 0, + ABM1_HG_VMAX_SEL, 1, + ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); + + REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, + ABM1_IPCSC_COEFF_SEL_R, 2, + ABM1_IPCSC_COEFF_SEL_G, 4, + ABM1_IPCSC_COEFF_SEL_B, 2); + + REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, + BL1_PWM_CURRENT_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, + BL1_PWM_TARGET_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_USER_LEVEL, + BL1_PWM_USER_LEVEL, backlight); + + REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, + ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, + ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); + + REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, + ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); + + dmub_abm_enable_fractional_pwm(abm->ctx); +} + +unsigned int dmub_abm_get_current_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +unsigned int 
dmub_abm_get_target_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_level.header.type = DMUB_CMD__ABM; + cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; + cmd.abm_set_level.abm_set_level_data.level = level; + cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + +void dmub_abm_init_config(struct abm *abm, + const char *src, + unsigned int bytes, + unsigned int inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint8_t panel_mask = 0x01 << inst; + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); + + memset(&cmd, 0, sizeof(cmd)); + // Fw will copy from cw7 to fw_state + cmd.abm_init_config.header.type = DMUB_CMD__ABM; + cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; + cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_init_config.abm_init_config_data.bytes = bytes; + cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + +} + +bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint8_t panel_mask = 0x01 << panel_inst; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_pause.header.type = DMUB_CMD__ABM; + cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; + cmd.abm_pause.abm_pause_data.enable = pause; + cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + + +/***************************************************************************** + * dmub_abm_save_restore() - dmub interface for abm save+pause and restore+ + * un-pause + * @dc: dc context + * @panel_inst: panel instance index + * @pData: contains command to pause/un-pause abm and exchange abm parameters + * + * When called Pause will get abm data and store in pData, and un-pause will + * set/apply abm data stored in pData. 
+ * + *****************************************************************************/ +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + union dmub_rb_cmd cmd; + uint8_t panel_mask = 0x01 << panel_inst; + unsigned int bytes = sizeof(struct abm_save_restore); + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes); + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_save_restore.header.type = DMUB_CMD__ABM; + cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE; + + cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_save_restore.abm_init_config_data.bytes = bytes; + cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + // Copy iramtable data into local structure + memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + +bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint32_t ramping_boundary = 0xFFFF; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; + cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; + cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; + cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; + cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; + cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; + cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + +bool dmub_abm_set_backlight_level(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h new file mode 100644 index 000000000000..853564d7f471 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DMUB_ABM_LCD_H__ +#define __DMUB_ABM_LCD_H__ + +#include "abm.h" + +struct abm_save_restore; + +void dmub_abm_init(struct abm *abm, uint32_t backlight); +bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); +unsigned int dmub_abm_get_current_backlight(struct abm *abm); +unsigned int dmub_abm_get_target_backlight(struct abm *abm); +void dmub_abm_init_config(struct abm *abm, + const char *src, + unsigned int bytes, + unsigned int inst); + +bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst); +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData); +bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); +bool dmub_abm_set_backlight_level(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int panel_inst); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 3f32e9c3fbaf..2aa0e01a6891 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -47,9 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c index fff1d07d865d..d8009b2dc56a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -48,7 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 9705d8f88382..0f24b6fbd220 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -87,6 +87,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state) state = PSR_STATE4c_FULL_FRAME; else if (raw_state == 0x4E) state = PSR_STATE4_FULL_FRAME_POWERUP; + else if (raw_state == 0x4F) + state = PSR_STATE4_FULL_FRAME_HW_LOCK; else if (raw_state == 0x60) state = PSR_STATE_HWLOCK_MGR; else if (raw_state == 0x61) @@ -168,9 +170,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -198,9 +198,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8 cmd.psr_enable.header.payload_bytes = 0; // Send header only - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at @@ -248,9 +246,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -269,9 +265,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub, cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -290,9 +284,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -422,9 +414,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 2; copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -445,9 +435,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; cmd.psr_enable.header.payload_bytes = 0; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c new file 
mode 100644 index 000000000000..28149e53c2a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -0,0 +1,303 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "dmub/dmub_srv.h" +#include "core_types.h" +#include "dmub_replay.h" + +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + +#define MAX_PIPES 6 + +/* + * Get Replay state from firmware. + */ +static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst) +{ + struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; + uint32_t retry_count = 0; + enum dmub_status status; + + do { + // Send gpint command and wait for ack + status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30); + + if (status == DMUB_STATUS_OK) { + // GPINT was executed, get response + dmub_srv_get_gpint_response(srv, (uint32_t *)state); + } else { + // Return invalid state when GPINT times out + *state = REPLAY_STATE_INVALID; + } + } while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID); + + // Assert if max retry hit + if (retry_count >= 1000 && *state == REPLAY_STATE_INVALID) { + ASSERT(0); + /* To-do: Add retry fail log */ + } +} + +/* + * Enable/Disable Replay. + */ +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + uint32_t retry_count; + enum replay_state state = REPLAY_STATE_0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_enable.header.type = DMUB_CMD__REPLAY; + cmd.replay_enable.data.panel_inst = panel_inst; + + cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; + if (enable) + cmd.replay_enable.data.enable = REPLAY_ENABLE; + else + cmd.replay_enable.data.enable = REPLAY_DISABLE; + + cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + /* Below loops 1000 x 500us = 500 ms. + * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at + * least a few frames. Should never hit the max retry assert below.
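dmub_replay_get_state() above talks to the firmware over GPINT (a scratch-register mailbox) rather than the ring buffer: each query is a send, an ack wait, then a single 32-bit response read, retried until a valid value comes back. A condensed sketch of that query pattern, assuming only the two dmub_srv calls used above; query_gpint_u32() itself is a hypothetical helper:

static bool query_gpint_u32(struct dmub_srv *srv, enum dmub_gpint_command code,
			    uint16_t param, uint32_t *out)
{
	uint32_t tries;

	for (tries = 0; tries < 1000; tries++) {
		/* 30 us ack timeout per attempt, as in the driver above */
		if (dmub_srv_send_gpint_command(srv, code, param, 30) == DMUB_STATUS_OK)
			return dmub_srv_get_gpint_response(srv, out) == DMUB_STATUS_OK;
	}
	return false;	/* firmware never acknowledged the command */
}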
+ */ + if (wait) { + for (retry_count = 0; retry_count <= 1000; retry_count++) { + dmub_replay_get_state(dmub, &state, panel_inst); + + if (enable) { + if (state != REPLAY_STATE_0) + break; + } else { + if (state == REPLAY_STATE_0) + break; + } + + fsleep(500); + } + + /* assert if max retry hit */ + if (retry_count >= 1000) + ASSERT(0); + } + +} + +/* + * Set REPLAY power optimization flags. + */ +static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_power_opt.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_power_opt.header.sub_type = DMUB_CMD__SET_REPLAY_POWER_OPT; + cmd.replay_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_power_opt_data); + cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt; + cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst; + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/* + * Setup Replay by programming phy registers and sending replay hw context values to firmware. + */ +static bool dmub_replay_copy_settings(struct dmub_replay *dmub, + struct dc_link *link, + struct replay_context *replay_context, + uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + struct dmub_cmd_replay_copy_settings_data *copy_settings_data + = &cmd.replay_copy_settings.replay_copy_settings_data; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + int i = 0; + + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (!pipe_ctx) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_copy_settings.header.type = DMUB_CMD__REPLAY; + cmd.replay_copy_settings.header.sub_type = DMUB_CMD__REPLAY_COPY_SETTINGS; + cmd.replay_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_replay_copy_settings_data); + + // HW insts + copy_settings_data->aux_inst = replay_context->aux_inst; + copy_settings_data->digbe_inst = replay_context->digbe_inst; + copy_settings_data->digfe_inst = replay_context->digfe_inst; + + if (pipe_ctx->plane_res.dpp) + copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; + else + copy_settings_data->dpp_inst = 0; + if (pipe_ctx->stream_res.tg) + copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; + else + copy_settings_data->otg_inst = 0; + + copy_settings_data->dpphy_inst = link->link_enc->transmitter; + + // Misc + copy_settings_data->line_time_in_ns = replay_context->line_time_in_ns; + copy_settings_data->panel_inst = panel_inst; + copy_settings_data->debug.u32All = link->replay_settings.config.debug_flags; + copy_settings_data->pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; + copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; + copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable; + copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported; + + copy_settings_data->flags.u32All = 0; + copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == 
dc_link_fec_enabled); + copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + // WA for PSRSU+DSC on specific TCON, if DSC is enabled, force PSRSU as ffu mode(full frame update) + if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !link->dc->debug.disable_fec) && + (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->panel_config.dsc.disable_dsc_edp && + link->dc->caps.edp_dsc_support)) && + link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&& + (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, + sizeof(DP_SINK_DEVICE_STR_ID_1)) || + !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, + sizeof(DP_SINK_DEVICE_STR_ID_2)))*/) + copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1; + else + copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; + + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + +/* + * Set coasting vtotal. + */ +static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, + uint16_t coasting_vtotal, + uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; + cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); + cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/* + * Get Replay residency from firmware. + */ +static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, + uint32_t *residency, const bool is_start, const bool is_alpm) +{ + struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; + uint16_t param = (uint16_t)(panel_inst << 8); + + if (is_alpm) + param |= REPLAY_RESIDENCY_MODE_ALPM; + + if (is_start) + param |= REPLAY_RESIDENCY_ENABLE; + + // Send gpint command and wait for ack + dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30); + + if (!is_start) + dmub_srv_get_gpint_response(srv, residency); + else + *residency = 0; +} + +static const struct dmub_replay_funcs replay_funcs = { + .replay_copy_settings = dmub_replay_copy_settings, + .replay_enable = dmub_replay_enable, + .replay_get_state = dmub_replay_get_state, + .replay_set_power_opt = dmub_replay_set_power_opt, + .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, + .replay_residency = dmub_replay_residency, +}; + +/* + * Construct Replay object. + */ +static void dmub_replay_construct(struct dmub_replay *replay, struct dc_context *ctx) +{ + replay->ctx = ctx; + replay->funcs = &replay_funcs; +} + +/* + * Allocate and initialize Replay object. + */ +struct dmub_replay *dmub_replay_create(struct dc_context *ctx) +{ + struct dmub_replay *replay = kzalloc(sizeof(struct dmub_replay), GFP_KERNEL); + + if (replay == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_replay_construct(replay, ctx); + + return replay; +} + +/* + * Deallocate Replay object. 
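dmub_replay_create() and dmub_replay_destroy() above follow the standard DC object pattern: kzalloc plus a construct helper that wires in the static funcs table, and a destroy that frees and NULLs the caller's pointer so a double destroy is harmless. Caller-side usage would look roughly like this; the surrounding resource-pool context is assumed, not shown in this hunk:

	struct dmub_replay *replay = dmub_replay_create(ctx);

	if (!replay)
		return false;	/* create already called BREAK_TO_DEBUGGER() */

	/* ... replay->funcs->replay_enable(replay, true, true, panel_inst); ... */

	dmub_replay_destroy(&replay);	/* kfree()s and sets the pointer to NULL */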
+ */ +void dmub_replay_destroy(struct dmub_replay **dmub) +{ + kfree(*dmub); + *dmub = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h new file mode 100644 index 000000000000..e8385bbf51fc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -0,0 +1,58 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_REPLAY_H_ +#define _DMUB_REPLAY_H_ + +#include "dc_types.h" +#include "dmub_cmd.h" +struct dc_link; +struct dmub_replay_funcs; + +struct dmub_replay { + struct dc_context *ctx; + const struct dmub_replay_funcs *funcs; +}; + +struct dmub_replay_funcs { + void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, + uint8_t panel_inst); + void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, + uint8_t panel_inst); + bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, + struct replay_context *replay_context, uint8_t panel_inst); + void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, + uint8_t panel_inst); + void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal, + uint8_t panel_inst); + void (*replay_residency)(struct dmub_replay *dmub, + uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm); +}; + +struct dmub_replay *dmub_replay_create(struct dc_context *ctx); +void dmub_replay_destroy(struct dmub_replay **dmub); + + +#endif /* _DMUB_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 54805802cbd5..899b25b0bad8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -401,6 +401,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -820,7 +824,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1071,6 +1075,7 @@ static bool 
dce100_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8d2460d06bce..2a6157555fd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -209,9 +209,6 @@ static bool dce110_enable_display_power_gating( struct dc_context *ctx = dc->ctx; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) @@ -222,7 +219,7 @@ static bool dce110_enable_display_power_gating( if (controller_id == underlay_idx) controller_id = CONTROLLER_ID_UNDERLAY0 - 1; - if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){ + if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, controller_id + 1, cntl); @@ -780,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready( dal_gpio_destroy_irq(&hpd); /* ensure that the panel is detected */ - ASSERT(edp_hpd_high); + if (!edp_hpd_high) + DC_LOG_DC("%s: wait timed out!\n", __func__); } void dce110_edp_power_control( @@ -966,7 +964,9 @@ void dce110_edp_backlight_control( return; } - if (link->panel_cntl) { + if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) { bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl); if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) { @@ -1153,6 +1153,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct timing_generator *tg = pipe_ctx->stream_res.tg; struct dtbclk_dto_params dto_params = {0}; int dp_hpo_inst; + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1176,9 +1178,14 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dto_params.otg_inst = tg->inst; dto_params.timing = &pipe_ctx->stream->timing; dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); - dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); + if (dccg) { + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); + } + } else if (dccg && dccg->funcs->disable_symclk_se) { + dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, + link_enc->transmitter - TRANSMITTER_UNIPHY_A); } if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { @@ -1219,7 +1226,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq *hws = link->dc->hwseq; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - hws->funcs.edp_backlight_control(link, false); + if (!stream->skip_edp_power_down) + 
hws->funcs.edp_backlight_control(link, false); link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } @@ -1587,6 +1595,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( */ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; + pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } return DC_OK; } @@ -1794,10 +1803,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + power_down_all_hw_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } @@ -2015,6 +2027,10 @@ static bool should_enable_fbc(struct dc *dc, if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled) return false; + /* Replay should not be enabled */ + if (pipe_ctx->stream->link->replay_settings.replay_feature_enabled) + return false; + /* Nothing to compress */ if (!pipe_ctx->plane_state) return false; @@ -2291,6 +2307,11 @@ enum dc_status dce110_apply_ctx_to_hw( if (DC_OK != status) return status; + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } if (dc->fbc_compressor) @@ -2640,11 +2661,11 @@ void dce110_prepare_bandwidth( struct clk_mgr *dccg = dc->clk_mgr; dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); - - dccg->funcs->update_clocks( - dccg, - context, - false); + if (dccg) + dccg->funcs->update_clocks( + dccg, + context, + false); } void dce110_optimize_bandwidth( @@ -2655,10 +2676,11 @@ void dce110_optimize_bandwidth( dce110_set_displaymarks(dc, context); - dccg->funcs->update_clocks( - dccg, - context, - true); + if (dccg) + dccg->funcs->update_clocks( + dccg, + context, + true); } static void dce110_program_front_end_for_pipe( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index a4a45a6ce61e..1289b9418877 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -424,6 +424,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .per_pixel_alpha = 1, @@ -938,7 +942,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1115,13 +1119,15 @@ static enum dc_status dce110_add_stream_to_ctx( } static struct pipe_ctx *dce110_acquire_underlay( - struct dc_state *context, + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head_pipe) { + struct dc_stream_state *stream = opp_head_pipe->stream; struct dc *dc = stream->ctx->dc; struct dce_hwseq *hws = dc->hwseq; - struct 
resource_context *res_ctx = &context->res_ctx; + struct resource_context *res_ctx = &new_ctx->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1169,7 +1175,7 @@ static struct pipe_ctx *dce110_acquire_underlay( stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, - context->stream_count); + new_ctx->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); @@ -1229,7 +1235,7 @@ static const struct resource_funcs dce110_res_pool_funcs = { .panel_cntl_create = dce110_panel_cntl_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, - .acquire_idle_pipe_for_layer = dce110_acquire_underlay, + .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link @@ -1368,6 +1374,7 @@ static bool dce110_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 27cbb5b42c7e..6424e7f279dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -288,7 +288,7 @@ bool dce110_timing_generator_program_timing_generator( uint32_t vsync_offset = dc_crtc_timing->v_border_bottom + dc_crtc_timing->v_front_porch; - uint32_t v_sync_start =dc_crtc_timing->v_addressable + vsync_offset; + uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset; uint32_t hsync_offset = dc_crtc_timing->h_border_right + dc_crtc_timing->h_front_porch; @@ -603,7 +603,7 @@ void dce110_timing_generator_program_blanking( { uint32_t vsync_offset = timing->v_border_bottom + timing->v_front_porch; - uint32_t v_sync_start =timing->v_addressable + vsync_offset; + uint32_t v_sync_start = timing->v_addressable + vsync_offset; uint32_t hsync_offset = timing->h_border_right + timing->h_front_porch; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 19873ee1f78d..0ef9ebb3c1e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -120,9 +120,6 @@ static bool dce112_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) @@ -130,7 +127,7 @@ static bool dce112_enable_display_power_gating( else cntl = ASIC_PIPE_DISABLE; - if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){ + if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, controller_id + 1, cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index e179e80667d1..2b20180f1a32 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -429,6 +429,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -869,7 +873,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -960,7 +964,7 @@ enum dc_status resource_map_phy_clock_resources( { /* acquire new resources */ - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream( + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) @@ -970,10 +974,12 @@ enum dc_status resource_map_phy_clock_resources( || dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->clock_source = dc->res_pool->dp_clock_source; - else - pipe_ctx->clock_source = find_matching_pll( - &context->res_ctx, dc->res_pool, - stream); + else { + if (stream && stream->link && stream->link->link_enc) + pipe_ctx->clock_source = find_matching_pll( + &context->res_ctx, dc->res_pool, + stream); + } if (pipe_ctx->clock_source == NULL) return DC_NO_CLOCK_SOURCE_RESOURCE; @@ -1239,6 +1245,7 @@ static bool dce112_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index d4afe6c824d2..45e08c4d5861 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -159,9 +159,6 @@ static bool dce120_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index af631085e88c..18c5a86d2d61 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -526,6 +526,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, + .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 5825e6f412bd..061221394ce0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -58,13 +58,13 @@ #include "dce/dce_i2c.h" /* TODO remove this include */ -#include "dce80_resource.h" - #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif +#include "dce80/dce80_resource.h" + #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE @@ -418,6 +418,10 @@ static const struct 
dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; @@ -969,6 +973,7 @@ static bool dce80_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * @@ -1369,6 +1374,7 @@ static bool dce83_construct( dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 7a00fe525dfb..3538973bd0c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float( #define NUMBER_REGIONS 32 #define NUMBER_SW_SEGMENTS 16 -bool cm_helper_translate_curve_to_hw_format( +#define DC_LOGGER \ + ctx->logger + +bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint) { @@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format( rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); + if (fixpoint == true) { - rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red); - rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green); - rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue); + uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red); + uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green); + uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue); + + if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10) + DC_LOG_WARNING("Losing delta precision while programming shaper LUT."); + + rgb->delta_red_reg = red_clamp & 0x3ff; + rgb->delta_green_reg = green_clamp & 0x3ff; + rgb->delta_blue_reg = blue_clamp & 0x3ff; rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 3b8cd7410498..0a68b63d6126 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h @@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float( bool fixpoint); bool cm_helper_translate_curve_to_hw_format( + struct dc_context *ctx, const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index b33955928bd0..5ca9ab8a76e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,9 +39,6 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -200,8 +197,7 @@ static void dpp1_dscl_set_lb( DITHER_EN, 0, /* Dithering enable: Disabled */ 
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } - else { + } else { /* DSCL caps: pixel data processed in float format */ REG_SET_2(LB_DATA_FORMAT, 0, INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ @@ -591,18 +587,6 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { - int visual_confirm_on = 0; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; - - if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - visual_confirm_on = 1; - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; - } - REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -613,8 +597,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); + RECOUT_HEIGHT, recout->height); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 0b17c2993ca5..09784222cc03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -690,6 +690,8 @@ struct dcn_hubp_state { uint32_t primary_surface_addr_hi; uint32_t primary_meta_addr_lo; uint32_t primary_meta_addr_hi; + uint32_t uclk_pstate_force; + uint32_t hubp_cntl; }; struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 1c3b6f25a782..9834b75f1837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1012,31 +1012,29 @@ static void dcn10_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. - */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. 
When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } @@ -1499,54 +1497,32 @@ void dcn10_init_hw(struct dc *dc) if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init) dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) hws->funcs.disable_vga(dc->hwseq); - hws->funcs.bios_golden_init(dc); + if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv)) + hws->funcs.bios_golden_init(dc); + if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -1867,7 +1843,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full * update. 
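The 750 us figure in the comment above is why the curve translation is restricted to full updates. A condensed, non-verbatim sketch of the decision around this call in dcn10_set_output_transfer_func(), using the usual DC enums; treat it as an outline under those assumptions, not the exact function body:

	/* ROM SRGB curve needs no translation; anything else is translated
	 * (at ~750us cost) and programmed as a user curve, else bypassed. */
	const struct dc_transfer_func *tf = stream->out_transfer_func;

	if (tf->type == TF_TYPE_PREDEFINED && tf->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
	else if (cm_helper_translate_curve_to_hw_format(dc->ctx, tf,
			&dpp->regamma_params, false))
		dpp->funcs->dpp_program_regamma_pwl(dpp, &dpp->regamma_params,
			OPP_REGAMMA_USER);
	else
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);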
*/ - else if (cm_helper_translate_curve_to_hw_format( + else if (cm_helper_translate_curve_to_hw_format(dc->ctx, stream->out_transfer_func, &dpp->regamma_params, false)) { dpp->funcs->dpp_program_regamma_pwl( @@ -1923,6 +1899,11 @@ void dcn10_pipe_control_lock( * * TODO: Optimize cursor programming to be once per frame before VUPDATE * to avoid the need for this workaround. + * + * @dc: Current DC state + * @pipe_ctx: Pipe_ctx pointer for delayed cursor update + * + * Return: void */ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) { @@ -2600,23 +2581,15 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) +void dcn10_update_visual_confirm_color(struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) { struct mpc *mpc = dc->res_pool->mpc; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else - color_space_to_black_color( - dc, pipe_ctx->stream->output_color_space, color); - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); + memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color)); + mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id); } } @@ -2669,7 +2642,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2691,7 +2664,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; @@ -3076,15 +3049,13 @@ void dcn10_prepare_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - false); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, @@ -3116,15 +3087,13 @@ void dcn10_optimize_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - 
context, - true); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, @@ -3309,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect( if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; hubp->funcs->set_blank(hubp, true); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 0ef7bf7ddb75..ef6d56da417c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -202,7 +202,6 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits); void dcn10_update_visual_confirm_color( struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index a0f8e31d2adc..46a2ebcabd1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -45,7 +45,8 @@ #include "dcn10_cm_common.h" #include "clk_mgr.h" -unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) +__printf(3, 4) +unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...) { int ret_vsnprintf; unsigned int chars_printed; @@ -53,15 +54,15 @@ unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) 
va_list args; va_start(args, fmt); - ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args); + ret_vsnprintf = vsnprintf(pbuf, bufsize, fmt, args); va_end(args); if (ret_vsnprintf > 0) { - if (ret_vsnprintf < bufSize) + if (ret_vsnprintf < bufsize) chars_printed = ret_vsnprintf; else - chars_printed = bufSize - 1; + chars_printed = bufsize - 1; } else chars_printed = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ee08b545aaea..377f1ba1a81b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - if (!dcn10_is_dig_enabled(enc)) { + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ /*in DP_Alt_No_Connect case, we turn off the dig already, after excuation the PHY w/a sequence, not allow touch PHY any more*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8e9384094f6d..f2f55565e98a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane( /* check insert_above_mpcc exist in tree->opp_list */ struct mpcc *temp_mpcc = tree->opp_list; - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc != insert_above_mpcc) + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; if (temp_mpcc == NULL) return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 41cec7acf51f..0dec57679269 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -37,14 +37,14 @@ #define CTX \ oppn10->base.ctx - -/************* FORMATTER ************/ - /** - * set_truncation + * opp1_set_truncation(): * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp * 2) enable truncation * 3) HW remove 12bit FMT support for DCE11 power saving reason. + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to bit_depth_reduction_params. */ static void opp1_set_truncation( struct dcn10_opp *oppn10, @@ -149,11 +149,12 @@ void opp1_program_bit_depth_reduction( } /** - * set_pixel_encoding - * - * Set Pixel Encoding + * opp1_set_pixel_encoding(): * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly * 1: YCbCr 4:2:2 + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to clamping_and_pixel_encoding_params. */ static void opp1_set_pixel_encoding( struct dcn10_opp *oppn10, @@ -180,13 +181,16 @@ static void opp1_set_pixel_encoding( } /** - * Set Clamping + * opp1_set_clamping(): * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) * 1 for 8 bpc * 2 for 10 bpc * 3 for 12 bpc * 7 for programable * 2) Enable clamp if Limited range requested + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to clamping_and_pixel_encoding_params. 
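Spelled out, the clamping-format encoding described in the comment above maps color depth to a small register code. A helper like the following captures the mapping; bpc_to_clamp_format() is illustrative only, not a driver function:

static uint32_t bpc_to_clamp_format(enum dc_color_depth depth)
{
	switch (depth) {
	case COLOR_DEPTH_666:		return 0;	/* 6 bpc: no clamping */
	case COLOR_DEPTH_888:		return 1;	/* 8 bpc */
	case COLOR_DEPTH_101010:	return 2;	/* 10 bpc */
	case COLOR_DEPTH_121212:	return 3;	/* 12 bpc */
	default:			return 7;	/* programmable clamp */
	}
}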
*/ static void opp1_set_clamping( struct dcn10_opp *oppn10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index c9e53dc49c92..0e8f4f36c87c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -42,11 +42,13 @@ #define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100 /** -* apply_front_porch_workaround TODO FPGA still need? -* -* This is a workaround for a bug that has existed since R5xx and has not been -* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. -*/ + * apply_front_porch_workaround() - Work around a bug that has existed since + * R5xx and has never been fixed: keep the + * front porch at a minimum of 2 for + * interlaced mode or 1 for progressive. + * + * @timing: Timing parameters used to configure DCN blocks. + */ static void apply_front_porch_workaround(struct dc_crtc_timing *timing) { if (timing->flags.INTERLACE == 1) { @@ -133,9 +135,20 @@ void optc1_setup_vertical_interrupt2( } /** - * program_timing_generator used by mode timing set - * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition. - * Including SYNC. Call BIOS command table to program Timings. + * optc1_program_timing() - Program CRTC timing registers (OTG_H_*, OTG_V_*, + * pixel repetition), including SYNC; used by mode + * timing set. Calls the BIOS command table to + * program timings. + * + * @optc: timing_generator instance. + * @dc_crtc_timing: Timing parameters used to configure DCN blocks. + * @vready_offset: Vready's starting position. + * @vstartup_start: Vstartup period. + * @vupdate_offset: Vupdate starting position. + * @vupdate_width: Vupdate duration. + * @signal: DC signal types. + * @use_vbios: Whether to program timings from the BIOS command table. + * */ void optc1_program_timing( struct timing_generator *optc, @@ -385,6 +398,9 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. * + * @optc: timing_generator instance. + * @enable: Enable DRR double buffering control if true, disable otherwise. + * * Options: any time, start of frame, dp start of frame (range timing) */ void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable) @@ -397,8 +413,9 @@ void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable) } /** - * unblank_crtc - * Call ASIC Control Object to UnBlank CRTC. + * optc1_unblank_crtc() - Call ASIC Control Object to UnBlank CRTC. + * + * @optc: timing_generator instance. */ static void optc1_unblank_crtc(struct timing_generator *optc) { @@ -419,8 +436,9 @@ static void optc1_unblank_crtc(struct timing_generator *optc) } /** - * blank_crtc - * Call ASIC Control Object to Blank CRTC. + * optc1_blank_crtc() - Call ASIC Control Object to Blank CRTC. + * + * @optc: timing_generator instance. */ static void optc1_blank_crtc(struct timing_generator *optc) @@ -493,8 +511,9 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable) } /** - * Enable CRTC - * Enable CRTC - call ASIC Control Object to enable Timing generator. + * optc1_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance.
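These hunks migrate the old free-form banners to kernel-doc, whose parser expects a fixed shape: a "/**" opener, a "name() - summary" line, one "@param:" line per argument, then an optional body and Return: section after a blank comment line. For reference, the expected shape on an illustrative function:

/**
 * optc1_example() - One-line summary ending on the first line.
 * @optc: timing_generator instance.
 * @enable: true to enable, false to disable.
 *
 * Free-form description goes here, separated by a blank comment line.
 *
 * Return: true on success, false otherwise.
 */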
*/ static bool optc1_enable_crtc(struct timing_generator *optc) { @@ -653,11 +672,9 @@ void optc1_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - /* Should be fast, status does not update on maximus */ - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } @@ -892,15 +909,11 @@ static void optc1_program_manual_trigger(struct timing_generator *optc) MANUAL_FLOW_CONTROL, 0); } - /** - ***************************************************************************** - * Function: set_drr + * optc1_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. * - * @brief - * Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. - * - ***************************************************************************** + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. */ void optc1_set_drr( struct timing_generator *optc, @@ -932,19 +945,10 @@ void optc1_set_drr( OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); - - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); } + + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); } void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 21ec1ba5ed75..9f9145742f14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -553,6 +553,7 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static const struct dc_debug_options debug_defaults_diags = { @@ -886,13 +887,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn10_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn10_hwseq_create, -}; - static void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); @@ -1061,7 +1055,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1089,14 +1083,15 @@ static enum dc_status dcn10_add_stream_to_ctx( return result; } -static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( - struct dc_state *context, +static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx 
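The optc1_set_drr() rework above drops the else-branch reset and arms the TRIG_A manual-trigger path unconditionally after programming OTG_V_TOTAL_CONTROL. For orientation, the V_TOTAL_MIN/MAX values those registers carry are just frame lengths derived from the desired refresh range; a back-of-the-envelope sketch with illustrative 1080p numbers (not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
        /* vtotal = pixel_clock / (htotal * refresh_rate) */
        const unsigned long pix_clk_hz = 148500000;    /* 1080p60 CTA timing */
        const unsigned int htotal = 2200;
        const unsigned int hz_max = 60, hz_min = 48;   /* assumed DRR range */

        unsigned int v_total_min = pix_clk_hz / ((unsigned long)htotal * hz_max);
        unsigned int v_total_max = pix_clk_hz / ((unsigned long)htotal * hz_min);

        /* prints V_TOTAL_MIN=1125 V_TOTAL_MAX=1406 */
        printf("V_TOTAL_MIN=%u V_TOTAL_MAX=%u\n", v_total_min, v_total_max);
        return 0;
    }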
*opp_head_pipe) { - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); if (!head_pipe) { ASSERT(0); @@ -1277,7 +1272,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .link_enc_create = dcn10_link_encoder_create, .panel_cntl_create = dcn10_panel_cntl_create, .validate_bandwidth = dcn10_validate_bandwidth, - .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, @@ -1651,9 +1646,8 @@ static bool dcn10_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto fail; + &res_create_funcs)) + goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 7bdc146f7cb5..c8602bcfa393 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -208,7 +208,9 @@ #define DCCG314_REG_FIELD_LIST(type) \ type DSCCLK3_DTO_PHASE;\ type DSCCLK3_DTO_MODULO;\ - type DSCCLK3_DTO_ENABLE; + type DSCCLK3_DTO_ENABLE;\ + type DENTIST_DISPCLK_RDIVIDER;\ + type DENTIST_DISPCLK_WDIVIDER; #define DCCG32_REG_FIELD_LIST(type) \ type DPSTREAMCLK0_EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 5bd698cd6d20..5eebe7f03ddc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -30,22 +30,13 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps); -static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, - struct dsc_optc_config *dsc_optc_cfg); -static void dsc_init_reg_values(struct dsc_reg_values *reg_vals); -static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); -static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple); -static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth); /* Object I/F functions */ -static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); -static bool dsc2_get_packed_pps(struct display_stream_compressor 
*dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe); static void dsc2_disable(struct display_stream_compressor *dsc); static void dsc2_disconnect(struct display_stream_compressor *dsc); @@ -108,7 +99,7 @@ void dsc2_construct(struct dcn20_dsc *dsc, /* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput * can be doubled, tripled etc. by using additional DSC engines. */ -static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ @@ -184,7 +175,7 @@ static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const st } -static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) +void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) { DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h); DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v); @@ -211,7 +202,7 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct } -static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) { bool is_config_ok; struct dsc_reg_values dsc_reg_vals; @@ -291,7 +282,7 @@ static void dsc2_disconnect(struct display_stream_compressor *dsc) } /* This module's internal functions */ -static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) +void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) { int i; int bits_per_pixel = pps->bits_per_pixel; @@ -345,7 +336,7 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co } } -static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) +void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) { uint8_t i; @@ -372,7 +363,7 @@ static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_ rc->flatness_det_thresh = override->flatness_det_thresh; } -static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, struct dsc_optc_config *dsc_optc_cfg) { struct dsc_parameters dsc_params; @@ -463,7 +454,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ } -static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) { enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; @@ -495,7 +486,7 @@ static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_p } -static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) { enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN; @@ -518,7 +509,7 @@ static enum 
dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_co } -static void dsc_init_reg_values(struct dsc_reg_values *reg_vals) +void dsc_init_reg_values(struct dsc_reg_values *reg_vals) { int i; @@ -574,7 +565,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals) * This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn * affects non-PPS register values. */ -static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 7ce64a3c1b02..ba869387c3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -549,6 +549,27 @@ struct dcn20_dsc { int max_image_width; }; +void dsc_config_log(struct display_stream_compressor *dsc, + const struct dsc_config *config); + +void dsc_log_pps(struct display_stream_compressor *dsc, + struct drm_dsc_config *pps); + +void dsc_override_rc_params(struct rc_params *rc, + const struct dc_dsc_rc_params_override *override); + +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, + struct dsc_reg_values *dsc_reg_vals, + struct dsc_optc_config *dsc_optc_cfg); + +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, + bool is_ycbcr422_simple); + +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth); + +void dsc_init_reg_values(struct dsc_reg_values *reg_vals); + +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); void dsc2_construct(struct dcn20_dsc *dsc, struct dc_context *ctx, @@ -557,5 +578,12 @@ void dsc2_construct(struct dcn20_dsc *dsc, const struct dcn20_dsc_shift *dsc_shift, const struct dcn20_dsc_mask *dsc_mask); +void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, + int pixel_clock_100Hz); + +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, + const struct dsc_config *dsc_cfg, + uint8_t *dsc_packed_pps); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 24bd93219936..6eebcb22e317 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -623,6 +623,17 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); } + + if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); + hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + } + + if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) + hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); + + if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) + hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5403e9399a46..aeadc587433f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -313,6 +313,10 @@ void dcn20_init_blank( } opp = dc->res_pool->opps[opp_id_src0]; + /* don't override the blank pattern if already enabled with the correct one. */ + if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp)) + return; + if (num_opps == 2) { otg_active_width = otg_active_width / 2; @@ -863,7 +867,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, params = &stream->out_transfer_func->pwl; else if (pipe_ctx->stream->out_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(dc->ctx, stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; @@ -892,7 +896,7 @@ bool dcn20_set_blend_lut( if (plane_state->blend_tf->type == TF_TYPE_HWPWL) blend_lut = &plane_state->blend_tf->pwl; else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->blend_tf, &dpp_base->regamma_params, false); blend_lut = &dpp_base->regamma_params; @@ -914,7 +918,7 @@ bool dcn20_set_shaper_3dlut( if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) shaper_lut = &plane_state->in_shaper_func->pwl; else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->in_shaper_func, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; @@ -1050,9 +1054,9 @@ void dcn20_blank_pixel_data( enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; - - int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; if (stream->link->test_pattern_enabled) return; @@ -1062,8 +1066,8 @@ void dcn20_blank_pixel_data( for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) odm_cnt++; - - width = width / odm_cnt; + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); if (blank) { dc->hwss.set_abm_immediate_disable(pipe_ctx); @@ -1076,51 +1080,31 @@ void dcn20_blank_pixel_data( test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } - dc->hwss.set_disp_pattern_generator(dc, - pipe_ctx, - test_pattern, - test_pattern_color_space, - stream->timing.display_color_depth, - &black_color, - width, - height, - 0); + odm_pipe = pipe_ctx; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + while (odm_pipe->next_odm_pipe) { dc->hwss.set_disp_pattern_generator(dc, odm_pipe, - dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? 
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + test_pattern, test_pattern_color_space, stream->timing.display_color_depth, &black_color, - width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } - if (!blank && dc->debug.enable_single_display_2to1_odm_policy) { - /* when exiting dynamic ODM need to reinit DPG state for unused pipes */ - struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe; - - odm_pipe = pipe_ctx->next_odm_pipe; - - while (old_odm_pipe) { - if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx) - dc->hwss.set_disp_pattern_generator(dc, - old_odm_pipe, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - COLOR_DEPTH_888, - NULL, - 0, - 0, - 0); - old_odm_pipe = old_odm_pipe->next_odm_pipe; - if (odm_pipe) - odm_pipe = odm_pipe->next_odm_pipe; - } - } + dc->hwss.set_disp_pattern_generator(dc, + odm_pipe, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + &black_color, + last_odm_slice_width, + v_active, + offset); if (!blank) if (stream_res->abm) { @@ -1262,20 +1246,21 @@ void dcn20_pipe_control_lock( } if (flip_immediate && lock) { - const int TIMEOUT_FOR_FLIP_PENDING = 100000; + const int TIMEOUT_FOR_FLIP_PENDING_US = 100000; + unsigned int polling_interval_us = 1; int i; temp_pipe = pipe; while (temp_pipe) { if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) { - for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) { if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp)) break; - udelay(1); + udelay(polling_interval_us); } /* no reason it should take this long for immediate flips */ - ASSERT(i != TIMEOUT_FOR_FLIP_PENDING); + ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US); } temp_pipe = temp_pipe->bottom_pipe; } @@ -1357,6 +1342,7 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.dppclk = 1; new_pipe->update_flags.bits.hubp_interdependent = 1; new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.unbounded_req = 1; new_pipe->update_flags.bits.gamut_remap = 1; new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; @@ -1500,6 +1486,9 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; } + + if (old_pipe->unbounded_req != new_pipe->unbounded_req) + new_pipe->update_flags.bits.unbounded_req = 1; } static void dcn20_update_dchubp_dpp( @@ -1533,10 +1522,11 @@ static void dcn20_update_dchubp_dpp( &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); - - if (hubp->funcs->set_unbounded_requesting) - hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); } + + if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); + if (pipe_ctx->update_flags.bits.hubp_interdependent) hubp->funcs->hubp_setup_interdependent( hubp, @@ -1571,17 +1561,6 @@ static void dcn20_update_dchubp_dpp( || plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.per_pixel_alpha_change) { // MPCC inst is equal to pipe index in practice - int mpcc_inst = hubp->inst; - int 
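The dcn20_blank_pixel_data() rework above replaces one uniform pattern width with per-slice widths plus a running offset: each ODM slice gets h_active / odm_cnt pixels and the last slice absorbs the integer-division remainder, so the slices always tile h_active exactly. A small arithmetic sketch with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        const int h_active = 1930, odm_cnt = 4;        /* illustrative values */
        int slice = h_active / odm_cnt;                /* 482 */
        int last = h_active - slice * (odm_cnt - 1);   /* 484: takes the remainder */
        int i, offset = 0;

        for (i = 0; i < odm_cnt - 1; i++, offset += slice)
            printf("slice %d: offset=%4d width=%d\n", i, offset, slice);
        printf("slice %d: offset=%4d width=%d\n", i, offset, last);
        /* offsets 0/482/964/1446; 1446 + 484 == 1930 == h_active */
        return 0;
    }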
opp_inst; - int opp_count = dc->res_pool->pipe_count; - - for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { - if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { - dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); - dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; - break; - } - } hws->funcs.update_mpcc(dc, pipe_ctx); } @@ -1625,6 +1604,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.gamut_remap + || plane_state->update_flags.bits.gamut_remap_change || pipe_ctx->stream->update_flags.bits.out_csc) { /* dpp/cm gamut remap*/ dc->hwss.program_gamut_remap(pipe_ctx); @@ -1708,11 +1688,16 @@ static void dcn20_program_pipe( struct dc_state *context) { struct dce_hwseq *hws = dc->hwseq; - /* Only need to unblank on top pipe */ - if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) - && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) - hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + /* Only need to unblank on top pipe */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) { + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.odm || + pipe_ctx->stream->update_flags.bits.abm_level) + hws->funcs.blank_pixel_data(dc, pipe_ctx, + !pipe_ctx->plane_state || + !pipe_ctx->plane_state->visible); + } /* Only update TG on top pipe */ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe @@ -1755,8 +1740,9 @@ static void dcn20_program_pipe( hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change || + pipe_ctx->plane_state->update_flags.bits.lut_3d) hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish @@ -1939,7 +1925,8 @@ void dcn20_post_unlock_program_front_end( struct dc_state *context) { int i; - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000; + unsigned int polling_interval_us = 1; struct dce_hwseq *hwseq = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); @@ -1961,10 +1948,9 @@ void dcn20_post_unlock_program_front_end( pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { struct hubp *hubp = pipe->plane_res.hubp; int j = 0; - - for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us && hubp->funcs->hubp_is_flip_pending(hubp); j++) - udelay(1); + udelay(polling_interval_us); } } @@ -2113,11 +2099,20 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc_dmub_srv_p_state_delegate(dc, + true, context); + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->clks.fw_based_mclk_switching = true; + } else { + dc->clk_mgr->clks.fw_based_mclk_switching = false; + } + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, true); - if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == 
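Both timeout hunks above (flip-pending and pipe-enable) trade a bare iteration count for an explicit microsecond budget divided by a polling interval, which keeps the loop bound meaningful if the interval is ever changed. A stand-alone sketch of that pattern, with stub names standing in for udelay() and the hubp flip-pending query:

    #include <stdbool.h>
    #include <stdio.h>

    #define TIMEOUT_US        100000    /* total budget in microseconds */
    #define POLL_INTERVAL_US  1         /* delay between polls */

    static void udelay_stub(unsigned int us) { (void)us; }    /* busy-wait stand-in */

    static int polls_left = 3;    /* fake hardware flag that clears after 3 polls */
    static bool flip_pending(void) { return --polls_left > 0; }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < TIMEOUT_US / POLL_INTERVAL_US; i++) {
            if (!flip_pending())
                break;                  /* cleared within budget */
            udelay_stub(POLL_INTERVAL_US);
        }
        if (i < TIMEOUT_US / POLL_INTERVAL_US)
            printf("cleared after %u polls\n", i);
        else
            printf("timed out\n");
        return 0;
    }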
DCN_ZSTATE_SUPPORT_ALLOW) { + if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2125,7 +2120,7 @@ void dcn20_optimize_bandwidth( && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total) pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp, - pipe_ctx->dlg_regs.optimized_min_dst_y_next_start); + pipe_ctx->dlg_regs.min_dst_y_next_start); } } } @@ -2462,36 +2457,31 @@ static void dcn20_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. - */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + /* DPMS may have already disabled the stream, or the + * dpms_off status may be incorrect due to the fastboot + * feature: when the system resumes from S4 with only a + * second screen, dpms_off would be true even though the + * VBIOS lit up eDP, so check the link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /* disable az_endpoint */ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /* free audio */ + if (dc->caps.dynamic_audio == true) { + /* we have to dynamically arbitrate the audio endpoints, + * so we free the resource and need to reset is_audio_acquired */ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } - else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); - } /* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable @@ -2567,28 +2557,6 @@ void dcn20_reset_hw_ctx_wrap( } } -void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) -{ - struct mpc *mpc = dc->res_pool->mpc; - - // input to MPCC is always RGB, by default leave black_color at 0 - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) - get_mpctree_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, pipe_ctx, color); - - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); - } -} - void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2644,7 +2612,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) if (!pipe_ctx->plane_state->update_flags.bits.full_update && !pipe_ctx->update_flags.bits.mpcc) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2666,7 +2634,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; @@ -2743,8 +2711,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - } - + } else { + } if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 33a36c02b2f8..01901b08644c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -150,10 +150,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); -void dcn20_update_visual_confirm_color(struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct tg_color *color, - int mpcc_id); - #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 7c5817c426fa..e4b44e691ce6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -102,7 +102,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color + 
.update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn20_private_funcs = { @@ -145,8 +145,4 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn20_funcs; dc->hwseq->funcs = dcn20_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index a08c335b7383..58bdbd859bf9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -38,8 +38,12 @@ optc1->tg_shift->field_name, optc1->tg_mask->field_name /** - * Enable CRTC - * Enable CRTC - call ASIC Control Object to enable Timing generator. + * optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance. + * + * Return: true if the CRTC was enabled, false otherwise. + * */ bool optc2_enable_crtc(struct timing_generator *optc) { @@ -73,15 +77,18 @@ bool optc2_enable_crtc(struct timing_generator *optc) } /** - *For the below, I'm not sure how your GSL parameters are stored in your env, - * so I will assume a gsl_params struct for now + * optc2_set_gsl() - Assign an OTG to a GSL group and + * set one of the OTGs to be the master, the rest slaves + * + * @optc: timing_generator instance. + * @params: pointer to gsl_params */ void optc2_set_gsl(struct timing_generator *optc, const struct gsl_params *params) { struct optc *optc1 = DCN10TG_FROM_TG(optc); -/** +/* * There are (MAX_OPTC+1)/2 gsl groups available for use. * In each group (assign an OTG to a group by setting OTG_GSLX_EN = 1, * set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves.
@@ -391,10 +398,9 @@ void optc2_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } void optc2_triplebuffer_unlock(struct timing_generator *optc) @@ -456,6 +462,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + /* Set the min/max selectors unconditionally so that + * DMCUB fw may change OTG timings when necessary + * TODO: Remove the w/a after fixing the issue in DMCUB firmware + */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + REG_SET_8(OTG_TRIGA_CNTL, 0, OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1d8c5805ef20..d587f807dfd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -722,22 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .scl_reset_length10 = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = true, + .enable_legacy_fast_update = true, }; void dcn20_dpp_destroy(struct dpp **dpp) @@ -1066,13 +1051,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn20_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn20_hwseq_create, -}; - static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); void dcn20_clock_source_destroy(struct clock_source **clk_src) @@ -1316,7 +1294,7 @@ static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { enum dc_status status = DC_OK; - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1970,7 +1948,7 @@ int dcn20_validate_apply_pipe_split_flags( v->ODMCombineEnablePerState[vlevel][pipe_plane]; if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { - if (get_num_mpc_splits(pipe) == 1) { + if (resource_get_num_mpc_splits(pipe) == 1) { /*If need split for mpc 
but 2 way split already*/ if (split[i] == 4) split[i] = 2; /* 2 -> 4 MPC */ @@ -1978,7 +1956,7 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = 0; /* 2 -> 2 MPC */ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) merge[i] = true; /* 2 -> 1 MPC */ - } else if (get_num_mpc_splits(pipe) == 3) { + } else if (resource_get_num_mpc_splits(pipe) == 3) { /*If need split for mpc but 4 way split already*/ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) || !pipe->bottom_pipe)) { @@ -1987,7 +1965,7 @@ int dcn20_validate_apply_pipe_split_flags( pipe->top_pipe->plane_state == pipe->plane_state) merge[i] = true; /* 4 -> 1 MPC */ split[i] = 0; - } else if (get_num_odm_splits(pipe)) { + } else if (resource_get_num_odm_splits(pipe)) { /* ODM -> MPC transition */ if (pipe->prev_odm_pipe) { split[i] = 0; @@ -1995,7 +1973,7 @@ int dcn20_validate_apply_pipe_split_flags( } } } else { - if (get_num_odm_splits(pipe) == 1) { + if (resource_get_num_odm_splits(pipe) == 1) { /*If need split for odm but 2 way split already*/ if (split[i] == 4) split[i] = 2; /* 2 -> 4 ODM */ @@ -2005,7 +1983,7 @@ int dcn20_validate_apply_pipe_split_flags( ASSERT(0); /* NOT expected yet */ merge[i] = true; /* exit ODM */ } - } else if (get_num_odm_splits(pipe) == 3) { + } else if (resource_get_num_odm_splits(pipe) == 3) { /*If need split for odm but 4 way split already*/ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) || !pipe->next_odm_pipe)) { @@ -2015,7 +1993,7 @@ int dcn20_validate_apply_pipe_split_flags( merge[i] = true; /* exit ODM */ } split[i] = 0; - } else if (get_num_mpc_splits(pipe)) { + } else if (resource_get_num_mpc_splits(pipe)) { /* MPC -> ODM transition */ ASSERT(0); /* NOT expected yet */ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { @@ -2169,31 +2147,31 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, return voltage_supported; } -struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( - struct dc_state *state, +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head) { - struct resource_context *res_ctx = &state->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream); + struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master); - if (!head_pipe) - ASSERT(0); + ASSERT(otg_master); - if (!idle_pipe) + if (!sec_dpp_pipe) return NULL; - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; + sec_dpp_pipe->stream = opp_head->stream; + sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg; + sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp; - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; + sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.ipp = 
pool->ipps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst; - return idle_pipe; + return sec_dpp_pipe; } bool dcn20_get_dcc_compression_cap(const struct dc *dc, @@ -2238,7 +2216,7 @@ static const struct resource_funcs dcn20_res_pool_funcs = { .link_enc_create = dcn20_link_encoder_create, .panel_cntl_create = dcn20_panel_cntl_create, .validate_bandwidth = dcn20_validate_bandwidth, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2488,15 +2466,9 @@ static bool dcn20_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - pool->base.mpcc_count = pool->base.pipe_count; - dc->debug = debug_defaults_diags; - } else { - dc->debug = debug_defaults_diags; - } + //dcn2.0x dc->work_arounds.dedcn20_305_wa = true; @@ -2734,9 +2706,8 @@ static bool dcn20_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn20_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index da0241e8c255..6d1a8924e57b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -58,10 +58,11 @@ unsigned int dcn20_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); -struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( - struct dc_state *state, +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream); + const struct pipe_ctx *opp_head_pipe); struct stream_encoder *dcn20_stream_encoder_create( enum engine_id eng_id, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 1aeb04fbd89d..9e027db6d752 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -231,52 +231,39 @@ void dcn201_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - - hws->funcs.dccg_init(hws); - - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - } else { - hws->funcs.bios_golden_init(dc); - - if (dc->ctx->dc_bios->fw_info_valid) { - res_pool->ref_clocks.xtalin_clock_inKhz = - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { - 
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); - - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } - } - } else - ASSERT_CRITICAL(false); - for (i = 0; i < dc->link_count; i++) { - /* Power up AND update implementation according to the - * required signal (which may be different from the - * default signal on connector). - */ - struct dc_link *link = dc->links[i]; - - link->link_enc->funcs->hw_init(link->link_enc); + hws->funcs.bios_golden_init(dc); + + if (dc->ctx->dc_bios->fw_info_valid) { + res_pool->ref_clocks.xtalin_clock_inKhz = + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; + + if (res_pool->dccg && res_pool->hubbub) { + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); + + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } - if (hws->fb_offset.quad_part == 0) - read_mmhub_vm_setup(hws); + } else + ASSERT_CRITICAL(false); + for (i = 0; i < dc->link_count; i++) { + /* Power up AND update implementation according to the + * required signal (which may be different from the + * default signal on connector). + */ + struct dc_link *link = dc->links[i]; + + link->link_enc->funcs->hw_init(link->link_enc); } + if (hws->fb_offset.quad_part == 0) + read_mmhub_vm_setup(hws); /* Blank pixel data with OPP DPG */ for (i = 0; i < res_pool->timing_generator_count; i++) { @@ -362,10 +349,6 @@ void dcn201_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } - /* end of FPGA. 
Below if real ASIC */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - return; - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -496,7 +479,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); return; } @@ -521,7 +504,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->res_pool->mpc, mpcc_id); /* Call MPC to insert new plane */ - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, mpc_tree_params, &blnd_cfg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c index 9c16633e473a..92dd4cddbab8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -91,7 +91,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = { .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn201_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c index 730875dfd8b4..70fcbec03fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c @@ -55,10 +55,9 @@ static void optc201_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } static void optc201_triplebuffer_unlock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 6ea70da28aaa..2dc4d2c1410b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -613,6 +613,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, + .enable_legacy_fast_update = true, }; static void dcn201_dpp_destroy(struct dpp **dpp) @@ -896,13 +897,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn201_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn201_hwseq_create, -}; - static void dcn201_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); @@ -998,14 +992,15 @@ static struct hubp *dcn201_hubp_create( return NULL; } -static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer( - struct dc_state *context, 
+static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head_pipe) { - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); if (!head_pipe) ASSERT(0); @@ -1073,7 +1068,7 @@ static struct resource_funcs dcn201_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = NULL, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer, .populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, @@ -1272,9 +1267,8 @@ static bool dcn201_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn201_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c index 33fc9aa8621b..d07c04458d31 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c @@ -43,7 +43,7 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h index e44a37491c1e..b7efa777ec73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h @@ -32,6 +32,5 @@ struct dccg *dccg21_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); #endif /* __DCN21_DCCG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 58e459c7e7d3..f976fac8dc3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -667,7 +667,6 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip static void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) { - struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); union dmub_rb_cmd cmd; @@ -690,11 +689,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp, cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_cmd_queue(dmcub, &cmd); - PERF_TRACE(); // TODO: remove after performance is stable. 
- dc_dmub_srv_cmd_execute(dmcub); - PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); // TODO: remove after performance is stable. } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 2a182c2f57d6..43463d08f21b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -152,13 +152,28 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } +static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; @@ -173,8 +188,12 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) } if (abm && panel_cntl) { - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, - panel_cntl->inst); + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, + panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst); + } panel_cntl->funcs->store_backlight_level(panel_cntl); } } @@ -191,18 +210,21 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) return; } - if (abm && panel_cntl) - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + if (abm && panel_cntl) { + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } + } } bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { - union dmub_rb_cmd cmd; struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; if (dc->dc->res_pool->dmcu) { @@ -210,21 +232,23 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, return true; } - if (abm && panel_cntl) - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + if (abm != NULL) { + uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_backlight.header.type 
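The hunks here replace the dc_dmub_srv_cmd_queue() / dc_dmub_srv_cmd_execute() / dc_dmub_srv_wait_idle() triple with a single dm_execute_dmub_cmd(ctx, cmd, wait_type) call. A sketch of what such a wrapper amounts to, using stub types rather than the driver's real structs:

    #include <stdbool.h>

    struct dmub_srv_stub { int ring; };                /* stand-in for dc_dmub_srv */
    union dmub_cmd_stub { unsigned char raw[64]; };    /* stand-in for dmub_rb_cmd */

    enum wait_type { WAIT_TYPE_NO_WAIT, WAIT_TYPE_WAIT };

    static void cmd_queue(struct dmub_srv_stub *s, union dmub_cmd_stub *c) { (void)s; (void)c; }
    static void cmd_execute(struct dmub_srv_stub *s) { (void)s; }
    static void wait_idle(struct dmub_srv_stub *s) { (void)s; }

    /* One helper replaces the three-call sequence at every call site. */
    static bool execute_dmub_cmd(struct dmub_srv_stub *srv,
                                 union dmub_cmd_stub *cmd,
                                 enum wait_type wait)
    {
        cmd_queue(srv, cmd);
        cmd_execute(srv);
        if (wait == WAIT_TYPE_WAIT)
            wait_idle(srv);
        return true;
    }

    int main(void)
    {
        struct dmub_srv_stub srv = { 0 };
        union dmub_cmd_stub cmd = { { 0 } };

        return execute_dmub_cmd(&srv, &cmd, WAIT_TYPE_WAIT) ? 0 : 1;
    }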
= DMUB_CMD__ABM; - cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; - cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; - cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; - cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst); - cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + if (abm && panel_cntl) { + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } + } + } - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, + frame_ramp, 0, panel_cntl->inst); + else + dmub_abm_set_backlight(dc, backlight_pwm_u16_16, frame_ramp, panel_cntl->inst); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index fe1a8e2e08ef..f024157bd6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn21_private_funcs = { @@ -151,8 +151,4 @@ void dcn21_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn21_funcs; dc->hwseq->funcs = dcn21_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 19aaa557b2db..d1a25fe6c44f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -653,28 +653,14 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .disable_48mhz_pwrdwn = true, - .enable_tri_buf = true, - .use_max_lb = true + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -869,8 +855,8 @@ bool dcn21_fast_validate_bw(struct dc *dc, /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - 
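The backlight paths above pass backlight_pwm_u16_16, a 16.16 fixed-point level where, going by the field's naming convention, 0x00010000 represents 1.0 (full brightness). A sketch of the conversion from a percentage, on that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* percent -> unsigned 16.16 fixed point (assumed encoding: 100% == 1.0) */
    static uint32_t pct_to_u16_16(unsigned int pct)
    {
        return (uint32_t)(((uint64_t)pct << 16) / 100);
    }

    int main(void)
    {
        printf("50%%  -> 0x%08x\n", (unsigned)pct_to_u16_16(50));   /* 0x00008000 */
        printf("100%% -> 0x%08x\n", (unsigned)pct_to_u16_16(100));  /* 0x00010000 */
        return 0;
    }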
&pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; @@ -1219,13 +1205,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn21_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn21_hwseq_create, -}; - static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, @@ -1409,7 +1388,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, @@ -1503,11 +1482,6 @@ static bool dcn21_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1721,9 +1695,8 @@ static bool dcn21_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn21_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index b7c2ae9ddfda..4a3e9e47b6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -1,16 +1,16 @@ -# +# # Copyright 2020 Advanced Micro Devices, Inc. -# +# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -18,17 +18,31 @@ # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
-#
+#
 # Authors: AMD
-#
-#
+#
+#
+
+DCN30 := \
+	dcn30_init.o \
+	dcn30_hubbub.o \
+	dcn30_hubp.o \
+	dcn30_dpp.o \
+	dcn30_optc.o \
+	dcn30_dccg.o \
+	dcn30_hwseq.o \
+	dcn30_mpc.o dcn30_vpg.o \
+	dcn30_afmt.o \
+	dcn30_dio_stream_encoder.o \
+	dcn30_dwb.o \
+	dcn30_dpp_cm.o \
+	dcn30_dwb_cm.o \
+	dcn30_cm_common.o \
+	dcn30_mmhubbub.o \
+	dcn30_resource.o \
+	dcn30_dio_link_encoder.o
-DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
-	dcn30_dccg.o dcn30_hwseq.o dcn30_mpc.o dcn30_vpg.o \
-	dcn30_afmt.o dcn30_dio_stream_encoder.o dcn30_dwb.o \
-	dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \
-	dcn30_dio_link_encoder.o dcn30_resource.o

 AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 9d08127d209b..005dbe099a7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -436,6 +436,21 @@ void enc3_stream_encoder_update_dp_info_packets(
 				&info_frame->vsc,
 				true);
 	}
+	/* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on.
+	 * There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU.
+	 * In addition, currently the driver check the valid bit then update and
+	 * send the corresponding Infopacket. For PSR-SU, the SDP only be sent
+	 * while entering PSR-SU mode. So we need another parameter(e.g. send)
+	 * in dc_info_packet to indicate which infopacket should be enabled by
+	 * default here.
+	 */
+	if (info_frame->vsc.valid) {
+		enc->vpg->funcs->update_generic_info_packet(
+				enc->vpg,
+				1,  /* packetIndex */
+				&info_frame->vsc,
+				true);
+	}
 /* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on.
  * There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU.
* In addition, currently the driver check the valid bit then update and diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index e5b7ef7422b8..50dc83404644 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes( int cur_rom_en = 0; if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) - cur_rom_en = 1; + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } REG_UPDATE_3(CURSOR0_CONTROL, CUR0_MODE, color_format, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 6a3d3a0ec0a3..701c7d8bc038 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func( dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL); if (dwb_ogam_lut) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(dwbc->ctx, in_transfer_func_dwb_ogam, dwb_ogam_lut, false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index e46bbe7ddcc9..2861d974fcf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -449,6 +449,12 @@ void hubp3_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(UCLK_PSTATE_FORCE)) + s->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 32121db2851e..255713ec29bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -106,7 +106,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, if (stream->func_shaper->type == TF_TYPE_HWPWL) { shaper_lut = &stream->func_shaper->pwl; } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(stream->func_shaper, + cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; } @@ -330,10 +330,6 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - /*till diags switch to warmup interface*/ - dcn30_mmhubbub_warmup(dc, 1, wb_info); - } /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -447,28 +443,6 @@ void dcn30_init_hw(struct dc *dc) if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - 
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); @@ -487,27 +461,30 @@ void dcn30_init_hw(struct dc *dc) REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); } + if (dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -632,7 +609,7 @@ void dcn30_init_hw(struct dc *dc) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } @@ -736,8 +713,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ; cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -859,9 +835,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.cursor_height = cursor_attr.height; cmd.mall.cursor_pitch = cursor_attr.pitch; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Use copied cursor, and it's okay to not switch back */ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; @@ -877,8 +851,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.tmr_scale = tmr_scale; cmd.mall.debug_bits = dc->debug.mall_error_as_fatal; - 
dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -895,9 +868,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -983,13 +954,53 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { + bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + /* + * enabled -> enabled: do not disable + * enabled -> disabled: disable + * disabled -> enabled: don't care + * disabled -> disabled: don't care + */ + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. 
*/ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } } +void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) +{ + unsigned int i; + unsigned int triggers = 0; + + if (params->triggers.surface_update) + triggers |= 0x100; + if (params->triggers.cursor_update) + triggers |= 0x8; + if (params->triggers.force_trigger) + triggers |= 0x1; + + for (i = 0; i < num_pipes; i++) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index a24a8e33a3d2..ce19c54097f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -87,5 +87,7 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3216d10c58ba..0de8b2783cf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, + .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported }; @@ -151,8 +151,4 @@ void dcn30_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn30_funcs; dc->hwseq->funcs = dcn30_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index c95f000b63b2..5bf4d0aa6230 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -55,10 +55,9 @@ void optc3_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } @@ -216,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc, optc1->opp_count = 1; } -static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { struct optc *optc1 = 
DCN10TG_FROM_TG(optc); @@ -280,6 +279,9 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. * + * @optc: timing_generator instance. + * @enable: Enable DRR double buffering control if true, disable otherwise. + * * Options: any time, start of frame, dp start of frame (range timing) */ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) @@ -291,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); } -static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -301,7 +303,12 @@ static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *o void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) { - optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max); + else + optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); } void optc3_tg_init(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index fb06dc9a4893..d3a056c12b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); void optc3_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing); +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 67a34cda3774..88c0b24a3249 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -725,31 +725,15 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, //No dmcu on DCN30 - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true + .exit_idle_opt_for_cursor_updates = true, + .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -1076,13 +1060,6 @@ static 
const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn30_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn30_hwseq_create, -}; - static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) { unsigned int i; @@ -1729,8 +1706,8 @@ noinline bool dcn30_internal_validate_bw( /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; @@ -2011,11 +1988,10 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context)) return false; - // check if freesync enabled if (!context->streams[0]->allow_freesync) return false; - if (context->streams[0]->vrr_active_variable) + if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming) return false; context->streams[0]->fpo_in_use = true; @@ -2087,7 +2063,8 @@ bool dcn30_validate_bandwidth(struct dc *dc, } DC_FP_START(); - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); DC_FP_END(); BW_VAL_TRACE_END_WATERMARKS(); @@ -2239,7 +2216,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2353,6 +2330,7 @@ static bool dcn30_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.max_v_total = (1 << 15) - 1; /* read VBIOS LTTPR caps */ { @@ -2376,10 +2354,7 @@ static bool dcn30_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2577,8 +2552,7 @@ static bool dcn30_resource_construct( /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 7aa628c21973..9002cb10a6ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -11,7 +11,8 @@ # Makefile for dcn30. 
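
The dcn30_resource.c hunk above also starts advertising dc->caps.max_v_total = (1 << 15) - 1, i.e. the 15-bit OTG V_TOTAL field that DRR and vblank stretching work against. The standalone arithmetic below illustrates what that cap implies for the lowest reachable refresh rate; it is an editor's example with invented timing values, not code from this patch.

	/*
	 * Illustration only: refresh = pixel_clock / (h_total * v_total),
	 * so a 15-bit V_TOTAL bounds how far vblank can be stretched.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t max_v_total = (1u << 15) - 1;	/* 32767 lines */
		const uint32_t h_total = 4400;			/* example 4K timing */
		const uint32_t v_total_nominal = 2250;
		const uint64_t pixel_clock_hz = 594000000ULL;	/* 594 MHz */

		double nominal_hz = (double)pixel_clock_hz /
				((double)h_total * v_total_nominal);
		double floor_hz = (double)pixel_clock_hz /
				((double)h_total * max_v_total);

		printf("nominal %.2f Hz, stretch floor %.2f Hz\n",
		       nominal_hz, floor_hz);
		return 0;
	}

For this example the floor comes out near 4.1 Hz against a 60 Hz nominal mode, so a 15-bit field leaves comfortable headroom for FAMS-style vblank stretching.
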
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o + dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \ + dcn301_optc.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index 6192851c59ed..61205cdbe2d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dcn10_set_cursor_position, @@ -107,7 +108,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c new file mode 100644 index 000000000000..b3cfcb887905 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c @@ -0,0 +1,185 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn301_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + + +/** + * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. 
+ */
+void optc301_set_drr(
+	struct timing_generator *optc,
+	const struct drr_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	if (params != NULL &&
+			params->vertical_total_max > 0 &&
+			params->vertical_total_min > 0) {
+
+		if (params->vertical_total_mid != 0) {
+
+			REG_SET(OTG_V_TOTAL_MID, 0,
+				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+					OTG_VTOTAL_MID_FRAME_NUM,
+					(uint8_t)params->vertical_total_mid_frame_num);
+
+		}
+
+		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+		REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+				OTG_V_TOTAL_MIN_SEL, 1,
+				OTG_V_TOTAL_MAX_SEL, 1,
+				OTG_FORCE_LOCK_ON_EVENT, 0,
+				OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+				OTG_SET_V_TOTAL_MIN_MASK, 0);
+		// Setup manual flow control for EOF via TRIG_A
+		optc->funcs->setup_manual_trigger(optc);
+
+	} else {
+		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+				OTG_SET_V_TOTAL_MIN_MASK, 0,
+				OTG_V_TOTAL_MIN_SEL, 0,
+				OTG_V_TOTAL_MAX_SEL, 0,
+				OTG_FORCE_LOCK_ON_EVENT, 0);
+
+		optc->funcs->set_vtotal_min_max(optc, 0, 0);
+	}
+}
+
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_SET_8(OTG_TRIGA_CNTL, 0,
+			OTG_TRIGA_SOURCE_SELECT, 21,
+			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+			OTG_TRIGA_POLARITY_SELECT, 0,
+			OTG_TRIGA_FREQUENCY_SELECT, 0,
+			OTG_TRIGA_DELAY, 0,
+			OTG_TRIGA_CLEAR, 1);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+		.validate_timing = optc1_validate_timing,
+		.program_timing = optc1_program_timing,
+		.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+		.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+		.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+		.program_global_sync = optc1_program_global_sync,
+		.enable_crtc = optc2_enable_crtc,
+		.disable_crtc = optc1_disable_crtc,
+		/* used by enable_timing_synchronization. Not needed for FPGA */
+		.is_counter_moving = optc1_is_counter_moving,
+		.get_position = optc1_get_position,
+		.get_frame_count = optc1_get_vblank_counter,
+		.get_scanoutpos = optc1_get_crtc_scanoutpos,
+		.get_otg_active_size = optc1_get_otg_active_size,
+		.set_early_control = optc1_set_early_control,
+		/* used by enable_timing_synchronization. Not needed for FPGA */
+		.wait_for_state = optc1_wait_for_state,
+		.set_blank_color = optc3_program_blank_color,
+		.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+		.triplebuffer_lock = optc3_triplebuffer_lock,
+		.triplebuffer_unlock = optc2_triplebuffer_unlock,
+		.enable_reset_trigger = optc1_enable_reset_trigger,
+		.enable_crtc_reset = optc1_enable_crtc_reset,
+		.disable_reset_trigger = optc1_disable_reset_trigger,
+		.lock = optc3_lock,
+		.unlock = optc1_unlock,
+		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+		.enable_optc_clock = optc1_enable_optc_clock,
+		.set_drr = optc301_set_drr,
+		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+		.set_vtotal_min_max = optc3_set_vtotal_min_max,
+		.set_static_screen_control = optc1_set_static_screen_control,
+		.program_stereo = optc1_program_stereo,
+		.is_stereo_left_eye = optc1_is_stereo_left_eye,
+		.tg_init = optc3_tg_init,
+		.is_tg_enabled = optc1_is_tg_enabled,
+		.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+		.clear_optc_underflow = optc1_clear_optc_underflow,
+		.setup_global_swap_lock = NULL,
+		.get_crc = optc1_get_crc,
+		.configure_crc = optc2_configure_crc,
+		.set_dsc_config = optc3_set_dsc_config,
+		.get_dsc_status = optc2_get_dsc_status,
+		.set_dwb_source = NULL,
+		.set_odm_bypass = optc3_set_odm_bypass,
+		.set_odm_combine = optc3_set_odm_combine,
+		.get_optc_source = optc2_get_optc_source,
+		.set_out_mux = optc3_set_out_mux,
+		.set_drr_trigger_window = optc3_set_drr_trigger_window,
+		.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+		.set_gsl = optc2_set_gsl,
+		.set_gsl_source_select = optc2_set_gsl_source_select,
+		.set_vtg_params = optc1_set_vtg_params,
+		.program_manual_trigger = optc2_program_manual_trigger,
+		.setup_manual_trigger = optc301_setup_manual_trigger,
+		.get_hw_timing = optc1_get_hw_timing,
+		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn30_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
new file mode 100644
index 000000000000..b49585682a15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN301_H__ +#define __DC_OPTC_DCN301_H__ + +#include "dcn20/dcn20_optc.h" +#include "dcn30/dcn30_optc.h" + +void dcn301_timing_generator_init(struct optc *optc1); +void optc301_setup_manual_trigger(struct timing_generator *optc); +void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); + +#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5ac2a272c380..79d6697d13b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -42,7 +42,7 @@ #include "dcn30/dcn30_hubp.h" #include "irq/dcn30/irq_service_dcn30.h" #include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_optc.h" +#include "dcn301/dcn301_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" @@ -702,23 +702,6 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = false, - .disable_hubp_power_gate = false, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .use_max_lb = false, -}; - static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -872,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create( tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; - dcn30_timing_generator_init(tgn10); + dcn301_timing_generator_init(tgn10); return &tgn10->base; } @@ -1047,13 +1030,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn301_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn301_hwseq_create, -}; - static void dcn301_destruct(struct dcn301_resource_pool *pool) { unsigned int i; @@ -1403,7 +1379,7 @@ static struct resource_funcs dcn301_res_pool_funcs = { .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1449,9 +1425,9 @@ static bool dcn301_resource_construct( dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; 
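
optc301_set_drr() in the new dcn301_optc.c above takes vertical_total_min/max in whole lines and writes them through set_vtotal_min_max() as value - 1, the hardware's lines-minus-one encoding. Below is a minimal sketch of how a caller might derive those fields from a FreeSync refresh range; the struct and helper are hypothetical, not part of this patch.

	#include <stdint.h>

	/* Hypothetical mirror of the two drr_params fields used above. */
	struct example_drr_params {
		uint32_t vertical_total_min;
		uint32_t vertical_total_max;
	};

	/*
	 * v_total = pixel_clock / (h_total * refresh), so the highest
	 * refresh rate gives vertical_total_min and the lowest gives
	 * vertical_total_max; optc301_set_drr() then subtracts 1 when
	 * programming OTG_V_TOTAL_MIN/MAX.
	 */
	static struct example_drr_params
	refresh_range_to_drr(uint64_t pixel_clock_hz, uint32_t h_total,
			     uint32_t min_hz, uint32_t max_hz)
	{
		struct example_drr_params p;

		p.vertical_total_min =
			(uint32_t)(pixel_clock_hz / ((uint64_t)h_total * max_hz));
		p.vertical_total_max =
			(uint32_t)(pixel_clock_hz / ((uint64_t)h_total * min_hz));
		return p;
	}

For a 148.5 MHz pixel clock with h_total 2200, a 48 to 60 Hz range yields vertical_total_min = 1125 and vertical_total_max = 1406.
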
dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; @@ -1513,10 +1489,7 @@ static bool dcn301_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1710,9 +1683,8 @@ static bool dcn301_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn301_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 9f93c43115ba..447abcd593be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -95,31 +95,15 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true + .exit_idle_opt_for_cursor_updates = true, + .enable_legacy_fast_update = false, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -954,13 +938,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn302_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn302_hwseq_create, -}; - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; @@ -1159,7 +1136,7 @@ static struct resource_funcs dcn302_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1252,6 +1229,7 @@ static bool dcn302_resource_construct( dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1309,8 +1287,6 @@ static bool dcn302_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) 
dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1489,8 +1465,7 @@ static bool dcn302_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 7f72ef882ca4..adf4989177f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -81,27 +81,11 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_idle_power_optimizations = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -881,13 +865,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn303_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn303_hwseq_create, -}; - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; @@ -1085,7 +1062,7 @@ static struct resource_funcs dcn303_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1176,6 +1153,7 @@ static bool dcn303_resource_construct( dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1213,6 +1191,7 @@ static bool dcn303_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ if (ctx->dc_bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; @@ -1232,8 +1211,6 @@ static bool dcn303_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - 
dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1400,8 +1377,7 @@ static bool dcn303_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 4c2fdfea162f..8664f0c4c9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + if (dccg->dpp_clock_gated[dpp_inst]) { + /* + * Do not update the DPPCLK DTO if the clock is stopped. + * It is treated the same as if the pipe itself were in PG. + */ + return; + } + if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; int modulo, phase; @@ -76,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk( struct dcn_dccg *dccg_dcn, enum phyd32clk_clock_source src) { - if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (src == PHYD32CLKC) src = PHYD32CLKF; if (src == PHYD32CLKD) @@ -276,19 +285,11 @@ void dccg31_enable_symclk32_le( /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); @@ -311,19 +312,38 @@ void dccg31_disable_symclk32_le( REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + return; + + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 
1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -652,10 +672,8 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { - dccg31_disable_symclk32_le(dccg, 0); - dccg31_disable_symclk32_le(dccg, 1); - } + dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false); + dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { dccg31_disable_dpstreamclk(dccg, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 0902ce5eb8a1..e3caaacf7493 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 745a5d187a98..4596f3bac1b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -117,7 +117,6 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; memset(cmd, 0, sizeof(*cmd)); cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; @@ -126,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) + if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; return true; @@ -425,7 +424,6 @@ static bool link_dpia_control(struct dc_context *dc_ctx, struct dmub_cmd_dig_dpia_control_data *dpia_control) { union dmub_rb_cmd cmd; - struct dc_dmub_srv *dmub = dc_ctx->dmub_srv; memset(&cmd, 0, sizeof(cmd)); @@ -438,9 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx, cmd.dig1_dpia_control.dpia_control = *dpia_control; - dc_dmub_srv_cmd_queue(dmub, &cmd); - dc_dmub_srv_cmd_execute(dmub); - dc_dmub_srv_wait_idle(dmub); + dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -562,7 +558,7 @@ void dcn31_link_encoder_disable_output( struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; - if (!dcn10_is_dig_enabled(enc)) + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) return; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 0278bae50a9d..45143459eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank( VID_STREAM_STATUS, 0, 10, 5000); - /* Disable SDP tranmission */ + /* Disable SDP transmission */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 7e7cd5b64e6a..1f4e0b6261ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne default: break; } + DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); @@ -1017,8 +1018,8 @@ void hubbub31_init(struct hubbub *hubbub) /*done in hwseq*/ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 0, - DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 62ce36c75c4d..2a7f47642a44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -117,28 +117,6 @@ void dcn31_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); if (hws->funcs.disable_vga) @@ -154,23 +132,21 @@ void dcn31_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { - - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); - - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + if (res_pool->dccg && res_pool->hubbub) { + + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); + + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ 
-197,10 +173,6 @@ void dcn31_init_hw(struct dc *dc) } } - /* Enables outbox notifications for usb4 dpia */ - if (dc->res_pool->usb4_dpia_count) - dmub_enable_outbox_notification(dc->ctx->dmub_srv); - /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); @@ -297,8 +269,9 @@ void dcn31_init_hw(struct dc *dc) #endif // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } void dcn31_dsc_pg_control( @@ -442,9 +415,7 @@ void dcn31_z10_save_init(struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_z10_restore(const struct dc *dc) @@ -462,9 +433,7 @@ void dcn31_z10_restore(const struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -560,35 +529,31 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. - */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. 
+ */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } - } else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); } pipe_ctx->stream = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 3a32810bbe38..1d7bc1e39afe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -58,6 +58,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, @@ -66,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, + .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -109,7 +110,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { @@ -153,8 +154,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn31_funcs; dc->hwseq->funcs = dcn31_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 11ea9d13e312..217acd4e292a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; - return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd); + return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) @@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; cmd.panel_cntl.data.bl_pwm_ref_div2 = 
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return 0; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index ff8cd5076434..82de4fe2637f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -96,6 +96,7 @@ #include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" +#include "dce/dmub_replay.h" #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" @@ -887,32 +888,16 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, + .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1341,13 +1326,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
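/*
 * Editor's sketch: dcn31_panel_cntl_hw_init() above seeds the DMUB command
 * with the driver's cached backlight registers, executes it with
 * DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, and then reads the values back out of
 * the same command buffer. A toy version of that round trip, with
 * simplified stand-in types (the real fields live in union dmub_rb_cmd):
 */
#include <stdbool.h>

struct sketch_panel_cmd {
	struct { unsigned int bl_pwm_cntl, bl_pwm_period_cntl; } data;
};

struct sketch_stored_regs {
	unsigned int BL_PWM_CNTL, BL_PWM_PERIOD_CNTL;
};

static bool sketch_execute_with_reply(struct sketch_panel_cmd *cmd)
{
	(void)cmd;	/* mock: firmware may rewrite cmd->data here */
	return true;
}

static unsigned int sketch_panel_hw_init(struct sketch_stored_regs *stored)
{
	struct sketch_panel_cmd cmd = { { 0, 0 } };

	/* seed the command with the driver's cached values */
	cmd.data.bl_pwm_cntl = stored->BL_PWM_CNTL;
	cmd.data.bl_pwm_period_cntl = stored->BL_PWM_PERIOD_CNTL;

	if (!sketch_execute_with_reply(&cmd))
		return 0;	/* same bail-out as the hunk above */

	/* the reply overwrites the request in place */
	stored->BL_PWM_CNTL = cmd.data.bl_pwm_cntl;
	stored->BL_PWM_PERIOD_CNTL = cmd.data.bl_pwm_period_cntl;
	return stored->BL_PWM_CNTL;
}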
- * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1360,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) { unsigned int i; @@ -1512,6 +1481,9 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } @@ -1809,8 +1781,8 @@ bool dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_SKIP(fast); goto validate_out; } - - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -1851,7 +1823,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1988,10 +1960,7 @@ static bool dcn31_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2121,6 +2090,14 @@ static bool dcn31_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, @@ -2195,9 +2172,8 @@ static bool dcn31_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index de7bfba2c179..ad3f019a784f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -45,6 +45,16 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg314_trigger_dio_fifo_resync( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t dispclk_rdivider_value = 0; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); +} + static void dccg314_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -322,6 +332,9 @@ static void dccg314_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + if (dccg->dpp_clock_gated[dpp_inst] != clock_on) + return; + if (clock_on) { /* turn off the DTO and leave phase/modulo at max */ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0); @@ -335,6 +348,8 @@ static void dccg314_dpp_root_clock_control( DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); } + + dccg->dpp_clock_gated[dpp_inst] = !clock_on; } static const struct dccg_funcs dccg314_funcs = { @@ -347,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg314_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, @@ -357,6 +373,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, .set_pixel_rate_div = dccg314_set_pixel_rate_div, + .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h index 90687a9e8fdd..8e07d3151f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h @@ -192,7 +192,10 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\ - DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh) + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh) struct dccg *dccg314_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index cc3fe9cac5b5..4d2820ffe468 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -390,6 +390,35 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) pix_per_cycle); } +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct 
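/*
 * Editor's sketch: dccg314_trigger_dio_fifo_resync() above reads the
 * DENTIST_DISPCLK_RDIVIDER field and writes that same value into
 * DENTIST_DISPCLK_WDIVIDER. Re-committing the divider the DENTIST is
 * already using appears to nudge the DIO FIFO to resynchronize without
 * changing the effective DISPCLK. Standalone shape of the operation,
 * with the REG_GET/REG_UPDATE helpers mocked out:
 */
#include <stdint.h>

static uint32_t sketch_read_rdivider(void) { return 2; }	/* mock REG_GET */
static void sketch_write_wdivider(uint32_t v) { (void)v; }	/* mock REG_UPDATE */

static void sketch_trigger_dio_fifo_resync(void)
{
	uint32_t rdiv = sketch_read_rdivider();

	/* the dcn32 variant later in this diff additionally skips the
	 * write when the value read back is 0, which is not a valid
	 * divider setting */
	if (rdiv != 0)
		sketch_write_wdivider(rdiv);
}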
dc_state *context) +{ + unsigned int i; + struct pipe_ctx *pipe = NULL; + bool otg_disabled[MAX_PIPES] = {false}; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->top_pipe || pipe->prev_odm_pipe) + continue; + + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); + otg_disabled[i] = true; + } + } + + hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (otg_disabled[i]) + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } +} + void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on) { if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp) @@ -400,29 +429,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on); } -void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) -{ - struct dc_context *ctx = hws->ctx; - union dmub_rb_cmd cmd; - - if (hws->ctx->dc->debug.disable_hubp_power_gate) - return; - - PERF_TRACE(); - - memset(&cmd, 0, sizeof(cmd)); - cmd.domain_control.header.type = DMUB_CMD__VBIOS; - cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL; - cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data); - cmd.domain_control.data.inst = hubp_inst; - cmd.domain_control.data.power_gate = !power_on; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); - dc_dmub_srv_wait_idle(ctx->dmub_srv); - - PERF_TRACE(); -} static void apply_symclk_on_tx_off_wa(struct dc_link *link) { /* There are use cases where SYMCLK is referenced by OTG. 
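/*
 * Editor's sketch: dcn314_resync_fifo_dccg_dio() above follows a
 * disable / act / restore shape -- it shuts down only the OTGs that are
 * safe to touch, remembers which ones in a local array, triggers the
 * resync, then re-enables exactly that set. A compilable toy of the
 * bookkeeping, with the DC pipe and timing-generator types replaced by
 * stand-ins:
 */
#include <stdbool.h>

#define SKETCH_MAX_PIPES 6

struct sketch_pipe { bool running; bool safe_to_stop; };

static void sketch_disable_crtc(struct sketch_pipe *p) { p->running = false; }
static void sketch_enable_crtc(struct sketch_pipe *p)  { p->running = true; }
static void sketch_trigger_resync(void) { }

static void sketch_resync_all(struct sketch_pipe pipes[], int n)
{
	bool otg_disabled[SKETCH_MAX_PIPES] = { false };
	int i;

	for (i = 0; i < n; i++) {
		if (pipes[i].running && pipes[i].safe_to_stop) {
			sketch_disable_crtc(&pipes[i]);
			otg_disabled[i] = true;
		}
	}

	sketch_trigger_resync();

	for (i = 0; i < n; i++)
		if (otg_disabled[i])
			sketch_enable_crtc(&pipes[i]);
}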
For instance diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 6d0b62503caa..eafcc4ea6d24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,7 +41,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); -void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index a588f46b166f..4ef85c3a0688 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -60,6 +60,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, @@ -68,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, + .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -111,7 +112,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn314_private_funcs = { @@ -138,7 +139,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .plane_atomic_power_down = dcn10_plane_atomic_power_down, .enable_power_gating_plane = dcn314_enable_power_gating_plane, .dpp_root_clock_control = dcn314_dpp_root_clock_control, - .hubp_pg_control = dcn314_hubp_pg_control, + .hubp_pg_control = dcn31_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn314_update_odm, .dsc_pg_control = dcn314_dsc_pg_control, @@ -151,6 +152,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn314_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio, }; void dcn314_hw_sequencer_construct(struct dc *dc) @@ -158,8 +160,4 @@ void dcn314_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn314_funcs; dc->hwseq->funcs = dcn314_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c 
index abeeede38fb3..004beed9bd44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -93,6 +93,7 @@ #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn314/display_mode_vba_314.h" @@ -117,23 +118,6 @@ #define regBIF_BX2_BIOS_SCRATCH_6 0x003e #define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 -struct IP_BASE_INSTANCE { - unsigned int segment[MAX_SEGMENT]; -}; - -struct IP_BASE { - struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; - -static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } } } }; - - #define DC_LOGGER_INIT(logger) enum dcn31_clk_src_array_id { @@ -887,12 +871,13 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_z9_disable_interface = true, .minimum_z8_residency_time = 2000, .psr_skip_crtc_disable = true, + .replay_skip_crtc_disabled = true, .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, + .disable_dpp_power_gate = false, + .disable_hubp_power_gate = false, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, @@ -921,6 +906,22 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + + .root_clock_optimization = { + .bits = { + .dpp = true, + .dsc = true, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, + } + }, + .seamless_boot_odm_combine = true }; @@ -946,6 +947,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1030,6 +1032,28 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) }; +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder | Host Router + * 0 | C | 0 + * 1 | First Available | 0 + * 2 | D | 1 + * 3 | First Available | 1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { + ENGINE_ID_DIGC, + ENGINE_ID_DIGC, + ENGINE_ID_DIGD, + ENGINE_ID_DIGD +}; + +static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ + return dpia_to_preferred_enc_id_table[dpia_index]; +} + static struct dce_i2c_hw *dcn31_i2c_hw_create( struct dc_context *ctx, uint32_t inst) @@ -1375,13 +1399,6 @@ static struct dce_hwseq *dcn314_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
- * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1394,15 +1411,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn314_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn314_hwseq_create, -}; - static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) { unsigned int i; @@ -1545,6 +1553,9 @@ static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } @@ -1700,7 +1711,9 @@ static bool filter_modes_for_single_channel_workaround(struct dc *dc, struct dc_state *context) { // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR - if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) { + if (dc->clk_mgr->bw_params->vram_type == 34 && + dc->clk_mgr->bw_params->num_channels < 2 && + context->stream_count > 1) { int total_phy_pix_clk = 0; for (int i = 0; i < context->stream_count; i++) @@ -1749,8 +1762,8 @@ bool dcn314_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_SKIP(fast); goto validate_out; } - - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -1782,7 +1795,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn314_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1794,6 +1807,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .update_bw_bounding_box = dcn314_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn314_get_panel_config_defaults, + .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, }; static struct clock_source *dcn30_clock_source_create( @@ -1920,6 +1934,14 @@ static bool dcn314_resource_construct( dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; + + /* Disable pipe power gating */ + dc->debug.disable_dpp_power_gate = true; + dc->debug.disable_hubp_power_gate = true; + + /* Disable root clock optimization */ + dc->debug.root_clock_optimization.u32All = 0; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2034,6 +2056,14 @@ static bool dcn314_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + 
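/*
 * Editor's sketch: dcn314_get_preferred_eng_id_dpia() above indexes the
 * four-entry preferred-encoder table directly by dpia_index. Standalone
 * version of the lookup; the bounds check is added here for illustration
 * only (the kernel code relies on callers passing a valid index), and
 * the enum values are stand-ins for the real engine IDs.
 */
enum sketch_eng_id { SKETCH_ENG_DIGC, SKETCH_ENG_DIGD, SKETCH_ENG_UNKNOWN };

static const enum sketch_eng_id sketch_dpia_to_enc[] = {
	SKETCH_ENG_DIGC,	/* DPIA 0: encoder C, host router 0 */
	SKETCH_ENG_DIGC,	/* DPIA 1: first available, host router 0 */
	SKETCH_ENG_DIGD,	/* DPIA 2: encoder D, host router 1 */
	SKETCH_ENG_DIGD,	/* DPIA 3: first available, host router 1 */
};

static enum sketch_eng_id sketch_get_preferred_eng_id_dpia(unsigned int idx)
{
	if (idx >= sizeof(sketch_dpia_to_enc) / sizeof(sketch_dpia_to_enc[0]))
		return SKETCH_ENG_UNKNOWN;	/* added guard, not in the original */
	return sketch_dpia_to_enc[idx];
}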
BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, @@ -2101,8 +2131,7 @@ static bool dcn314_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 41c972c8eb19..127487ea3d7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -136,6 +136,9 @@ #define DCN3_15_MAX_DET_SIZE 384 #define DCN3_15_CRB_SEGMENT_SIZE_KB 64 +#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB) +/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */ +#define MIN_RESERVED_DET_SEGS 2 enum dcn31_clk_src_array_id { DCN31_CLK_SRC_PLL0, @@ -884,31 +887,15 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1339,13 +1326,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
- * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1358,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn315_resource_destruct(struct dcn315_resource_pool *pool) { unsigned int i; @@ -1636,21 +1607,69 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +static int source_format_to_bpp (enum source_format_class SourcePixelFormat) +{ + if (SourcePixelFormat == dm_444_64) + return 8; + else if (SourcePixelFormat == dm_444_16) + return 2; + else if (SourcePixelFormat == dm_444_8) + return 1; + else if (SourcePixelFormat == dm_rgbe_alpha) + return 5; + else if (SourcePixelFormat == dm_420_8) + return 3; + else if (SourcePixelFormat == dm_420_12) + return 6; + else + return 4; +} + +static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) +{ + int i; + struct resource_context *res_ctx = &context->res_ctx; + + /*Don't apply for single stream*/ + if (context->stream_count < 2) + return false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /*Don't apply if scaling*/ + if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width || + res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height || + (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width + != res_ctx->pipe_ctx[i].plane_state->dst_rect.width || + res_ctx->pipe_ctx[i].plane_state->src_rect.height + != res_ctx->pipe_ctx[i].plane_state->dst_rect.height))) + return false; + /*Don't apply if MPO to avoid transition issues*/ + if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) + return false; + } + return true; +} + static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { - int i, pipe_cnt; + int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; + int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; + bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); DC_FP_END(); - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; if (!res_ctx->pipe_ctx[i].stream) @@ -1671,6 +1690,28 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + 
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); + /* Ceil to crb segment size */ + int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( + &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); + + if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { + bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; + split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); + split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + /* Minimum 2 segments to allow mpc/odm combine if its used later */ + if (approx_det_segs_required_for_pstate < 2) + approx_det_segs_required_for_pstate = 2; + if (split_required) + approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; + pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; + remaining_det_segs -= approx_det_segs_required_for_pstate; + } else + remaining_det_segs = -1; + crb_pipes++; + } DC_FP_END(); if (pipes[pipe_cnt].dout.dsc_enable) { @@ -1689,16 +1730,55 @@ static int dcn315_populate_dml_pipes_from_context( break; } } - pipe_cnt++; } + /* Spread remaining unreserved crb evenly among all pipes*/ + if (pixel_rate_crb) { + for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!pipe->stream) + continue; + + /* Do not use asymetric crb if not enough for pstate support */ + if (remaining_det_segs < 0) { + pipes[pipe_cnt].pipe.src.det_size_override = 0; + pipe_cnt++; + continue; + } + + if (!pipe->top_pipe && !pipe->prev_odm_pipe) { + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs > MIN_RESERVED_DET_SEGS) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
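/*
 * Editor's sketch: the dcn315 pstate-CRB hunks above reserve DET space in
 * 64 KB segments per OTG-master pipe: the DML-side estimate is clamped to
 * a 2-segment floor (so MPC/ODM combine can still happen later), rounded
 * up to an even count when the pipe will be split, and drawn from a shared
 * pool; if even a 2-way split cannot fit, the pool is marked exhausted and
 * the code falls back to symmetric DET. A compilable mirror of just that
 * bookkeeping (the estimate itself comes from DML and is a parameter here):
 */
#include <stdbool.h>

#define SKETCH_MAX_DET_SEGS 6	/* 384 KB / 64 KB, per the new DCN3_15 defines */

static int sketch_reserve_det_segs(int approx_segs, bool split_required,
				   int *remaining)
{
	if (approx_segs > 2 * SKETCH_MAX_DET_SEGS) {
		*remaining = -1;	/* cannot fit: force symmetric fallback */
		return 0;
	}
	if (approx_segs < 2)
		approx_segs = 2;	/* keep room for later MPC/ODM combine */
	if (split_required)
		approx_segs += approx_segs % 2;	/* splits need an even count */

	*remaining -= approx_segs;
	return approx_segs;
}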
1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + + crb_idx++; + } + pipe_cnt++; + } + } + if (pipe_cnt) context->bw_ctx.dml.ip.det_buffer_size_kbytes = (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; - ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { if (is_dual_plane(pipe->plane_state->format) @@ -1738,7 +1818,7 @@ static struct resource_funcs dcn315_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn315_update_soc_for_wm_a, .populate_dml_pipes = dcn315_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1845,10 +1925,7 @@ static bool dcn315_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2029,9 +2106,8 @@ static bool dcn315_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
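/*
 * Editor's sketch: the "spread remaining unreserved crb evenly" loop above
 * uses the classic base-share-plus-remainder idiom: each of crb_pipes gets
 * pool / crb_pipes extra segments, and the first pool % crb_pipes pipes get
 * one more, after MIN_RESERVED_DET_SEGS is held back so MPO transitions
 * keep a claimable compbuf margin. Worked standalone example:
 */
#include <stdio.h>

static int sketch_extra_segs(int remaining, int reserved, int crb_pipes, int crb_idx)
{
	int pool = remaining - reserved;

	if (pool <= 0 || crb_pipes <= 0)
		return 0;
	return pool / crb_pipes + (crb_idx < pool % crb_pipes ? 1 : 0);
}

int main(void)
{
	/* 12 spare segments, 2 held back, 3 pipes -> shares of 4, 3, 3 */
	for (int i = 0; i < 3; i++)
		printf("pipe %d: +%d segments\n", i, sketch_extra_segs(12, 2, 3, i));
	return 0;
}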
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 9ead347a33e9..5fe2c61527df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -884,30 +884,14 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1340,13 +1324,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1359,15 +1336,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn316_resource_destruct(struct dcn316_resource_pool *pool) { unsigned int i; @@ -1737,7 +1705,7 @@ static struct resource_funcs dcn316_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn316_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1844,10 +1812,7 @@ static bool dcn316_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2028,9 +1993,8 @@ static bool dcn316_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index ffbb739d85b6..921f58c0c729 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -42,18 +42,17 @@ #define DC_LOGGER \ dccg->ctx->logger -/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV - * without the probability of causing a DIG FIFO error. - */ -static void dccg32_wait_for_dentist_change_done( +static void dccg32_trigger_dio_fifo_resync( struct dccg *dccg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t dispclk_rdivider_value = 0; - uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL); + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); - REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value); - REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); + /* Not valid for the WDIVIDER to be set to 0 */ + if (dispclk_rdivider_value != 0) + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } static void dccg32_get_pixel_rate_div( @@ -124,29 +123,21 @@ static void dccg32_set_pixel_rate_div( REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG0_PIXEL_RATE_DIVK1, k1, OTG0_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 1: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG1_PIXEL_RATE_DIVK1, k1, OTG1_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 2: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG2_PIXEL_RATE_DIVK1, k1, OTG2_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 3: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG3_PIXEL_RATE_DIVK1, k1, OTG3_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; default: BREAK_TO_DEBUGGER(); @@ -290,7 +281,8 @@ static void dccg32_set_dpstreamclk( struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* set the dtbclk_p source */ - dccg32_set_dtbclk_p_src(dccg, src, otg_inst); + /* always program refclk as DTBCLK. 
No use-case expected to require DPREFCLK as refclk */ + dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst); /* enabled to select one of the DTBCLKs for pipe */ switch (dp_hpo_inst) { @@ -352,6 +344,7 @@ static const struct dccg_funcs dccg32_funcs = { .otg_add_pixel = dccg32_otg_add_pixel, .otg_drop_pixel = dccg32_otg_drop_pixel, .set_pixel_rate_div = dccg32_set_pixel_rate_div, + .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync, }; struct dccg *dccg32_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h index 8071ab98d708..cf5508718122 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h @@ -112,8 +112,9 @@ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\ - DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh) - + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh) struct dccg *dccg32_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index eb08ccc38e79..8bfef6d095b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -42,8 +42,8 @@ hubbub2->shifts->field_name, hubbub2->masks->field_name /** - * @DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for - * DCN32 + * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for + * DCN32 */ #define DCN32_CRB_SEGMENT_SIZE_KB 64 @@ -955,8 +955,8 @@ void hubbub32_init(struct hubbub *hubbub) /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 0, - DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); } /* ignore the "df_pre_cstate_req" from the SDP port control. 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index 2d604f7ee782..ca5b4b28a664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -179,6 +179,7 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp3_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp32_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 1f5ee5cde6e1..680e7fa8d18a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -47,11 +47,9 @@ #include "clk_mgr.h" #include "dsc.h" #include "dcn20/dcn20_optc.h" -#include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" #include "link.h" -#include "dmub/inc/dmub_subvp_state.h" #define DC_LOGGER_INIT(logger) @@ -274,8 +272,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -309,8 +306,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); cmd.cab.cab_alloc_ways = ways; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -326,9 +322,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -413,6 +407,30 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, } } +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) +{ + struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; + bool lock = params->subvp_pipe_control_lock_fast_params.lock; + struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; + bool subvp_immediate_flip = false; + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { + if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && + pipe_ctx->plane_state->flip_immediate) + subvp_immediate_flip = true; + } + + // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
+ if (subvp_immediate_flip) { + union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; + + hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; + hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; + hw_lock_cmd.bits.lock = lock; + hw_lock_cmd.bits.should_release = !lock; + dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); + } +} bool dcn32_set_mpc_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) @@ -428,7 +446,7 @@ bool dcn32_set_mpc_shaper_3dlut( if (stream->func_shaper->type == TF_TYPE_HWPWL) shaper_lut = &stream->func_shaper->pwl; else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, &dpp_base->shaper_params, true); shaper_lut = &dpp_base->shaper_params; @@ -464,7 +482,7 @@ bool dcn32_set_mcm_luts( if (plane_state->blend_tf->type == TF_TYPE_HWPWL) lut_params = &plane_state->blend_tf->pwl; else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->blend_tf, &dpp_base->regamma_params, false); lut_params = &dpp_base->regamma_params; @@ -479,7 +497,7 @@ bool dcn32_set_mcm_luts( else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { // TODO: dpp_base replace ASSERT(false); - cm_helper_translate_curve_to_hw_format( + cm_helper_translate_curve_to_hw_format(plane_state->ctx, plane_state->in_shaper_func, &dpp_base->shaper_params, true); lut_params = &dpp_base->shaper_params; @@ -549,7 +567,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc, bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ - if (pipe_ctx->top_pipe == NULL) { + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { @@ -587,8 +605,8 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (!pipe->stream || (pipe->stream && !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || - pipe->stream->fpo_in_use))) { + if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || + pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); } @@ -596,7 +614,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) /* Today only FPO uses cursor P-State force. Only clear cursor P-State force * if it's not FPO. 
*/ - if (!pipe->stream || (pipe->stream && !pipe->stream->fpo_in_use)) { + if (!pipe->stream || !pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); } @@ -721,6 +739,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc) clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; + clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; + clocks->fclk_p_state_change_support = true; + clocks->p_state_change_support = true; if (dc->debug.disable_boot_optimizations) { clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; } else { @@ -730,9 +751,6 @@ static void dcn32_initialize_min_clocks(struct dc *dc) * freq to ensure that the timing is valid and unchanged. */ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); - clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; - clocks->fclk_p_state_change_support = true; - clocks->p_state_change_support = true; } dc->clk_mgr->funcs->update_clocks( @@ -946,8 +964,10 @@ void dcn32_init_hw(struct dc *dc) // Get DMCUB capabilities if (dc->ctx->dmub_srv) { - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; + dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } } @@ -1125,10 +1145,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign unsigned int odm_combine_factor = 0; bool two_pix_per_container = false; - // For phantom pipes, use the same programming as the main pipes - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { - stream = pipe_ctx->stream->mall_stream_config.paired_stream; - } two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); @@ -1177,6 +1193,36 @@ void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) pix_per_cycle); } +void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) +{ + unsigned int i; + struct pipe_ctx *pipe = NULL; + bool otg_disabled[MAX_PIPES] = {false}; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(pipe, OTG_MASTER)) + continue; + + if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) + && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); + otg_disabled[i] = true; + } + } + + hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (otg_disabled[i]) + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } +} + void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { @@ -1253,7 
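/*
 * Editor's note: the dcn32_update_force_pstate simplifications above rely
 * on a short-circuit identity: in "!a || (a && b)" the right-hand side is
 * only evaluated when a is true, so the expression is equivalent to
 * "!a || b". Exhaustive standalone check:
 */
#include <assert.h>

int main(void)
{
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert((!a || (a && b)) == (!a || b));
	return 0;
}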
+1299,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link) if (link->phy_state.symclk_ref_cnts.otg > 0) { for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) { pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, @@ -1336,7 +1382,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - if (phantom_pipe->stream && phantom_pipe->plane_state) { + if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { phantom_pipe->update_flags.bits.enable = 1; phantom_pipe->update_flags.bits.mpcc = 1; phantom_pipe->update_flags.bits.dppclk = 1; @@ -1346,7 +1392,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) phantom_pipe->update_flags.bits.scaler = 1; phantom_pipe->update_flags.bits.viewport = 1; phantom_pipe->update_flags.bits.det_size = 1; - if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) { + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { phantom_pipe->update_flags.bits.odm = 1; phantom_pipe->update_flags.bits.global_sync = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index 6694c1d14aa3..2d2628f31bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -75,6 +75,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); +void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + void dcn32_subvp_pipe_control_lock(struct dc *dc, struct dc_state *context, bool lock, @@ -82,6 +84,8 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, struct pipe_ctx *top_pipe_to_program, bool subvp_prev_use); +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params); + void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8085f2acb1a9..c7417147dff1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -56,6 +56,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, @@ -64,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, + .set_static_screen_control = dcn30_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -109,7 +110,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = 
dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, @@ -153,6 +155,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, }; @@ -161,8 +164,4 @@ void dcn32_hw_sequencer_init_functions(struct dc *dc) dc->hwss = dcn32_funcs; dc->hwseq->funcs = dcn32_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index c8041cfd594d..1d052f08aff5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -64,7 +64,7 @@ void mpc32_mpc_init(struct mpc *mpc) } } -static void mpc32_power_on_blnd_lut( +void mpc32_power_on_blnd_lut( struct mpc *mpc, uint32_t mpcc_id, bool power_on) @@ -75,7 +75,7 @@ static void mpc32_power_on_blnd_lut( if (power_on) { REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0); REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5); - } else { + } else if (!mpc->ctx->dc->debug.disable_mem_low_power) { ASSERT(false); /* TODO: change to mpc * dpp_base->ctx->dc->optimized_required = true; @@ -120,7 +120,7 @@ static enum dc_lut_mode mpc32_get_post1dlut_current(struct mpc *mpc, uint32_t mp return mode; } -static void mpc32_configure_post1dlut( +void mpc32_configure_post1dlut( struct mpc *mpc, uint32_t mpcc_id, bool is_ram_a) @@ -163,7 +163,7 @@ static void mpc32_post1dlut_get_reg_field( } /*program blnd lut RAM A*/ -static void mpc32_program_post1dluta_settings( +void mpc32_program_post1dluta_settings( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_params *params) @@ -192,7 +192,7 @@ static void mpc32_program_post1dluta_settings( } /*program blnd lut RAM B*/ -static void mpc32_program_post1dlutb_settings( +void mpc32_program_post1dlutb_settings( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_params *params) @@ -220,7 +220,7 @@ static void mpc32_program_post1dlutb_settings( cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs); } -static void mpc32_program_post1dlut_pwl( +void mpc32_program_post1dlut_pwl( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_result_data *rgb, @@ -321,7 +321,7 @@ static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_ } -static void mpc32_configure_shaper_lut( +void mpc32_configure_shaper_lut( struct mpc *mpc, bool is_ram_a, uint32_t mpcc_id) @@ -336,7 +336,7 @@ static void mpc32_configure_shaper_lut( } -static void mpc32_program_shaper_luta_settings( +void mpc32_program_shaper_luta_settings( struct mpc *mpc, const struct pwl_params *params, uint32_t mpcc_id) @@ -486,7 +486,7 @@ static void mpc32_program_shaper_luta_settings( } -static void mpc32_program_shaper_lutb_settings( +void 
mpc32_program_shaper_lutb_settings( struct mpc *mpc, const struct pwl_params *params, uint32_t mpcc_id) @@ -637,7 +637,7 @@ static void mpc32_program_shaper_lutb_settings( } -static void mpc32_program_shaper_lut( +void mpc32_program_shaper_lut( struct mpc *mpc, const struct pwl_result_data *rgb, uint32_t num, @@ -671,7 +671,7 @@ static void mpc32_program_shaper_lut( } -static void mpc32_power_on_shaper_3dlut( +void mpc32_power_on_shaper_3dlut( struct mpc *mpc, uint32_t mpcc_id, bool power_on) @@ -789,7 +789,7 @@ static enum dc_lut_mode get3dlut_config( } -static void mpc32_select_3dlut_ram( +void mpc32_select_3dlut_ram( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -803,7 +803,7 @@ static void mpc32_select_3dlut_ram( } -static void mpc32_select_3dlut_ram_mask( +void mpc32_select_3dlut_ram_mask( struct mpc *mpc, uint32_t ram_selection_mask, uint32_t mpcc_id) @@ -816,7 +816,7 @@ static void mpc32_select_3dlut_ram_mask( } -static void mpc32_set3dlut_ram12( +void mpc32_set3dlut_ram12( struct mpc *mpc, const struct dc_rgb *lut, uint32_t entries, @@ -848,7 +848,7 @@ static void mpc32_set3dlut_ram12( } -static void mpc32_set3dlut_ram10( +void mpc32_set3dlut_ram10( struct mpc *mpc, const struct dc_rgb *lut, uint32_t entries, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h index 2c2ecd053806..9ac584fa89ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h @@ -332,4 +332,65 @@ void dcn32_mpc_construct(struct dcn30_mpc *mpc30, int num_mpcc, int num_rmu); +void mpc32_power_on_blnd_lut( + struct mpc *mpc, + uint32_t mpcc_id, + bool power_on); +void mpc32_program_post1dlut_pwl( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_result_data *rgb, + uint32_t num); +void mpc32_program_post1dlutb_settings( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_params *params); +void mpc32_program_post1dluta_settings( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_params *params); +void mpc32_configure_post1dlut( + struct mpc *mpc, + uint32_t mpcc_id, + bool is_ram_a); +void mpc32_program_shaper_lut( + struct mpc *mpc, + const struct pwl_result_data *rgb, + uint32_t num, + uint32_t mpcc_id); +void mpc32_program_shaper_lutb_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t mpcc_id); +void mpc32_program_shaper_luta_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t mpcc_id); +void mpc32_configure_shaper_lut( + struct mpc *mpc, + bool is_ram_a, + uint32_t mpcc_id); +void mpc32_power_on_shaper_3dlut( + struct mpc *mpc, + uint32_t mpcc_id, + bool power_on); +void mpc32_set3dlut_ram10( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t mpcc_id); +void mpc32_set3dlut_ram12( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t mpcc_id); +void mpc32_select_3dlut_ram_mask( + struct mpc *mpc, + uint32_t ram_selection_mask, + uint32_t mpcc_id); +void mpc32_select_3dlut_ram( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + uint32_t mpcc_id); #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c index 2ee798965bc2..8abb94f60078 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c @@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int 
*opp_id, i optc1->opp_count = opp_cnt; } -static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -106,8 +106,11 @@ static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, b OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0); } /** - * Enable CRTC - * Enable CRTC - call ASIC Control Object to enable Timing generator. + * optc32_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance. + * + * Return: If CRTC is enabled, return true. */ static bool optc32_enable_crtc(struct timing_generator *optc) { @@ -245,16 +248,9 @@ static void optc32_set_drr( } optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - optc32_setup_manual_trigger(optc); - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); } + + optc32_setup_manual_trigger(optc); } static struct timing_generator_funcs dcn32_tg_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h index b92ba8c75694..abf0121a1006 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h @@ -179,5 +179,6 @@ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) void dcn32_timing_generator_init(struct optc *optc1); +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 22dd1ebea618..f9d601c8c721 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -726,28 +726,13 @@ static const struct dc_debug_options debug_defaults_drv = { .override_dispclk_programming = true, .disable_fpo_optimizations = false, .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = true, + .disable_fpo_vactive = false, .disable_boot_optimizations = false, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_dsc_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true, - .force_disable_subvp = true + .disable_subvp_high_refresh = false, + .disable_dp_plus_plus_wa = true, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, }; static struct dce_aux *dcn32_aux_engine_create( @@ -1353,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn32_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = 
dcn32_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create, - .create_hwseq = dcn32_hwseq_create, -}; - static void dcn32_resource_destruct(struct dcn32_resource_pool *pool) { unsigned int i; @@ -1733,8 +1709,8 @@ void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && - pipe->plane_state && pipe->stream && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { phantom_plane = pipe->plane_state; phantom_stream = pipe->stream; @@ -1888,6 +1864,8 @@ bool dcn32_validate_bandwidth(struct dc *dc, dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + dcn32_override_min_req_memclk(dc, context); + BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; @@ -1914,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; bool vsr_odm_support = false; @@ -2060,7 +2038,7 @@ static struct resource_funcs dcn32_res_pool_funcs = { .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2198,6 +2176,7 @@ static bool dcn32_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.seamless_odm = true; + dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -2236,6 +2215,7 @@ static bool dcn32_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2254,10 +2234,7 @@ static bool dcn32_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2313,8 +2290,7 @@ static bool dcn32_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -2451,9 +2427,8 @@ static bool dcn32_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); @@ -2510,6 +2485,85 @@ struct resource_pool *dcn32_create_resource_pool( return NULL; } +/* + * Find the optimal free pipe from res_ctx, which could be used as a + * secondary dpp pipe for the input opp head pipe. + * + * a free pipe - a pipe in input res_ctx not yet used for any streams or + * planes. + * secondary dpp pipe - a pipe that gets inserted into an OPP head pipe's MPC + * blending tree. This is typically used for rendering MPO planes or additional + * offset areas in MPCC combine. + * + * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe + * ------------------------------------------------------------------------- + * + * PROBLEM: + * + * 1. There is a hardware limitation that a secondary DPP pipe cannot be + * transferred from one MPC blending tree to the other in a single frame. + * Otherwise it could cause glitches on the screen. + * + * For instance, we cannot transition from state 1 to state 2 in one frame. This + * is because PIPE1 is transferred from PIPE0's MPC blending tree over to + * PIPE2's MPC blending tree, which is not supported by hardware. + * To support this transition we need to first remove PIPE1 from PIPE0's MPC + * blending tree in one frame and then insert PIPE1 to PIPE2's MPC blending tree + * in the next frame. This is not optimal as it will delay the flip for two + * frames. + * + * State 1: + * PIPE0 -- secondary DPP pipe --> (PIPE1) + * PIPE2 -- secondary DPP pipe --> NONE + * + * State 2: + * PIPE0 -- secondary DPP pipe --> NONE + * PIPE2 -- secondary DPP pipe --> (PIPE1) + * + * 2. In general we want to minimize unnecessary changes in pipe topology. + * If a pipe is already added in the current blending tree and there are no + * changes to plane topology, we don't want to swap it with another free pipe + * unnecessarily in every update. Powering up and down a pipe would require a + * full update which delays the flip for one frame. If we use the original pipe + * we don't have to toggle its power. So we can flip faster. + */ +static int find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *new_opp_head) +{ + const struct pipe_ctx *cur_opp_head; + int free_pipe_idx; + + cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx]; + free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree( + cur_res_ctx, new_res_ctx, cur_opp_head); + + /* Up until here if we have not found a free secondary pipe, we will + * need to wait for at least one frame to complete the transition + * sequence. + */ + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( + cur_res_ctx, new_res_ctx, pool); + + /* Up until here if we have not found a free secondary pipe, we will + * need to wait for at least two frames to complete the transition + * sequence. It really doesn't matter which pipe we decide to take from + * the currently enabled pipes; swapping only one pipe or several pipes + * does not change the overall transition time.
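+ *
+ * Taken together with the fallbacks below, the full search ladder and the
+ * extra frames of flip delay each step implies are, roughly:
+ *
+ *   resource_find_free_pipe_used_in_cur_mpc_blending_tree()        0 frames
+ *   recource_find_free_pipe_not_used_in_cur_res_ctx()              1 frame
+ *   resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine()  2 frames
+ *   resource_find_any_free_pipe()                                  2 frames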
+ */ + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( + cur_res_ctx, new_res_ctx, pool); + + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); + + return free_pipe_idx; +} + static struct pipe_ctx *find_idle_secondary_pipe_check_mpo( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -2572,11 +2626,11 @@ static struct pipe_ctx *find_idle_secondary_pipe_check_mpo( return secondary_pipe; } -struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( +static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( struct dc_state *state, const struct resource_pool *pool, struct dc_stream_state *stream, - struct pipe_ctx *head_pipe) + const struct pipe_ctx *head_pipe) { struct resource_context *res_ctx = &state->res_ctx; struct pipe_ctx *idle_pipe, *pipe; @@ -2615,6 +2669,43 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( return idle_pipe; } +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe) +{ + + int free_pipe_idx; + struct pipe_ctx *free_pipe; + + if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm) + return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( + new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); + + free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( + &cur_ctx->res_ctx, &new_ctx->res_ctx, + pool, opp_head_pipe); + if (free_pipe_idx >= 0) { + free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; + free_pipe->pipe_idx = free_pipe_idx; + free_pipe->stream = opp_head_pipe->stream; + free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; + free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; + + free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; + free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; + free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; + free_pipe->plane_res.mpcc_inst = + pool->dpps[free_pipe->pipe_idx]->inst; + } else { + ASSERT(opp_head_pipe); + free_pipe = NULL; + } + + return free_pipe; +} + unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) { /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 3937dbc1e552..103a2b54d025 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -37,9 +37,10 @@ #define DCN3_2_MBLK_WIDTH 128 #define DCN3_2_MBLK_HEIGHT_4BPE 128 #define DCN3_2_MBLK_HEIGHT_8BPE 64 -#define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq -#define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100 +#define SUBVP_HIGH_REFRESH_LIST_LEN 3 +#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 +#define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -47,6 +48,15 @@ extern struct _vcs_dpi_ip_params_st dcn3_2_ip; extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc; +struct subvp_high_refresh_list { + int min_refresh; + int max_refresh; + struct resolution { + int width; + int height; + } res[SUBVP_HIGH_REFRESH_LIST_LEN]; +}; + struct dcn32_resource_pool { struct resource_pool base; }; @@ -126,11 +136,11 
@@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); -struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( - struct dc_state *state, +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream, - struct pipe_ctx *head_pipe); + const struct pipe_ctx *opp_head_pipe); void dcn32_determine_det_override(struct dc *dc, struct dc_state *context, @@ -151,10 +161,18 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); +bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe); + unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); +bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height); + +bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context); + +bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index eeca16faf31a..3ad2b48954e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -94,18 +94,15 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( } /** - * ******************************************************************************************** - * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP + * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP * * Gets total allocation required for the phantom viewport calculated by DML in bytes and * converts to number of cache ways. * - * @param [in] dc: current dc state - * @param [in] context: new dc state + * @dc: current dc state + * @context: new dc state * - * @return: number of ways required for SubVP - * - * ******************************************************************************************** + * Return: number of ways required for SubVP */ uint32_t dcn32_helper_calculate_num_ways_for_subvp( struct dc *dc, @@ -258,11 +255,8 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe) return psr_capable; } -#define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7 - /** - * ******************************************************************************************* - * dcn32_determine_det_override: Determine DET allocation for each pipe + * dcn32_determine_det_override(): Determine DET allocation for each pipe * * This function determines how much DET to allocate for each pipe. The total number of * DET segments will be split equally among each of the streams, and after that the DET @@ -271,6 +265,7 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe) * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the * number of DET for that given plane will be split among the pipes driving that plane. * + * * High level algorithm: * 1. Split total DET among number of streams * 2. 
For each stream, split DET among the planes @@ -278,25 +273,11 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe) * among those pipes. * 4. Assign the DET override to the DML pipes. * - * Special cases: - * - * For two displays that have a large difference in pixel rate, we may experience - * underflow on the larger display when we divide the DET equally. For this, we - * will implement a modified algorithm to assign more DET to larger display. - * - * 1. Calculate difference in pixel rates ( multiplier ) between two displays - * 2. If the multiplier exceeds DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER, then - * implement the modified DET override algorithm. - * 3. Assign smaller DET size for lower pixel display and higher DET size for - * higher pixel display + * @dc: Current DC state + * @context: New DC state to be programmed + * @pipes: Array of DML pipes * - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed - * @param [in]: pipes: Array of DML pipes - * - * @return: void - * - * ******************************************************************************************* + * Return: void */ void dcn32_determine_det_override(struct dc *dc, struct dc_state *context, @@ -309,31 +290,10 @@ void dcn32_determine_det_override(struct dc *dc, struct dc_plane_state *current_plane = NULL; uint8_t stream_count = 0; - int phy_pix_clk_mult, lower_mode_stream_index; - int phy_pix_clk[MAX_PIPES] = {0}; - bool use_new_det_override_algorithm = false; - for (i = 0; i < context->stream_count; i++) { /* Don't count SubVP streams for DET allocation */ - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) { - phy_pix_clk[i] = context->streams[i]->phy_pix_clk; + if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) stream_count++; - } - } - - /* Check for special case with two displays, one with much higher pixel rate */ - if (stream_count == 2) { - ASSERT((phy_pix_clk[0] > 0) && (phy_pix_clk[1] > 0)); - if (phy_pix_clk[0] < phy_pix_clk[1]) { - lower_mode_stream_index = 0; - phy_pix_clk_mult = phy_pix_clk[1] / phy_pix_clk[0]; - } else { - lower_mode_stream_index = 1; - phy_pix_clk_mult = phy_pix_clk[0] / phy_pix_clk[1]; - } - - if (phy_pix_clk_mult >= DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER) - use_new_det_override_algorithm = true; } if (stream_count > 0) { @@ -342,13 +302,6 @@ void dcn32_determine_det_override(struct dc *dc, if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) continue; - if (use_new_det_override_algorithm) { - if (i == lower_mode_stream_index) - stream_segments = 4; - else - stream_segments = 14; - } - if (context->stream_status[i].plane_count > 0) plane_segments = stream_segments / context->stream_status[i].plane_count; else @@ -432,8 +385,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, } /** - * ******************************************************************************************* - * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases + * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases * * This function saves the MALL (SubVP) case for fast validation cases. For fast validation, * there are situations where a shallow copy of the dc->current_state is created for the @@ -446,13 +398,11 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, * NOTE: This function ONLY works if the streams are not moved to a different pipe in the * validation. We don't expect this to happen in fast_validation=1 cases. 
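 *
 * An illustrative save/validate/restore pairing (the caller shape and the
 * use of struct mall_temp_config are assumptions for illustration, not
 * taken from this patch):
 *
 *   struct mall_temp_config mall_temp;
 *
 *   dcn32_save_mall_state(dc, context, &mall_temp);
 *   dc->res_pool->funcs->validate_bandwidth(dc, context, true);
 *   dcn32_restore_mall_state(dc, context, &mall_temp);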
* - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed - * @param [out]: temp_config: struct used to cache the existing MALL state - * - * @return: void + * @dc: Current DC state + * @context: New DC state to be programmed + * @temp_config: struct used to cache the existing MALL state * - * ******************************************************************************************* + * Return: void */ void dcn32_save_mall_state(struct dc *dc, struct dc_state *context, @@ -472,18 +422,15 @@ void dcn32_save_mall_state(struct dc *dc, } /** - * ******************************************************************************************* - * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases + * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases * * Restore the MALL state based on the previously saved state from dcn32_save_mall_state * - * @param [in]: dc: Current DC state - * @param [in/out]: context: New DC state to be programmed, restore MALL state into here - * @param [in]: temp_config: struct that has the cached MALL state + * @dc: Current DC state + * @context: New DC state to be programmed, restore MALL state into here + * @temp_config: struct that has the cached MALL state * - * @return: void - * - * ******************************************************************************************* + * Return: void */ void dcn32_restore_mall_state(struct dc *dc, struct dc_state *context, @@ -588,10 +535,11 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream) } /** - * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch - Determines if config can support FPO + * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can + * support FPO * - * @param [in]: dc - current dc state - * @param [in]: context - new dc state + * @dc: current dc state + * @context: new dc state * * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL */ @@ -626,7 +574,7 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre DC_FP_END(); DC_FP_START(); - is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US); + is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us); DC_FP_END(); if (!is_fpo_vactive || dc->debug.disable_fpo_vactive) return NULL; @@ -647,12 +595,140 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us)) return NULL; - // check if freesync enabled if (!fpo_candidate_stream->allow_freesync) return NULL; - if (fpo_candidate_stream->vrr_active_variable) + if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming) return NULL; return fpo_candidate_stream; } + +bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height) +{ + bool is_native_scaling = false; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->src_rect.width == width && + pipe->plane_state->src_rect.height == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +/** + * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible + * + * @dc: Current 
DC state + * @context: New DC state to be programmed + * + * SubVP + DRR is admissible under the following conditions: + * - Config must have 2 displays (i.e., 2 non-phantom master pipes) + * - One display is SubVP + * - Other display must have Freesync enabled + * - The potential DRR display must not be PSR capable + * + * Return: True if admissible, false otherwise + */ +bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) +{ + bool result = false; + uint32_t i; + uint8_t subvp_count = 0; + uint8_t non_subvp_pipes = 0; + bool drr_pipe_found = false; + bool drr_psr_capable = false; + uint64_t refresh_rate = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } + if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + non_subvp_pipes++; + drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); + if (pipe->stream->ignore_msa_timing_param && + (pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) { + drr_pipe_found = true; + } + } + } + } + + if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + ((uint32_t)refresh_rate < 120)) + result = true; + + return result; +} + +/** + * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible + * + * @dc: Current DC state + * @context: New DC state to be programmed + * @vlevel: Voltage level calculated by DML + * + * SubVP + Vblank is admissible under the following conditions: + * - Config must have 2 displays (i.e., 2 non-phantom master pipes) + * - One display is SubVP + * - Other display must not have Freesync capability + * - DML must have output DRAM clock change support as SubVP + Vblank + * - The potential vblank display must not be PSR capable + * + * Return: True if admissible, false otherwise + */ +bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel) +{ + bool result = false; + uint32_t i; + uint8_t subvp_count = 0; + uint8_t non_subvp_pipes = 0; + bool drr_pipe_found = false; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + bool vblank_psr_capable = false; + uint64_t refresh_rate = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } + if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + non_subvp_pipes++; + vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); + if (pipe->stream->ignore_msa_timing_param && + (pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) { + drr_pipe_found = true; + } + } + } + } + + if 
(subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && + ((uint32_t)refresh_rate < 120) && + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) + result = true; + + return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index a60ddb343d13..8d73cceb485b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -725,31 +725,15 @@ static const struct dc_debug_options debug_defaults_drv = { .override_dispclk_programming = true, .disable_fpo_optimizations = false, .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = true, + .disable_fpo_vactive = false, .disable_boot_optimizations = false, + .disable_subvp_high_refresh = false, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, + .disable_dc_mode_overwrite = true, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_dsc_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true, - .force_disable_subvp = true, -}; - - static struct dce_aux *dcn321_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1340,15 +1324,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn321_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create, - .create_hwseq = dcn321_hwseq_create, -}; - static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) { unsigned int i; @@ -1613,7 +1588,7 @@ static struct resource_funcs dcn321_res_pool_funcs = { .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1735,9 +1710,9 @@ static bool dcn321_resource_construct( dc->caps.subvp_pstate_allow_width_us = 20; dc->caps.subvp_vertical_int_margin_us = 30; dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.dp_hpo = true; @@ -1745,6 +1720,7 @@ static bool dcn321_resource_construct( dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; 
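As an aside on the two admissibility helpers above: both compute the refresh rate as a round-up integer division, ceil(pix_clk / (h_total * v_total)). Worked through with the standard CTA 4K60 timing (594 MHz pixel clock, h_total 4400, v_total 2250), used here purely as an illustration:

/* Same arithmetic as the helpers above, with concrete illustrative values. */
uint64_t refresh_rate = 5940000ULL * 100 +	/* pix_clk_100hz * 100 */
		(uint64_t)2250 * 4400 - 1;	/* + v_total * h_total - 1 */
refresh_rate = div_u64(refresh_rate, 2250);	/* / v_total */
refresh_rate = div_u64(refresh_rate, 4400);	/* / h_total */
/* refresh_rate == 60, so the "< 120" SubVP refresh cap above is satisfied */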
dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1780,6 +1756,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->config.dc_mode_clk_limit_support = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -1798,10 +1775,7 @@ static bool dcn321_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1857,8 +1831,7 @@ static bool dcn321_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -1990,9 +1963,8 @@ static bool dcn321_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9a3f2a44f882..d0eed3b4771e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,7 @@ struct dmub_srv; struct dc_dmub_srv; +union dmub_rb_cmd; irq_handler_idx dm_register_interrupt( struct dc_context *ctx, @@ -274,6 +275,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) /* + * DMUB Interfaces + */ +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +/* * Debug and verification hooks */ @@ -285,4 +292,6 @@ void dm_dtn_log_append_v(struct dc_context *ctx, void dm_dtn_log_end(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx); +char *dce_version_to_string(const int version); + #endif /* __DM_SERVICES_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index b52ba6ffabe1..facf269c4326 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -269,4 +269,10 @@ struct dtn_min_clk_info { uint32_t min_memory_clock_khz; }; +enum dm_dmub_wait_type { + DM_DMUB_WAIT_TYPE_NO_WAIT, + DM_DMUB_WAIT_TYPE_WAIT, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 01db035589c5..77cf5545c94c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -38,6 +38,11 @@ ifdef CONFIG_ARM64 dml_rcflags := -mgeneral-regs-only endif +ifdef CONFIG_LOONGARCH +dml_ccflags := -mfpu=64 +dml_rcflags := -msoft-float +endif + ifdef 
CONFIG_CC_IS_GCC ifneq ($(call gcc-min-version, 70100),y) IS_OLD_GCC = 1 diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c index 0100a6053ab6..f2dfa96f9ef5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -3015,7 +3015,7 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[], int i, num_active_pipes = 0; for (i = 0; i < pipe_count; i++) { - if (!pipe[i].stream || pipe[i].top_pipe) + if (!resource_is_pipe_type(&pipe[i], OPP_HEAD)) continue; active_pipes[num_active_pipes++] = &pipe[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index e73f089c84bb..50b0434354f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -1258,7 +1258,7 @@ bool dcn_validate_bandwidth( hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end; } else { /* pipe not split previously needs split */ - hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe); + hsplit_pipe = resource_find_free_secondary_pipe_legacy(&context->res_ctx, pool, pipe); ASSERT(hsplit_pipe); split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index f1c1a4b5fcac..5805fb02af14 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc { int plane_count; int i; - unsigned int optimized_min_dst_y_next_start_us; + unsigned int min_dst_y_next_start_us; plane_count = 0; - optimized_min_dst_y_next_start_us = 0; + min_dst_y_next_start_us = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; @@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; struct dc_stream_status *stream_status = &context->stream_status[0]; + struct dc_stream_state *current_stream = context->streams[0]; int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? 
dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; bool is_pwrseq0 = link->link_index == 0; + bool isFreesyncVideo; - if (dc_extended_blank_supported(dc)) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream == context->streams[0] - && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max - && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) { - optimized_min_dst_y_next_start_us = - context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us; - break; - } + isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max; + isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) { + min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us; + break; } } @@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc if (stream_status->plane_count > 1) return DCN_ZSTATE_SUPPORT_DISALLOW; - if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)) + if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000)) return DCN_ZSTATE_SUPPORT_ALLOW; else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; @@ -1043,7 +1042,7 @@ void dcn20_calculate_dlg_params(struct dc *dc, int pipe_cnt, int vlevel) { - int i, pipe_idx; + int i, pipe_idx, active_hubp_count = 0; dc_assert_fp_enabled(); @@ -1079,6 +1078,8 @@ void dcn20_calculate_dlg_params(struct dc *dc, for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; + if (context->res_ctx.pipe_ctx[i].plane_state) + active_hubp_count++; pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); @@ -1098,13 +1099,18 @@ void dcn20_calculate_dlg_params(struct dc *dc, context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; - if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + if (dc->ctx->dce_version < DCN_VERSION_3_1 && + context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) dcn20_adjust_freesync_v_startup( &context->res_ctx.pipe_ctx[i].stream->timing, &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); pipe_idx++; } + /* If DCN isn't making memory requests we can allow pstate change */ + if (!active_hubp_count) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; @@ -1304,7 +1310,7 @@ int 
dcn20_populate_dml_pipes_from_context(struct dc *dc, pipes[pipe_cnt].dout.is_virtual = 0; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) { + switch (resource_get_num_odm_splits(&res_ctx->pipe_ctx[i])) { case 1: pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; break; @@ -1885,6 +1891,17 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; } + if ((int)(bb->sr_exit_z8_time_us * 1000) + != dc->bb_overrides.sr_exit_z8_time_ns + && dc->bb_overrides.sr_exit_z8_time_ns) { + bb->sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + } + + if ((int)(bb->sr_enter_plus_exit_z8_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_z8_time_ns + && dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) { + bb->sr_enter_plus_exit_z8_time_us = dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + } if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns && dc->bb_overrides.urgent_latency_ns) { bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6266b0788387..7bf4bb7ad044 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -4356,12 +4356,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); - - locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( - locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], - locals->EffectiveLBLatencyHidingSourceLinesChroma), - locals->SwathHeightCPerState[i][j][k]); + if (locals->LinesInDETChroma) { + locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + + dml_min(locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * + locals->BytePerPixelInDETC[k] * + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], + locals->EffectiveLBLatencyHidingSourceLinesChroma), + locals->SwathHeightCPerState[i][j][k]); + } else { + locals->EffectiveDETLBLinesChroma = 0; + } if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index b7c2844d0cbe..57cf0358cc43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule( *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC; } else { *swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY; - if (myPipe->BlockWidth256BytesC > 0) + if (myPipe->BlockHeight256BytesC > 0) *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, 
myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC; } @@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevels; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double HostVMInefficiencyFactor; double VRatioClamped; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index a352c703e258..ccb4ad78f667 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -674,10 +674,19 @@ void dcn30_fpu_update_bw_bounding_box(struct dc *dc, } /** - * Finds dummy_latency_index when MCLK switching using firmware based - * vblank stretch is enabled. This function will iterate through the - * table of dummy pstate latencies until the lowest value that allows + * dcn30_find_dummy_latency_index_for_fw_based_mclk_switch() - Finds + * dummy_latency_index when MCLK switching using firmware based vblank stretch + * is enabled. This function will iterate through the table of dummy pstate + * latencies until the lowest value that allows * dm_allow_self_refresh_and_mclk_switch to happen is found + * + * @dc: Current DC state + * @context: new dc state + * @pipes: DML pipe params + * @pipe_cnt: number of DML pipes + * @vlevel: Voltage level calculated by DML + * + * Return: lowest dummy_latency_index value */ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 7d0626e42ea6..ad741a723c0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -784,8 +784,7 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o Delay = Delay + 1; // sft Delay = Delay + 1; - } - else { + } else { // sfr Delay = Delay + 2; // dsccif @@ -3489,8 +3488,7 @@ static double TruncToValidBPP( if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; - } - else { + } else { MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0; } @@ -4939,8 +4937,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } v->TotImmediateFlipBytes = 0.0; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { - v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] - + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]; + v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -5130,7 +5128,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l ViewportExceedsSurface = true; if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 - && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { + && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) { ViewportExceedsSurface = true; } 
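One fix above that is easy to misread is the dml30 TotImmediateFlipBytes change: the added parentheses move MetaRowBytes and DPTEBytesPerRow inside the per-DPP multiplier. A toy check with invented numbers shows the size of the difference:

/* All values are invented, chosen only to show the regrouping. */
double NoOfDPP = 2.0, PDEAndMetaPTEBytesPerFrame = 100.0,
       MetaRowBytes = 50.0, DPTEBytesPerRow = 30.0;

/* before: only the PTE term was scaled by the DPP count */
double before = NoOfDPP * PDEAndMetaPTEBytesPerFrame
		+ MetaRowBytes + DPTEBytesPerRow;	/* 280 */

/* after: every per-pipe contribution is scaled by the DPP count */
double after = NoOfDPP * (PDEAndMetaPTEBytesPerFrame
		+ MetaRowBytes + DPTEBytesPerRow);	/* 360 */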
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index cd3cfcb2a2b0..0497a5d74a62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -980,7 +980,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, unsigned int vstartup_start = 0; unsigned int dst_x_after_scaler = 0; - unsigned int dst_y_after_scaler = 0; + int dst_y_after_scaler = 0; double line_wait = 0; double dst_y_prefetch = 0; double dst_y_per_vm_vblank = 0; @@ -1171,6 +1171,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + if (dst_y_after_scaler < 0) + dst_y_after_scaler = 0; // do some adjustment on the dst_after scaler to account for odm combine mode dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 422f17aefd4a..6ce90678b33c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -333,45 +333,43 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits)); /* Default clock levels are used for diags, which may lead to overclocking. */ - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn3_01_ip.max_num_dpp = pool->base.pipe_count; - dcn3_01_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } + dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn3_01_ip.max_num_dpp = pool->base.pipe_count; + dcn3_01_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } - - s[i].state = i; - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - s[i].dram_bw_per_chan_gbps = - dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; } - if (clk_table->num_entries) { - dcn3_01_soc.num_states = 
clk_table->num_entries; - /* duplicate last level */ - s[dcn3_01_soc.num_states] = - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; - s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; - } + s[i].state = i; + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + s[i].dram_bw_per_chan_gbps = + dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + if (clk_table->num_entries) { + dcn3_01_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + s[dcn3_01_soc.num_states] = + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; + s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; } memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 59836570603a..deb6d162a2d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { - int i, pipe_idx, active_hubp_count = 0; + int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; dc_assert_fp_enabled(); @@ -563,6 +563,18 @@ void dcn31_calculate_wm_and_dlg_fp( if (context->res_ctx.pipe_ctx[i].stream) context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; } + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = + get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384) + context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2; + total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb; + pipe_idx++; + } + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det; } void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -570,6 +582,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; struct clk_limit_table *clk_table = &bw_params->clk_table; unsigned int i, closest_clk_lvl; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; int j; dc_assert_fp_enabled(); @@ -577,59 +590,55 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. 
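The de-indented bodies in these bounding-box updates (dcn301 above, dcn31 and dcn315/316 below) all rely on the same backwards search: for each SMU clock-table entry, find the highest SoC state whose DCFCLK does not exceed it. A self-contained sketch of that loop, with invented table values:

/* Stand-ins for dcn3_xx_soc.clock_limits[j].dcfclk_mhz; values invented. */
static const unsigned int soc_dcfclk_mhz[] = { 400, 600, 800 };
unsigned int entry_dcfclk_mhz = 650;	/* one hypothetical clk_table entry */
int j, closest_clk_lvl = 0;

for (j = ARRAY_SIZE(soc_dcfclk_mhz) - 1; j >= 0; j--) {
	if (soc_dcfclk_mhz[j] <= entry_dcfclk_mhz) {
		closest_clk_lvl = j;	/* settles on index 1 (600 MHz) */
		break;
	}
}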
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_1_soc.num_chans = bw_params->num_channels; - dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_1_soc.num_chans = bw_params->num_channels; + ASSERT(clk_table->num_entries); - ASSERT(clk_table->num_entries); + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - /* Prepass to find max clocks independent of voltage level. */ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } } - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } + s[i].state = i; - s[i].state = i; - - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_1_soc.num_states = clk_table->num_entries; - } + /* Clocks dependent on voltage level. */ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? 
max_dppclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_1_soc.num_states = clk_table->num_entries; } memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits)); @@ -643,10 +652,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); } void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -707,10 +713,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); - else - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); } void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -726,71 +729,68 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_16_soc.num_chans = bw_params->num_channels; - dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_16_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); + ASSERT(clk_table->num_entries); - /* Prepass to find max clocks independent of voltage level. */ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; - } + /* Prepass to find max clocks independent of voltage level. 
*/ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= - clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - // Ported from DCN315 - if (clk_table->num_entries == 1) { - /*smu gives one DPM level, let's take the highest one*/ - closest_clk_lvl = dcn3_16_soc.num_states - 1; + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= + clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } + } + // Ported from DCN315 + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_16_soc.num_states - 1; + } - s[i].state = i; + s[i].state = i; - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - if (clk_table->num_entries == 1 && - s[i].dcfclk_mhz < - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { - /*SMU fix not released yet*/ - s[i].dcfclk_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; - } - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_16_soc.num_states = clk_table->num_entries; + /* Clocks dependent on voltage level. */ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + s[i].dcfclk_mhz < + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + s[i].dcfclk_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; } + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? 
max_dppclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_16_soc.num_states = clk_table->num_entries; } memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits)); @@ -805,13 +805,21 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); } int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) { return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0); } + +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb) +{ + /* Roughly calculate required crb to hide latency. In practice there is slightly + * more buffer available for latency hiding + */ + return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp + / 10240000 + seg_size_kb - 1) / seg_size_kb; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 687d3522cc33..8f9c8faed260 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -47,6 +47,9 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc); +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb); int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index bd674dc30df3..adea459e7d36 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -532,7 +532,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByte[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -3118,7 +3119,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SurfaceWidthC[k], v->SurfaceHeightY[k], v->SurfaceHeightC[k], - v->DETBufferSizeInKByte[0] * 1024, + v->DETBufferSizeInKByte[k] * 1024, v->BlockHeight256BytesY[k], v->BlockHeight256BytesC[k], 
v->SurfaceTiling[k], @@ -3313,7 +3314,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, dummy1, dummy2, v->SourceScan, @@ -3503,7 +3505,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { @@ -3779,14 +3781,16 @@ static noinline void CalculatePrefetchSchedulePerPlane( &v->VReadyOffsetPix[k]); } -static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte) +static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[]) { int i, total_pipes = 0; for (i = 0; i < NumberOfActivePlanes; i++) total_pipes += NoOfDPPThisState[i]; - *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; - if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE) - *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE; + DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; + if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE) + DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE; + for (i = 1; i < NumberOfActivePlanes; i++) + DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0]; } @@ -4026,7 +4030,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateSwathAndDETConfiguration( true, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -4130,7 +4135,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) { - if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) { + if (v->Output[k] == dm_hdmi) { + FMTBufferExceeded = true; + } else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1; @@ -4166,6 +4173,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) { v->DISPCLK_DPPCLK_Support[i][j] = false; } + if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) { + v->MPCCombine[i][j][k] = true; + v->NoOfDPP[i][j][k] = 2; + } } v->TotalNumberOfActiveDPP[i][j] = 0; v->TotalNumberOfSingleDPPPlanes[i][j] = 0; @@ -4642,12 +4653,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k]; } - if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315) - 
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]); + if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0]) + PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte); CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -5274,8 +5286,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->TotImmediateFlipBytes = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { v->TotImmediateFlipBytes = v->TotImmediateFlipBytes - + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] - + v->DPTEBytesPerRow[i][j][k]; + + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] + + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -6611,7 +6623,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByteA[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -6695,6 +6708,10 @@ static void CalculateSwathAndDETConfiguration( *ViewportSizeSupport = true; for (k = 0; k < NumberOfActivePlanes; ++k) { + unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k]; + + if (DETSharedByAllDPP && DPPPerPlane[k]) + DETBufferSizeInKByte /= DPPPerPlane[k]; if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16 || SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) { if (SurfaceTiling[k] == dm_sw_linear @@ -7017,7 +7034,7 @@ static double CalculateUrgentLatency( return ret; } -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxPrefetchMode, int ReorderingBytes) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index 2244e4fb8c96..4113ce79c4af 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -987,8 +987,7 @@ static void dml_rq_dlg_get_dlg_params( dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2)); - disp_dlg_regs->optimized_min_dst_y_next_start_us = 0; - disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start; + disp_dlg_regs->min_dst_y_next_start_us = 0; ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18)); dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank); @@ -1433,14 +1432,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 9e54e3d0eb78..fb21572750e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -31,6 +31,7 @@ #include "dml/dcn20/dcn20_fpu.h" #include "dml/dcn31/dcn31_fpu.h" #include "dml/display_mode_vba.h" +#include "dml/dml_inline_defs.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { .VBlankNomDefaultUS = 668, @@ -190,8 +191,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc_assert_fp_enabled(); // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) { - + if (dc->config.use_default_clock_table == false) { dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count; @@ -266,11 +266,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p } dcn20_patch_bounding_box(dc, &dcn3_14_soc); - - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); - else - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); } static bool is_dual_plane(enum surface_pixel_format format) @@ -278,6 +274,36 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. 
If exact number of vertical lines is not found then + * it will round up to next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ + unsigned int num_lines = 0; + unsigned int lines_time_in_ns = 1000.0 * + (((float)timing->h_total * 1000.0) / + ((float)timing->pix_clk_100hz / 10.0)); + + num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + + return num_lines; +} + +static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) +{ + unsigned int v_active = 0, v_blank = 0, v_back_porch = 0; + + v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom; + v_blank = timing->v_total - v_active; + v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width; + + return v_back_porch; +} + int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) @@ -286,6 +312,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; bool upscaled = false; + const unsigned int max_allowed_vblank_nom = 1023; dc_assert_fp_enabled(); @@ -293,15 +320,32 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; + unsigned int num_lines = 0; + unsigned int v_back_porch = 0; if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; - if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min - && pipe->stream->adjust.v_total_min > timing->v_total) + num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing); + + if (pipe->stream->adjust.v_total_min != 0) pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + else + pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + + v_back_porch = get_vertical_back_porch(timing); + + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); + // vblank_nom should not smaller than (VSync (timing->v_sync_width + v_back_porch) + 2) + // + 2 is because + // 1 -> VStartup_start should be 1 line before VSync + // 1 -> always reserve 1 line between start of vblank to vstartup signal + pipes[pipe_cnt].pipe.dest.vblank_nom = + max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); if (pipe->plane_state && (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || @@ -323,8 +367,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.vblank_nom = - dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0)); pipes[pipe_cnt].pipe.src.dcc_rate = 3; pipes[pipe_cnt].dout.dsc_input_bpc = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 7eb2173b7691..a94aa0f21a7f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { @@ -4227,7 +4227,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ } if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) { - if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) { + if (v->Output[k] == dm_hdmi) { + FMTBufferExceeded = true; + } else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1; @@ -5371,8 +5373,8 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ v->TotImmediateFlipBytes = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { v->TotImmediateFlipBytes = v->TotImmediateFlipBytes - + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] - + v->DPTEBytesPerRow[i][j][k]; + + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] + + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -5557,6 +5559,65 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ } } } + for (i = v->soc.num_states; i >= 0; i--) { + for (j = 0; j < 2; j++) { + enum dm_validation_status status = DML_VALIDATION_OK; + + if (!v->ScaleRatioAndTapsSupport) { + status = DML_FAIL_SCALE_RATIO_TAP; + } else if (!v->SourceFormatPixelAndScanSupport) { + status = DML_FAIL_SOURCE_PIXEL_FORMAT; + } else if (!v->ViewportSizeSupport[i][j]) { + status = DML_FAIL_VIEWPORT_SIZE; + } else if (P2IWith420) { + status = DML_FAIL_P2I_WITH_420; + } else if (DSCOnlyIfNecessaryWithBPP) { + status = DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP; + } else if (DSC422NativeNotSupported) { + status = DML_FAIL_NOT_DSC422_NATIVE; + } else if (!v->ODMCombine4To1SupportCheckOK[i]) { + status = DML_FAIL_ODM_COMBINE4TO1; + } else if (v->NotEnoughDSCUnits[i]) { + status = DML_FAIL_NOT_ENOUGH_DSC; + } else if (!v->ROBSupport[i][j]) { + status = DML_FAIL_REORDERING_BUFFER; + } else if (!v->DISPCLK_DPPCLK_Support[i][j]) { + status = DML_FAIL_DISPCLK_DPPCLK; + } else if (!v->TotalAvailablePipesSupport[i][j]) { + status = DML_FAIL_TOTAL_AVAILABLE_PIPES; + } else if (!EnoughWritebackUnits) { + status = DML_FAIL_ENOUGH_WRITEBACK_UNITS; + } else if (!v->WritebackLatencySupport) { + status = DML_FAIL_WRITEBACK_LATENCY; + } else if (!v->WritebackScaleRatioAndTapsSupport) { + status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP; + } else if (!v->CursorSupport) { + status = DML_FAIL_CURSOR_SUPPORT; + } else if (!v->PitchSupport) { + status = DML_FAIL_PITCH_SUPPORT; + } else if (ViewportExceedsSurface) { + status = DML_FAIL_VIEWPORT_EXCEEDS_SURFACE; + } else if (!v->PrefetchSupported[i][j]) { + status = DML_FAIL_PREFETCH_SUPPORT; + } else if (!v->DynamicMetadataSupported[i][j]) { + status = DML_FAIL_DYNAMIC_METADATA; + } else if (!v->TotalVerticalActiveBandwidthSupport[i][j]) { + status = DML_FAIL_TOTAL_V_ACTIVE_BW; + } else if 
(!v->VRatioInPrefetchSupported[i][j]) { + status = DML_FAIL_V_RATIO_PREFETCH; + } else if (!v->PTEBufferSizeNotExceeded[i][j]) { + status = DML_FAIL_PTE_BUFFER_SIZE; + } else if (v->NonsupportedDSCInputBPC) { + status = DML_FAIL_DSC_INPUT_BPC; + } else if ((v->HostVMEnable + && !v->ImmediateFlipSupportedForState[i][j])) { + status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP; + } else if (FMTBufferExceeded) { + status = DML_FAIL_FMT_BUFFER_EXCEEDED; + } + mode_lib->vba.ValidationStatus[i] = status; + } + } { unsigned int MaximumMPCCombine = 0; @@ -7061,7 +7122,7 @@ static double CalculateUrgentLatency( return ret; } -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxPrefetchMode, int ReorderingBytes) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index ea4eb66066c4..b3e8dc08030c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -951,7 +951,6 @@ static void dml_rq_dlg_get_dlg_params( { const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; - const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout; const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg; const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth; const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps; @@ -1000,8 +999,6 @@ static void dml_rq_dlg_get_dlg_params( unsigned int vupdate_width; unsigned int vready_offset; - unsigned int dispclk_delay_subtotal; - unsigned int vstartup_start; unsigned int dst_x_after_scaler; unsigned int dst_y_after_scaler; @@ -1051,7 +1048,6 @@ static void dml_rq_dlg_get_dlg_params( float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA - int blank_lines = 0; memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs)); memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs)); @@ -1075,17 +1071,10 @@ static void dml_rq_dlg_get_dlg_params( min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; - disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start; - disp_dlg_regs->optimized_min_dst_y_next_start_us = 0; - disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2)); - blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1); - if (blank_lines < 0) - blank_lines = 0; - if (blank_lines != 0) { - disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start; - disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz; - disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start; - } + disp_dlg_regs->min_dst_y_next_start_us = + (vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz; + disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2); + ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18)); dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank); @@ -1127,13 +1116,6 @@ static void dml_rq_dlg_get_dlg_params( vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; - dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal; - - if (dout->dsc_enable) { - double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA - - dispclk_delay_subtotal += dsc_delay; - } vstartup_start = dst->vstartup_start; if (interlaced) { @@ -1538,14 +1520,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 0c4c3208def1..711d4085b33b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -35,6 +35,15 @@ #define DC_LOGGER_INIT(logger) +static const struct subvp_high_refresh_list subvp_high_refresh_list = { + .min_refresh = 120, + .max_refresh = 175, + .res = { + {.width = 3840, .height = 2160, }, + {.width = 3440, .height = 1440, }, + {.width = 2560, .height = 1440, }}, +}; + struct _vcs_dpi_ip_params_st dcn3_2_ip = { .gpuvm_enable = 0, .gpuvm_max_page_table_levels = 4, @@ -476,24 +485,20 @@ static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry) } } -void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, +static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries, struct _vcs_dpi_voltage_scaling_st *entry) { int i = 0; int index = 0; - float net_bw_of_new_state = 0; dc_assert_fp_enabled(); - 
get_optimal_ntuple(entry); - if (*num_entries == 0) { table[0] = *entry; (*num_entries)++; } else { - net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry); - while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) { + while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) { index++; if (index >= *num_entries) break; @@ -670,7 +675,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, unsigned int max_frame_time = 0; bool valid_assignment_found = false; unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context); - bool current_assignment_freesync = false; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { @@ -692,8 +696,12 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). * - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) && - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && + (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && + pipe->stream->mall_stream_config.type == SUBVP_NONE && + (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && + !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && dcn32_allow_subvp_with_active_margin(pipe)))) { @@ -707,19 +715,10 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, struct dc_stream_state *stream = pipe->stream; unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total / (double)(stream->timing.pix_clk_100hz * 100)) * 1000000; - if (frame_us > max_frame_time && !stream->ignore_msa_timing_param) { + if (frame_us > max_frame_time) { *index = i; max_frame_time = frame_us; valid_assignment_found = true; - current_assignment_freesync = false; - /* For the 2-Freesync display case, still choose the one with the - * longest frame time - */ - } else if (stream->ignore_msa_timing_param && (!valid_assignment_found || - (current_assignment_freesync && frame_us > max_frame_time))) { - *index = i; - valid_assignment_found = true; - current_assignment_freesync = true; } } } @@ -757,7 +756,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Find the minimum pipe split count for non SubVP pipes - if (pipe->stream && !pipe->top_pipe && + if (resource_is_pipe_type(pipe, OPP_HEAD) && pipe->stream->mall_stream_config.type == SUBVP_NONE) { split_cnt = 0; while (pipe) { @@ -851,10 +850,9 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) } /** - * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable + * subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable * @dc: current dc state * @context: new dc state - * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config * * High level algorithm: * 1. 
Get timing for SubVP pipe, phantom pipe, and DRR pipe @@ -865,11 +863,12 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) * * Return: True if the SubVP + DRR config is schedulable, false otherwise */ -static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe) +static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) { bool schedulable = false; uint32_t i; struct pipe_ctx *pipe = NULL; + struct pipe_ctx *drr_pipe = NULL; struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *drr_timing = NULL; @@ -880,10 +879,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; - const struct dc_config *config = &dc->config; - - if (config->disable_subvp_drr) - return false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -891,7 +886,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; // Find the SubVP pipe @@ -899,6 +895,20 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc break; } + // Find the DRR pipe + for (i = 0; i < dc->res_pool->pipe_count; i++) { + drr_pipe = &context->res_ctx.pipe_ctx[i]; + + // We check for master pipe only + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) + continue; + + if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && + (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable)) + break; + } + main_timing = &pipe->stream->timing; phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; drr_timing = &drr_pipe->stream->timing; @@ -972,7 +982,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { @@ -984,13 +995,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipe = pipe; } - // Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On - if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param && - (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync || - context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) { - // SUBVP + DRR case -- only allowed if run through DRR validation path - schedulable = false; - } else if (found) { + if (found) { main_timing = &subvp_pipe->stream->timing; phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; @@ -1020,6 
+1025,56 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) } /** + * subvp_subvp_admissable() - Determine if subvp + subvp config is admissible + * + * @dc: Current DC state + * @context: New DC state to be programmed + * + * SubVP + SubVP is admissible under the following conditions: + * - All SubVP pipes are < 120Hz OR + * - All SubVP pipes are >= 120hz + * + * Return: True if admissible, false otherwise + */ +static bool subvp_subvp_admissable(struct dc *dc, + struct dc_state *context) +{ + bool result = false; + uint32_t i; + uint8_t subvp_count = 0; + uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0; + uint64_t refresh_rate = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->stream) + continue; + + if (pipe->plane_state && !pipe->top_pipe && + pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if ((uint32_t)refresh_rate < min_refresh) + min_refresh = (uint32_t)refresh_rate; + if ((uint32_t)refresh_rate > max_refresh) + max_refresh = (uint32_t)refresh_rate; + subvp_count++; + } + } + + if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) || + (min_refresh >= subvp_high_refresh_list.min_refresh && + max_refresh <= subvp_high_refresh_list.max_refresh))) + result = true; + + return result; +} + +/** * subvp_validate_static_schedulability - Check which SubVP case is calculated * and handle static analysis based on the case. * @dc: current dc state @@ -1037,11 +1092,12 @@ static bool subvp_validate_static_schedulability(struct dc *dc, struct dc_state *context, int vlevel) { - bool schedulable = true; // true by default for single display case + bool schedulable = false; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; uint32_t i, pipe_idx; uint8_t subvp_count = 0; uint8_t vactive_count = 0; + uint8_t non_subvp_pipes = 0; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1049,14 +1105,18 @@ static bool subvp_validate_static_schedulability(struct dc *dc, if (!pipe->stream) continue; - if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) - subvp_count++; + if (pipe->plane_state && !pipe->top_pipe) { + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + subvp_count++; + if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + non_subvp_pipes++; + } + } // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
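/*
 * Editor's note: subvp_subvp_admissable() above derives a rounded-up refresh
 * rate from the stream timing before comparing it against the 120 Hz split
 * and the subvp_high_refresh_list window. Below is a standalone sketch of
 * that computation under the same assumptions; the helper name is
 * illustrative, while div_u64() is the kernel's u64-by-u32 divide from
 * <linux/math64.h>.
 */
#include <linux/math64.h>

static uint32_t approx_refresh_rate_hz(uint32_t pix_clk_100hz,
				       uint32_t h_total, uint32_t v_total)
{
	/*
	 * pix_clk_100hz * 100 is the pixel clock in Hz. Adding
	 * v_total * h_total - 1 before dividing rounds the result up,
	 * so a 4K60 timing (pix_clk_100hz = 5940000, h_total = 4400,
	 * v_total = 2250) yields exactly 60.
	 */
	uint64_t refresh = (uint64_t)pix_clk_100hz * 100 +
			   (uint64_t)v_total * h_total - 1;

	refresh = div_u64(refresh, v_total);

	return (uint32_t)div_u64(refresh, h_total);
}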
- if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 && + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && pipe->stream->mall_stream_config.type == SUBVP_NONE) { vactive_count++; } @@ -1065,13 +1125,14 @@ static bool subvp_validate_static_schedulability(struct dc *dc, if (subvp_count == 2) { // Static schedulability check for SubVP + SubVP case - schedulable = subvp_subvp_schedulable(dc, context); - } else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) { - // Static schedulability check for SubVP + VBLANK case. Also handle the case where - // DML outputs SubVP + VBLANK + VACTIVE (DML will report as SubVP + VBLANK) - if (vactive_count > 0) - schedulable = false; - else + schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context); + } else if (subvp_count == 1 && non_subvp_pipes == 0) { + // Single SubVP configs will be supported by default as long as it's suppported by DML + schedulable = true; + } else if (subvp_count == 1 && non_subvp_pipes == 1) { + if (dcn32_subvp_drr_admissable(dc, context)) + schedulable = subvp_drr_schedulable(dc, context); + else if (dcn32_subvp_vblank_admissable(dc, context, vlevel)) schedulable = subvp_vblank_schedulable(dc, context); } else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp && vactive_count > 0) { @@ -1095,10 +1156,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, unsigned int dc_pipe_idx = 0; int i = 0; bool found_supported_config = false; - struct pipe_ctx *pipe = NULL; - uint32_t non_subvp_pipes = 0; - bool drr_pipe_found = false; - uint32_t drr_pipe_index = 0; dc_assert_fp_enabled(); @@ -1129,7 +1186,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) */ - if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) && + if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && (*vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || @@ -1188,31 +1245,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } - if (*vlevel < context->bw_ctx.dml.soc.num_states && - vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported - && subvp_validate_static_schedulability(dc, context, *vlevel)) { + if (*vlevel < context->bw_ctx.dml.soc.num_states + && subvp_validate_static_schedulability(dc, context, *vlevel)) found_supported_config = true; - } else if (*vlevel < context->bw_ctx.dml.soc.num_states) { - /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed. - * This handles the case for SubVP + DRR, where the DRR display does not support MCLK - * switch at it's native refresh rate / timing, or DRR is allowed for the non-subvp - * display. 
- */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { - non_subvp_pipes++; - // Use ignore_msa_timing_param flag to identify as DRR - if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) { - drr_pipe_found = true; - drr_pipe_index = i; - } - } - } - // If there is only 1 remaining non SubVP pipe that is DRR, check static - // schedulability for SubVP + DRR. - if (non_subvp_pipes == 1 && drr_pipe_found) { + if (found_supported_config) { + // For SubVP + DRR cases, we can force the lowest vlevel that supports the mode + if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) { /* find lowest vlevel that supports the config */ for (i = *vlevel; i >= 0; i--) { if (vba->ModeSupport[i][vba->maxMpcComb]) { @@ -1221,9 +1259,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, break; } } - - found_supported_config = subvp_drr_schedulable(dc, context, - &context->res_ctx.pipe_ctx[drr_pipe_index]); } } } @@ -1315,6 +1350,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, int i, pipe_idx, active_hubp_count = 0; bool usr_retraining_support = false; bool unbounded_req_enabled = false; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; dc_assert_fp_enabled(); @@ -1396,6 +1432,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0) + context->res_ctx.pipe_ctx[i].has_vactive_margin = true; + else + context->res_ctx.pipe_ctx[i].has_vactive_margin = false; + /* MALL Allocation Sizes */ /* count from active, top pipes per plane only */ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && @@ -1432,6 +1473,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.dramclk_khz = 0; context->bw_ctx.bw.dcn.clk.fclk_khz = 0; context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; @@ -1679,8 +1721,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && !dc->config.enable_windowed_mpo_odm && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; @@ -2005,6 +2047,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = true; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank; } else { /* Restore FCLK latency and re-run validation to go back to original validation * output if we find that enabling FPO does not give us any benefit (i.e. 
lower @@ -2062,6 +2105,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) */ + /* if (dcn3_2_soc.num_states > 2) { vlevel_temp = 0; dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; @@ -2088,6 +2132,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + */ /* Set C, for Dummy P-State: * All clocks min. @@ -2189,6 +2234,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } + /* Make set D = set A since we do not optimized watermarks for MALL */ + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -2303,14 +2351,105 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry, + struct _vcs_dpi_voltage_scaling_st *second_entry) +{ + struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry; + *first_entry = *second_entry; + *second_entry = temp_entry; +} + +/* + * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK + */ +static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + unsigned int start_index = 0; + unsigned int end_index = 0; + unsigned int current_bw = 0; + + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + current_bw = table[i].net_bw_in_kbytes_sec; + start_index = i; + end_index = ++i; + + while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw)) + end_index = ++i; + } + + if (start_index != end_index) { + for (int j = start_index; j < end_index; j++) { + for (int k = start_index; k < end_index; k++) { + if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz) + swap_table_entries(&table[k], &table[k+1]); + } + } + } + + start_index = 0; + end_index = 0; + + } +} + +/* + * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing + * and remove entries that do not + */ +static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) || + (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz)) + remove_entry_from_table_at_index(table, num_entries, i); + } + } +} + +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct 
containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -2321,51 +2460,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + 
max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) + max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = max_dcfclk_mhz * dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; @@ -2375,14 +2539,18 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + 
entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); // Insert the UCLK DPMS @@ -2391,6 +2559,8 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = 0; entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -2401,15 +2571,19 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } } // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - entry.fabricclk_mhz = max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -2419,12 +2593,27 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); } + // Insert entry with all max dc limits without bandwidth matching + if (!disable_dc_mode_overwrite) { + struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry; + + max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; + max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz; + max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16; + + max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry); + insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry); + + sort_entries_with_same_bw(table, num_entries); + remove_inconsistent_entries(table, num_entries); + } + // At this point, the table only contains supported points of interest // it could be used as is, but some states may be redundant due to // coarse grained nature of some clocks, so we want to round up to @@ -2508,80 +2697,78 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } + /* Overrides from dc->config options */ + dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_2_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } + /* Override from passed 
dc->bb_overrides if available*/ + if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } - if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_2_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } - if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - dcn3_2_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } - if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) - != dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_2_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } + if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_2_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } - if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_2_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_2_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; + if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_2_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_2_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_2_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_2_soc.dram_clock_change_latency_us = + 
bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_2_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_2_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_2_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; } + } - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); } + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -2592,7 +2779,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { + if (bw_params->clk_table.entries[0].memclk_mhz) { if (dc->debug.use_legacy_soc_bb_mechanism) { unsigned int i = 0, j = 0, num_states = 0; @@ -2736,7 +2923,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; } } else { - build_synthetic_soc_states(bw_params, dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); } /* Re-init DML with updated bb */ @@ -2783,15 +2971,76 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) } /** - * ******************************************************************************************* - * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy + * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp * - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed + * @dc: Current DC state + * @context: New DC state to be programmed + * @pipe: Pipe to be considered for use in subvp + * + * On high refresh rate display configs, we will allow subvp under the following conditions: + * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440 + * 2. Refresh rate is between 120hz - 165hz + * 3. No scaling + * 4. Freesync is inactive + * 5. 
For single display cases, freesync must be disabled + * + * Return: True if pipe can be used for subvp, false otherwise + */ +bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe) +{ + bool allow = false; + uint32_t refresh_rate = 0; + uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh; + uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh; + uint32_t min_refresh = subvp_max_refresh; + uint32_t i; + + /* Only allow SubVP on high refresh displays if all connected displays + * are considered "high refresh" (i.e. >= 120hz). We do not want to + * allow combinations such as 120hz (SubVP) + 60hz (SubVP). + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 + + pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1) + / (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total); + + if (refresh_rate < min_refresh) + min_refresh = refresh_rate; + } + + if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream && + pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) + / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); + if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) { + for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) { + uint32_t width = subvp_high_refresh_list.res[i].width; + uint32_t height = subvp_high_refresh_list.res[i].height; + + if (dcn32_check_native_scaling_for_res(pipe, width, height)) { + if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) { + allow = true; + break; + } + } + } + } + } + return allow; +} + +/** + * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy * - * @return: Max vratio for prefetch + * @dc: Current DC state + * @context: New DC state to be programmed * - * ******************************************************************************************* + * Return: Max vratio for prefetch */ double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) { @@ -2821,9 +3070,9 @@ double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *conte * ActiveMargin <= 0 to be the FPO stream candidate if found. 
* * - @param [in]: dc - current dc state - @param [in]: context - new dc state - @param [out]: fpo_candidate_stream - pointer to FPO stream candidate if one is found + * @dc: current dc state + * @context: new dc state + * @fpo_candidate_stream: pointer to FPO stream candidate if one is found * * Return: void */ @@ -2849,10 +3098,9 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co /** * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE * - * @param [in]: dc - current dc state - * @param [in]: context - new dc state - * @param [in]: vactive_margin_req_us - The vactive marign required for a vactive pipe to be - * considered "found" + * @dc: current dc state + * @context: new dc state + * @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found" * * Return: True if VACTIVE display is found, false otherwise */ @@ -2861,6 +3109,7 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint unsigned int i, pipe_idx; const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vactive_found = false; + unsigned int blank_us = 0; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -2868,7 +3117,10 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint if (!pipe->stream) continue; - if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us) { + blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total / + (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000; + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us && + !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) { vactive_found = true; break; } @@ -2882,3 +3134,18 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb) dc_assert_fp_enabled(); dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0; } + +void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context) +{ + // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue) + if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) && + dc->dml.soc.num_chans <= 8) { + int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; + + if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 && + num_mclk_levels > 1) { + context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; + context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + } + } +}
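Both new gates are derived from the stream timing: dcn32_allow_subvp_high_refresh_rate() computes a rounded-up refresh rate from the pixel clock and total raster size, and dcn32_find_vactive_pipe() converts the vertical blanking interval into microseconds and rejects pipes whose blank exceeds dc->debug.fpo_vactive_max_blank_us or that run VRR. The sketch below uses hypothetical timing numbers; the driver routes the refresh division through a double, but plain integer ceiling math gives the same result for values like these.

#include <stdio.h>

struct timing {
	unsigned int pix_clk_100hz, h_total, v_total, v_addressable;
};

/* Round-up division used by the SubVP high-refresh check:
 * refresh = ceil(pixel_clock_hz / (h_total * v_total)). */
static unsigned int refresh_hz(const struct timing *t)
{
	unsigned long long total = (unsigned long long)t->h_total * t->v_total;

	return (unsigned int)(((unsigned long long)t->pix_clk_100hz * 100 +
			       total - 1) / total);
}

/* Vertical blank duration in microseconds, as in the blank_us term
 * that now gates FPO VACTIVE candidates. */
static double vblank_us(const struct timing *t)
{
	return ((t->v_total - t->v_addressable) * t->h_total /
		(double)(t->pix_clk_100hz * 100)) * 1000000.0;
}

int main(void)
{
	/* Hypothetical 4k120 timing: 1.188 GHz pixel clock, 4400x2250 total. */
	struct timing t = { 11880000, 4400, 2250, 2160 };

	printf("refresh = %u Hz\n", refresh_hz(&t));	/* 120 */
	printf("vblank  = %.1f us\n", vblank_us(&t));	/* ~333.3 */
	return 0;
}

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index dcf512cd3072..defbee866be6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -39,10 +39,6 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, uint8_t dcn32_predict_pipe_split(struct dc_state *context, display_e2e_pipe_params_st *pipe_e2e); -void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, - unsigned int *num_entries, - struct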
_vcs_dpi_voltage_scaling_st *entry); - void dcn32_set_phantom_stream_timing(struct dc *dc, struct dc_state *context, struct pipe_ctx *ref_pipe, @@ -80,6 +76,8 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req); +void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context); + void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index d75248b6cae9..cbdfb762c10c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -811,7 +811,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightC[k], TWait, (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || - v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -2323,10 +2323,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.LinkCapacitySupport[i] = true; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k - && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0 - || mode_lib->vba.Output[k] == dm_edp - || mode_lib->vba.Output[k] == dm_hdmi) - && mode_lib->vba.OutputBppPerState[i][k] == 0) { + && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0 + || mode_lib->vba.Output[k] == dm_edp + || mode_lib->vba.Output[k] == dm_hdmi) + && mode_lib->vba.OutputBppPerState[i][k] == 0 && + (mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) { + /* Phantom pipes don't consider DSC in DML, so it could fail link check. + * However, we don't care about the link for phantom pipes. + */ mode_lib->vba.LinkCapacitySupport[i] = false; } } @@ -3311,7 +3315,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? 
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index d98e36a9a09c..c4745d63039b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,7 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff #define MEM_STROBE_FREQ_MHZ 1600 -#define MIN_DCFCLK_FREQ_MHZ 200 +#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 61cc4904ade4..ecea008f19d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1595,7 +1595,6 @@ double dml32_TruncToValidBPP( unsigned int NonDSCBPP0; unsigned int NonDSCBPP1; unsigned int NonDSCBPP2; - unsigned int NonDSCBPP3; if (Format == dm_420) { NonDSCBPP0 = 12; @@ -1604,10 +1603,9 @@ double dml32_TruncToValidBPP( MinDSCBPP = 6; MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16; } else if (Format == dm_444) { - NonDSCBPP0 = 18; - NonDSCBPP1 = 24; - NonDSCBPP2 = 30; - NonDSCBPP3 = 36; + NonDSCBPP0 = 24; + NonDSCBPP1 = 30; + NonDSCBPP2 = 36; MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { @@ -1661,9 +1659,7 @@ double dml32_TruncToValidBPP( else return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0; } else { - if (MaxLinkBPP >= NonDSCBPP3) - return NonDSCBPP3; - else if (MaxLinkBPP >= NonDSCBPP2) + if (MaxLinkBPP >= NonDSCBPP2) return NonDSCBPP2; else if (MaxLinkBPP >= NonDSCBPP1) return NonDSCBPP1; @@ -1674,7 +1670,7 @@ double dml32_TruncToValidBPP( } } else { if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || - DesiredBPP == NonDSCBPP0 || DesiredBPP == NonDSCBPP3)) || + DesiredBPP <= NonDSCBPP0)) || (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) return BPP_INVALID; else @@ -3463,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule( double TimeForFetchingMetaPTE = 0; double TimeForFetchingRowInVBlank = 0; double LinesToRequestPrefetchPixelData = 0; + double LinesForPrefetchBandwidth = 0; unsigned int HostVMDynamicLevelsTrips; double trip_to_mem; double Tvm_trips; @@ -3892,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule( TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank = Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; + /* Clamp to oto for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_oto; } else { *DestinationLinesForPrefetch = dst_y_prefetch_equ; TimeForFetchingMetaPTE = Tvm_equ; TimeForFetchingRowInVBlank = Tr0_equ; *PrefetchBandwidth = prefetch_bw_equ; + /* Clamp to equ for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_equ; } *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0; @@ -3904,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule( *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0; - LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - + LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank; #ifdef __DML_VBA_DEBUG__
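With NonDSCBPP3 removed, the non-DSC dm_444 ladder collapses to 24/30/36 bpp, and a requested bpp at or below NonDSCBPP0 (24) is now accepted instead of requiring an exact match against one of the old steps. Below is a standalone sketch of the resulting truncation only; it is illustrative, with -1.0 standing in for BPP_INVALID.

#include <stdio.h>

/* Truncate a link bpp budget to the non-DSC dm_444 steps that remain
 * after this change: 36, 30, 24. */
static double trunc_to_valid_bpp_444(double max_link_bpp)
{
	if (max_link_bpp >= 36.0)
		return 36.0;
	else if (max_link_bpp >= 30.0)
		return 30.0;
	else if (max_link_bpp >= 24.0)
		return 24.0;
	return -1.0;	/* stand-in for BPP_INVALID */
}

int main(void)
{
	/* A link that can carry 31.5 bpp is truncated to the 30 bpp step. */
	printf("%.0f\n", trunc_to_valid_bpp_444(31.5));
	return 0;
}

@@ -4128,7 +4129,7 @@ void dml32_CalculateFlipSchedule(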
unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; if (GPUVMEnable == true && HostVMEnable == true) HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; @@ -4342,7 +4343,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } if (v->USRRetrainingRequiredFinal) - v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark + v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark + mmSOCParameters.USRRetrainingLatency; if (TotalActiveWriteback <= 1) { @@ -4660,6 +4661,10 @@ void dml32_CalculateMinAndMaxPrefetchMode( } else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) { *MinPrefetchMode = 0; *MaxPrefetchMode = 0; + } else if (AllowForPStateChangeOrStutterInVBlankFinal == + dm_prefetch_support_uclk_fclk_and_stutter_if_possible) { + *MinPrefetchMode = 0; + *MaxPrefetchMode = 3; } else { *MinPrefetchMode = 0; *MaxPrefetchMode = 3; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index 395ae8761980..9ba6cb67655f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs, else rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1; - if (min_meta_chunk_bytes == 0) + if (p1_min_meta_chunk_bytes == 0) rq_regs->rq_regs_c.min_meta_chunk_size = 0; else rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 342a1bcb4927..b26fcf86014c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -207,24 +207,20 @@ static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st * return limiting_bw_kbytes_sec; } -void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, +static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries, struct _vcs_dpi_voltage_scaling_st *entry) { int i = 0; int index = 0; - float net_bw_of_new_state = 0; dc_assert_fp_enabled(); - get_optimal_ntuple(entry); - if (*num_entries == 0) { table[0] = *entry; (*num_entries)++; } else { - net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry); - while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) { + while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) { index++; if (index >= *num_entries) break; @@ -252,14 +248,105 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry, + struct _vcs_dpi_voltage_scaling_st *second_entry) +{ + struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry; + *first_entry = *second_entry; + *second_entry = temp_entry; +} + +/* + * sort_entries_with_same_bw - Sort entries 
sharing the same bandwidth by DCFCLK + */ +static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + unsigned int start_index = 0; + unsigned int end_index = 0; + unsigned int current_bw = 0; + + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + current_bw = table[i].net_bw_in_kbytes_sec; + start_index = i; + end_index = ++i; + + while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw)) + end_index = ++i; + } + + if (start_index != end_index) { + for (int j = start_index; j < end_index; j++) { + for (int k = start_index; k < end_index; k++) { + if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz) + swap_table_entries(&table[k], &table[k+1]); + } + } + } + + start_index = 0; + end_index = 0; + + } +} + +/* + * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing + * and remove entries that do not follow this order + */ +static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) +{ + for (int i = 0; i < (*num_entries - 1); i++) { + if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { + if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) || + (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz)) + remove_entry_from_table_at_index(table, num_entries, i); + } + } +} + +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -270,51 +357,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) 
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) + max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = 
max_dcfclk_mhz * dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / dcn3_21_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_21_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; @@ -324,14 +436,18 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); // Insert the UCLK DPMS @@ -340,6 +456,8 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = 0; entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -350,15 +468,19 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } } // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - entry.fabricclk_mhz = max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; + get_optimal_ntuple(&entry); + entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry); dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); } @@ -368,12 +490,29 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); }
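Unless disable_dc_mode_overwrite is set, the builder clamps the discovered hardware maxima to the firmware's DC mode limits (via override_max_clk_values()) and then appends one state carrying all DC-mode maxima without bandwidth matching, so the table always exposes the full DC-mode envelope. The clamp rule itself is simple; here is a sketch on a reduced limits struct, with illustrative field values rather than real DPM data.

#include <stdio.h>

struct clk_limits { int dcfclk_mhz, fclk_mhz, memclk_mhz; };

/* Any initialized (non-zero) DC-mode limit replaces the discovered
 * hardware maximum; zero means "no DC-mode cap for this clock". */
static void apply_dc_mode_limits(const struct clk_limits *dc_mode,
				 struct clk_limits *max)
{
	if (dc_mode->dcfclk_mhz)
		max->dcfclk_mhz = dc_mode->dcfclk_mhz;
	if (dc_mode->fclk_mhz)
		max->fclk_mhz = dc_mode->fclk_mhz;
	if (dc_mode->memclk_mhz)
		max->memclk_mhz = dc_mode->memclk_mhz;
}

int main(void)
{
	struct clk_limits max = { 1564, 2500, 1250 };
	struct clk_limits dc_mode = { 0, 0, 1000 };	/* only UCLK capped */

	apply_dc_mode_limits(&dc_mode, &max);
	printf("dcfclk=%d fclk=%d memclk=%d\n",
	       max.dcfclk_mhz, max.fclk_mhz, max.memclk_mhz);	/* 1564 2500 1000 */
	return 0;
}

+ // Insert entry with all max dc limits without bandwidth matching + if (!disable_dc_mode_overwrite) { + struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry; + + max_dc_limits_entry.dcfclk_mhz =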
max_clk_data.dcfclk_mhz; + max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz; + max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16; + + max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry); + dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry); + + sort_entries_with_same_bw(table, num_entries); + remove_inconsistent_entries(table, num_entries); + } + + + // At this point, the table only contains supported points of interest // it could be used as is, but some states may be redundant due to // coarse grained nature of some clocks, so we want to round up to @@ -471,80 +610,78 @@ static void dcn321_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } + /* Overrides from dc->config options */ + dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_21_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } + /* Override from passed dc->bb_overrides if available*/ + if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } - if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_21_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } - if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - dcn3_21_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } - if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) - != dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_21_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } + if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_21_soc.dram_clock_change_latency_us = + 
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } - if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_21_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_21_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; + if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_21_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_21_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_21_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_21_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_21_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_21_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_21_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; } + } - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); } + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -555,150 +692,149 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if 
((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { - if (dc->debug.use_legacy_soc_bb_mechanism) { - unsigned int i = 0, j = 0, num_states = 0; - - unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; - unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; - - unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564}; - unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0; - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; - - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - } - if (!max_dcfclk_mhz) - max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz; - if (!max_dispclk_mhz) - max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz; - if (!max_dppclk_mhz) - max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz; - if (!max_phyclk_mhz) - max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; - - if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array - dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; - num_dcfclk_sta_targets++; - } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates - for (i = 0; i < num_dcfclk_sta_targets; i++) { - if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { - dcfclk_sta_targets[i] = max_dcfclk_mhz; - break; - } + if (dc->debug.use_legacy_soc_bb_mechanism) { + unsigned int i = 0, j = 0, num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564}; + unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0; + unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = 
dcn3_21_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; + + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; } - // Update size of array since we "removed" duplicates - num_dcfclk_sta_targets = i + 1; } + // Update size of array since we "removed" duplicates + num_dcfclk_sta_targets = i + 1; + } - num_uclk_states = bw_params->clk_table.num_entries; + num_uclk_states = bw_params->clk_table.num_entries; - // Calculate optimal dcfclk for each uclk - for (i = 0; i < num_uclk_states; i++) { - dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, - &optimal_dcfclk_for_uclk[i], NULL); - if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { - optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; - } + // Calculate optimal dcfclk for each uclk + for (i = 0; i < num_uclk_states; i++) { + dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; } + } - // Calculate optimal uclk for each dcfclk sta target - for (i = 0; i < num_dcfclk_sta_targets; i++) { - for (j = 0; j < num_uclk_states; j++) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { - optimal_uclk_for_dcfclk_sta_targets[i] = - bw_params->clk_table.entries[j].memclk_mhz * 16; - break; - } + // Calculate optimal uclk for each dcfclk sta target + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; } } + } - i = 0; - j = 0; - // create the final dcfclk and uclk table - while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + i = 0; + j = 0; + // create the final dcfclk and uclk table + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; } else { - if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = 
bw_params->clk_table.entries[j++].memclk_mhz * 16; - } else { - j = num_uclk_states; - } + j = num_uclk_states; } } + } - while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } - while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && - optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } - dcn3_21_soc.num_states = num_states; - for (i = 0; i < dcn3_21_soc.num_states; i++) { - dcn3_21_soc.clock_limits[i].state = i; - dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; - dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; - - /* Fill all states with max values of all these clocks */ - dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; - dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; - dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3; - - /* Populate from bw_params for DTBCLK, SOCCLK */ - if (i > 0) { - if (!bw_params->clk_table.entries[i].dtbclk_mhz) { - dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz; - } else { - dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - } - } else if (bw_params->clk_table.entries[i].dtbclk_mhz) { + dcn3_21_soc.num_states = num_states; + for (i = 0; i < dcn3_21_soc.num_states; i++) { + dcn3_21_soc.clock_limits[i].state = i; + dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + + /* Fill all states with max values of all these clocks */ + dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3; + + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (i > 0) { + if (!bw_params->clk_table.entries[i].dtbclk_mhz) { + dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz; + } else { dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; } + } else if (bw_params->clk_table.entries[i].dtbclk_mhz) { + dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + } - if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) - dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz; - else - dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; - if (!dram_speed_mts[i] && i > 0) - dcn3_21_soc.clock_limits[i].dram_speed_mts = 
dcn3_21_soc.clock_limits[i-1].dram_speed_mts; - else - dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + if (!dram_speed_mts[i] && i > 0) + dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts; + else + dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; - /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */ - /* PHYCLK_D18, PHYCLK_D32 */ - dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; - } - } else { - build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); + /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */ + /* PHYCLK_D18, PHYCLK_D32 */ + dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; } - - /* Re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); - if (dc->current_state) - dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + } else { + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); } + + /* Re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h index e8fad9b4be69..c6623b3705ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h @@ -29,10 +29,6 @@ #include "dml/display_mode_vba.h" -void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, - unsigned int *num_entries, - struct _vcs_dpi_voltage_scaling_st *entry); - void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 0bffae95f3a2..d5831a34f5a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -190,6 +190,14 @@ enum dm_validation_status { DML_FAIL_DSC_INPUT_BPC, DML_FAIL_PREFETCH_SUPPORT, DML_FAIL_V_RATIO_PREFETCH, + DML_FAIL_P2I_WITH_420, + DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP, + DML_FAIL_NOT_DSC422_NATIVE, + DML_FAIL_ODM_COMBINE4TO1, + DML_FAIL_ENOUGH_WRITEBACK_UNITS, + DML_FAIL_VIEWPORT_EXCEEDS_SURFACE, + DML_FAIL_DYNAMIC_METADATA, + DML_FAIL_FMT_BUFFER_EXCEEDED, }; enum writeback_config { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index bdf3ac6cadd5..da0cfbb071e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -113,7 +113,6 @@ void dml_init_instance(struct display_mode_lib *lib, lib->funcs = dml30_funcs; break; case DML_PROJECT_DCN31: - case DML_PROJECT_DCN31_FPGA: case DML_PROJECT_DCN315: lib->funcs = dml31_funcs; break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index a9d49ef58fb5..5edf69fa40d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -41,7 +41,6 @@ enum dml_project { DML_PROJECT_DCN30, DML_PROJECT_DCN31, DML_PROJECT_DCN315, - DML_PROJECT_DCN31_FPGA, DML_PROJECT_DCN314, DML_PROJECT_DCN32, }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 3c077164f362..fb17f8868cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -167,6 +167,7 @@ struct _vcs_dpi_voltage_scaling_st { double phyclk_mhz; double dppclk_mhz; double dtbclk_mhz; + float net_bw_in_kbytes_sec; }; /** @@ -619,8 +620,7 @@ struct _vcs_dpi_display_dlg_regs_st { unsigned int refcyc_h_blank_end; unsigned int dlg_vblank_end; unsigned int min_dst_y_next_start; - unsigned int optimized_min_dst_y_next_start; - unsigned int optimized_min_dst_y_next_start_us; + unsigned int min_dst_y_next_start_us; unsigned int refcyc_per_htotal; unsigned int refcyc_x_after_scaler; unsigned int dst_y_after_scaler; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index f9653f511baa..9a3ded311195 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -571,6 +571,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate; mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy; mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + if (src->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + else + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes; //TODO: Need to assign correct values to dp_multistream vars mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en; mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id; @@ -785,6 +789,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[k] = mode_lib->vba.NumberOfActivePlanes; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++; + if (src_k->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override; if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += @@ -927,18 +933,16 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) } /** - * ******************************************************************************************** * cache_debug_params: Cache any params that needed to be maintained from the initial validation * for debug purposes. 
* * The DML getters can modify some of the VBA params that we are interested in (for example when * calculating with dummy p-state latency), so cache any params here that we want for debugging * - * @param [in] mode_lib: mode_lib input/output of validate call + * @mode_lib: mode_lib input/output of validate call * - * @return: void + * Return: void * - * ******************************************************************************************** */ static void cache_debug_params(struct display_mode_lib *mode_lib) { diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 2bdc47615543..3966845c7694 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed; static bool dsc_policy_disable_dsc_stream_overhead; +static bool disable_128b_132b_stream_overhead; + #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) #endif @@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead; #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #endif +/* Need to account for padding due to pixel-to-symbol packing + * for uncompressed 128b/132b streams. + */ +static uint32_t apply_128b_132b_stream_overhead( + const struct dc_crtc_timing *timing, const uint32_t kbps) +{ + uint32_t total_kbps = kbps; + + if (disable_128b_132b_stream_overhead) + return kbps; + + if (!timing->flags.DSC) { + struct fixed31_32 bpp; + struct fixed31_32 overhead_factor; + + bpp = dc_fixpt_from_int(kbps); + bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); + + /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) + * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive + */ + overhead_factor = dc_fixpt_from_int(timing->h_addressable); + overhead_factor = dc_fixpt_mul(overhead_factor, bpp); + overhead_factor = dc_fixpt_div_int(overhead_factor, 128); + overhead_factor = dc_fixpt_div( + dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), + overhead_factor); + + total_kbps = dc_fixpt_ceil( + dc_fixpt_mul_int(overhead_factor, total_kbps)); + } + + return total_kbps; +} + uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding) { uint32_t bits_per_channel = 0; uint32_t kbps; @@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing( kbps = kbps * 2 / 3; } + if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + kbps = apply_128b_132b_stream_overhead(timing, kbps); + return kbps; } @@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); static uint32_t compute_bpp_x16_from_target_bandwidth( @@ -133,6 +175,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; @@ -417,11 +461,11 @@ bool 
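/*
 * [Editor's note: illustrative sketch, not part of the patch.] The dc_dsc.c
 * hunk above adds apply_128b_132b_stream_overhead(), which pads the bandwidth
 * of uncompressed streams so every scanline occupies a whole number of
 * 4-lane x 32-bit (128-bit) symbol cycles. A simplified standalone model of
 * the same arithmetic, using doubles instead of the driver's dc_fixpt_*
 * helpers, with hypothetical 1366x768 timing values:
 */
#include <math.h>
#include <stdio.h>

static unsigned int kbps_with_128b_132b_overhead(unsigned int kbps,
		unsigned int h_addressable, unsigned int pix_clk_khz)
{
	double bpp = (double)kbps / pix_clk_khz;      /* bits per pixel on the link */
	double symbols = h_addressable * bpp / 128.0; /* symbol cycles per line */
	double factor = ceil(symbols) / symbols;      /* per-line padding ratio */

	return (unsigned int)ceil(factor * kbps);
}

int main(void)
{
	/* hypothetical 1366x768@60, 8bpc RGB: 85.5 MHz pixel clock x 24 bpp */
	unsigned int raw = 85500 * 24;

	/* 1366 * 24 / 128 = 256.125 cycles -> rounded up to 257, ~0.34% padding */
	printf("%u -> %u kbps\n", raw,
	       kbps_with_128b_132b_overhead(raw, 1366, 85500));
	return 0;
}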
dc_dsc_compute_bandwidth_range( if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, &config); + &options, link_encoding, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, - config.num_slices_h, &dsc_common_caps, timing, range); + config.num_slices_h, &dsc_common_caps, timing, link_encoding, range); return is_dsc_possible; } @@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; @@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range( /* populate output structure */ if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding); /* max dsc target bpp */ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, @@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16( const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const int num_slices_h, + const enum dc_link_encoding_format link_encoding, int *target_bpp_x16) { struct dc_dsc_bw_range range; @@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16( *target_bpp_x16 = 0; if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range)) { + num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { if (policy->enable_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ @@ -645,8 +691,6 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av { int idx = 0; - memset(available_slices, -1, MIN_AVAILABLE_SLICES_SIZE); - if (slice_caps.bits.NUM_SLICES_1) available_slices[idx++] = 1; @@ -700,7 +744,7 @@ static int inc_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices) } } - if (new_num_slices == num_slices) // No biger number of slices found + if (new_num_slices == num_slices) // No bigger number of slices found new_num_slices++; return new_num_slices; @@ -798,6 +842,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -952,6 +997,13 @@ static bool setup_dsc_config( else is_dsc_possible = false; } + // When we force 2:1 ODM, we can't have 1 slice to divide amongst 2 separate DSC instances + // need to enforce at minimum 2 horizontal slices + if (options->dsc_force_odm_hslice_override) { + num_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, 2); + if (num_slices_h == 0) + is_dsc_possible = false; + } if (!is_dsc_possible) goto done; @@ -990,6 +1042,7 @@ static bool setup_dsc_config( target_bandwidth_kbps, timing, num_slices_h, + link_encoding, &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } @@ -1018,6 +1071,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { bool is_dsc_possible = 
false; @@ -1027,7 +1081,7 @@ bool dc_dsc_compute_config( is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, dsc_cfg); + timing, options, link_encoding, dsc_cfg); return is_dsc_possible; } @@ -1160,9 +1214,15 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) dsc_policy_disable_dsc_stream_overhead = disable; } +void dc_set_disable_128b_132b_stream_overhead(bool disable) +{ + disable_128b_132b_stream_overhead = disable; +} + void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options) { options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override; + options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine; options->max_target_bpp_limit_override_x16 = 0; options->slice_height_granularity = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 59884ef651b3..4a2bf81286d8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -31,21 +31,21 @@ /****************************** new register headers */ /*** following in header */ -#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \ +#define DDC_GPIO_REG_LIST_ENTRY(type, cd, id) \ .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\ .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT -#define DDC_GPIO_REG_LIST(cd,id) \ +#define DDC_GPIO_REG_LIST(cd, id) \ {\ - DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\ + DDC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(A, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(EN, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(Y, cd, id)\ } -#define DDC_REG_LIST(cd,id) \ - DDC_GPIO_REG_LIST(cd,id),\ +#define DDC_REG_LIST(cd, id) \ + DDC_GPIO_REG_LIST(cd, id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) #define DDC_REG_LIST_DCN2(cd, id) \ @@ -54,34 +54,34 @@ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\ +#define DDC_GPIO_VGA_REG_LIST_ENTRY(type, cd)\ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\ .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT #define DDC_GPIO_VGA_REG_LIST(cd) \ {\ - DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\ + DDC_GPIO_VGA_REG_LIST_ENTRY(MASK, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(A, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(EN, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(Y, cd)\ } #define DDC_VGA_REG_LIST(cd) \ DDC_GPIO_VGA_REG_LIST(cd),\ .ddc_setup = mmDC_I2C_DDCVGA_SETUP -#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \ +#define DDC_GPIO_I2C_REG_LIST_ENTRY(type, cd) \ .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\ .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT #define DDC_GPIO_I2C_REG_LIST(cd) \ {\ - DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\ + 
DDC_GPIO_I2C_REG_LIST_ENTRY(MASK, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(A, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(EN, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(Y, cd)\ } #define DDC_I2C_REG_LIST(cd) \ @@ -150,12 +150,12 @@ struct ddc_sh_mask { #define ddc_data_regs(id) \ {\ - DDC_REG_LIST(DATA,id)\ + DDC_REG_LIST(DATA, id)\ } #define ddc_clk_regs(id) \ {\ - DDC_REG_LIST(CLK,id)\ + DDC_REG_LIST(CLK, id)\ } #define ddc_vga_data_regs \ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h index dcfdd71b2304..debb363cfcf4 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h @@ -36,17 +36,17 @@ #define ONE_MORE_5 6 -#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \ +#define HPD_GPIO_REG_LIST_ENTRY(type, cd, id) \ .type ## _reg = REG(DC_GPIO_HPD_## type),\ .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT #define HPD_GPIO_REG_LIST(id) \ {\ - HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\ + HPD_GPIO_REG_LIST_ENTRY(MASK, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(A, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(EN, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(Y, cd, id)\ } #define HPD_REG_LIST(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 2eb597a24425..eaad1260bfd1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -37,6 +37,7 @@ #include "dwb.h" #include "mcif_wb.h" #include "panel_cntl.h" +#include "dmub/inc/dmub_cmd.h" #define MAX_CLOCK_SOURCES 7 #define MAX_SVP_PHANTOM_STREAMS 2 @@ -64,6 +65,7 @@ struct resource_context; struct clk_bw_params; struct resource_funcs { + enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index); void (*destroy)(struct resource_pool **pool); void (*link_init)(struct dc_link *link); struct panel_cntl*(*panel_cntl_create)( @@ -124,39 +126,15 @@ struct resource_funcs { struct dc *dc, struct dc_state *context); - /* - * Acquires a free pipe for the head pipe. - * The head pipe is first pipe in the current context that matches the stream - * and does not have a top pipe or prev_odm_pipe. - */ - struct pipe_ctx *(*acquire_idle_pipe_for_layer)( - struct dc_state *context, - const struct resource_pool *pool, - struct dc_stream_state *stream); - - /* - * Acquires a free pipe for the head pipe with some additional checks for odm. - * The head pipe is passed in as an argument unlike acquire_idle_pipe_for_layer - * where it is read from the context. So this allows us look for different - * idle_pipe if the head_pipes are different ( ex. in odm 2:1 when we have - * a left and right pipe ). - * - * It also checks the old context to see if: - * - * 1. a pipe has already been allocated for the head pipe. If so, it will - * try to select that pipe as the idle pipe if it is available in the current - * context. - * 2. if the head_pipe is on the left, it will check if the right pipe has - * a pipe already allocated. If so, it will not use that pipe if it is - * selected as the idle pipe. 
- */ - struct pipe_ctx *(*acquire_idle_pipe_for_head_pipe_in_layer)( - struct dc_state *context, + struct pipe_ctx *(*acquire_free_pipe_as_secondary_dpp_pipe)( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream, - struct pipe_ctx *head_pipe); + const struct pipe_ctx *opp_head_pipe); - enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps); + enum dc_status (*validate_plane)( + const struct dc_plane_state *plane_state, + struct dc_caps *caps); enum dc_status (*add_stream_to_ctx)( struct dc *dc, @@ -303,6 +281,8 @@ struct resource_pool { struct dmcu *dmcu; struct dmub_psr *psr; + struct dmub_replay *replay; + struct abm *multiple_abms[MAX_PIPES]; const struct resource_funcs *funcs; @@ -374,6 +354,7 @@ union pipe_update_flags { uint32_t viewport : 1; uint32_t plane_changed : 1; uint32_t det_size : 1; + uint32_t unbounded_req : 1; } bits; uint32_t raw; }; @@ -426,6 +407,8 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + struct tg_color visual_confirm_color; + bool has_vactive_margin; }; /* Data used for dynamic link encoder assignment. @@ -496,6 +479,11 @@ struct bw_context { struct display_mode_lib dml; }; +struct dc_dmub_cmd { + union dmub_rb_cmd dmub_cmd; + enum dm_dmub_wait_type wait_type; +}; + /** * struct dc_state - The full description of a state requested by users */ @@ -544,6 +532,11 @@ struct dc_state { */ struct bw_context bw_ctx; + struct block_sequence block_sequence[50]; + unsigned int block_sequence_steps; + struct dc_dmub_cmd dc_dmub_cmd[10]; + unsigned int dmub_cmd_count; + /** * @refcount: refcount reference * @@ -558,6 +551,23 @@ struct dc_state { } perf_params; }; +struct replay_context { + /* ddc line */ + enum channel_id aux_inst; + /* Transmitter id */ + enum transmitter digbe_inst; + /* Engine Id is used for Dig Be source select */ + enum engine_id digfe_inst; + /* Controller Id used for Dig Fe source select */ + enum controller_id controllerId; + unsigned int line_time_in_ns; +}; + +enum dc_replay_enable { + DC_REPLAY_DISABLE = 0, + DC_REPLAY_ENABLE = 1, +}; + struct dc_bounding_box_max_clk { int max_dcfclk_mhz; int max_dispclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index ecb4191b6e64..33db15d69f23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -27,6 +27,8 @@ #include "dm_services_types.h" +struct abm_save_restore; + struct abm { struct dc_context *ctx; const struct abm_funcs *funcs; @@ -55,6 +57,14 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*save_restore)( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData); + bool (*set_pipe_ex)(struct abm *abm, + unsigned int otg_inst, + unsigned int option, + unsigned int panel_inst); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 7254182b7c72..af6b9509d09d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -172,8 +172,6 @@ struct aux_engine_funcs { struct aux_engine *engine, uint8_t *returned_bytes); bool (*is_engine_available)(struct aux_engine *engine); - enum i2caux_engine_type (*get_engine_type)( - const struct 
aux_engine *engine); bool (*acquire)( struct aux_engine *engine, struct ddc *ddc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index bef843cc32a1..ecb7bcc39469 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -230,9 +230,11 @@ struct clk_bw_params { unsigned int dram_channel_width_bytes; unsigned int dispclk_vco_khz; unsigned int dc_mode_softmax_memclk; + unsigned int max_memclk_mhz; struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; + struct clk_limit_table_entry dc_mode_limit; }; /* Public interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index ad6acd1b34e1..3e2f0f64c98c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -68,6 +68,7 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; + bool dpp_clock_gated[MAX_PIPES]; //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ //int audio_dtbclk_khz;/* TODO needs to be removed */ //int ref_dtbclk_khz;/* TODO needs to be removed */ @@ -122,6 +123,11 @@ struct dccg_funcs { struct dccg *dccg, int hpo_le_inst); + void (*set_symclk32_le_root_clock_gating)( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void (*set_physymclk)( struct dccg *dccg, int phy_inst, @@ -159,10 +165,23 @@ struct dccg_funcs { int otg_inst, int pixclk_khz); + void (*trigger_dio_fifo_resync)( + struct dccg *dccg); + void (*dpp_root_clock_control)( struct dccg *dccg, unsigned int dpp_inst, bool clock_on); + + void (*enable_symclk_se)( + struct dccg *dccg, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); + + void (*disable_symclk_se)( + struct dccg *dccg, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index beb26dc8a07f..f5677dbb4e7d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -49,6 +49,8 @@ struct dcn_hubbub_wm_set { uint32_t dram_clk_change; uint32_t usr_retrain; uint32_t fclk_pstate_change; + uint32_t sr_enter_exit_Z8; + uint32_t sr_enter_Z8; }; struct dcn_hubbub_wm { @@ -111,6 +113,9 @@ struct dcn_hubbub_state { uint32_t vm_error_vmid; uint32_t vm_error_pipe; uint32_t vm_error_mode; + uint32_t test_debug_data; + uint32_t watermark_change_cntl; + uint32_t dram_state_cntl; }; struct hubbub_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c4fbbf08ef86..a6dedf3c7d74 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -269,6 +269,7 @@ struct stream_encoder_funcs { struct stream_encoder *enc, unsigned int pix_per_container); void (*enable_fifo)(struct stream_encoder *enc); void (*disable_fifo)(struct stream_encoder *enc); + void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst); }; struct hpo_dp_stream_encoder_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 88ac723d10aa..02ff99f7bec2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -44,6 +44,138 @@ struct dc_virtual_addr_space_config; struct dpp; struct dce_hwseq; struct link_resource; +struct dc_dmub_cmd; + +struct subvp_pipe_control_lock_fast_params { + struct dc *dc; + bool lock; + struct pipe_ctx *pipe_ctx; +}; + +struct pipe_control_lock_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool lock; +}; + +struct set_flip_control_gsl_params { + struct pipe_ctx *pipe_ctx; + bool flip_immediate; +}; + +struct program_triplebuffer_params { + const struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool enableTripleBuffer; +}; + +struct update_plane_addr_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_input_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_plane_state *plane_state; +}; + +struct program_gamut_remap_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_manual_trigger_params { + struct pipe_ctx *pipe_ctx; +}; + +struct send_dmcub_cmd_params { + struct dc_context *ctx; + union dmub_rb_cmd *cmd; + enum dm_dmub_wait_type wait_type; +}; + +struct setup_dpp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_bias_and_scale_params { + struct pipe_ctx *pipe_ctx; +}; + +struct set_output_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + const struct dc_stream_state *stream; +}; + +struct update_visual_confirm_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + int mpcc_id; +}; + +struct power_on_mpc_mem_pwr_params { + struct mpc *mpc; + int mpcc_id; + bool power_on; +}; + +struct set_output_csc_params { + struct mpc *mpc; + int opp_id; + const uint16_t *regval; + enum mpc_output_csc_mode ocsc_mode; +}; + +struct set_ocsc_default_params { + struct mpc *mpc; + int opp_id; + enum dc_color_space color_space; + enum mpc_output_csc_mode ocsc_mode; +}; + +union block_sequence_params { + struct update_plane_addr_params update_plane_addr_params; + struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; + struct pipe_control_lock_params pipe_control_lock_params; + struct set_flip_control_gsl_params set_flip_control_gsl_params; + struct program_triplebuffer_params program_triplebuffer_params; + struct set_input_transfer_func_params set_input_transfer_func_params; + struct program_gamut_remap_params program_gamut_remap_params; + struct program_manual_trigger_params program_manual_trigger_params; + struct send_dmcub_cmd_params send_dmcub_cmd_params; + struct setup_dpp_params setup_dpp_params; + struct program_bias_and_scale_params program_bias_and_scale_params; + struct set_output_transfer_func_params set_output_transfer_func_params; + struct update_visual_confirm_params update_visual_confirm_params; + struct power_on_mpc_mem_pwr_params power_on_mpc_mem_pwr_params; + struct set_output_csc_params set_output_csc_params; + struct set_ocsc_default_params set_ocsc_default_params; +}; + +enum block_sequence_func { + DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0, + OPTC_PIPE_CONTROL_LOCK, + HUBP_SET_FLIP_CONTROL_GSL, + HUBP_PROGRAM_TRIPLEBUFFER, + HUBP_UPDATE_PLANE_ADDR, + DPP_SET_INPUT_TRANSFER_FUNC, + DPP_PROGRAM_GAMUT_REMAP, + OPTC_PROGRAM_MANUAL_TRIGGER, + DMUB_SEND_DMCUB_CMD, + DPP_SETUP_DPP, + DPP_PROGRAM_BIAS_AND_SCALE, + DPP_SET_OUTPUT_TRANSFER_FUNC, + MPC_UPDATE_VISUAL_CONFIRM, + MPC_POWER_ON_MPC_MEM_PWR, + MPC_SET_OUTPUT_CSC, + MPC_SET_OCSC_DEFAULT, +}; + +struct block_sequence { + union block_sequence_params params; + enum block_sequence_func func; +}; struct hw_sequencer_funcs { void 
(*hardware_release)(struct dc *dc); @@ -252,12 +384,12 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); + void (*subvp_pipe_control_lock_fast)(union block_sequence_params *params); void (*z10_restore)(const struct dc *dc); void (*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); void (*update_phantom_vp_position)(struct dc *dc, @@ -294,6 +426,7 @@ void get_surface_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); @@ -306,4 +439,36 @@ void get_mpctree_visual_confirm_color( void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps); + +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx); + +void hwss_send_dmcub_cmd(union block_sequence_params *params); + +void hwss_program_manual_trigger(union block_sequence_params *params); + +void hwss_setup_dpp(union block_sequence_params *params); + +void hwss_program_bias_and_scale(union block_sequence_params *params); + +void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params); + +void hwss_set_output_csc(union block_sequence_params *params); + +void hwss_set_ocsc_default(union block_sequence_params *params); + #endif /* __DC_HW_SEQUENCER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 4513544559be..4ca4192c1e12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -160,6 +160,8 @@ struct hwseq_private_funcs { unsigned int *k1_div, unsigned int *k2_div); void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); + void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, + struct dc_state *context); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index f839494d59d8..e3e8c76c17cf 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -179,6 +179,10 @@ struct link_service { int (*aux_transfer_raw)(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result); + bool (*configure_fixed_vs_pe_retimer)( + struct ddc_service *ddc, + const uint8_t *data, + uint32_t len); bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc, struct aux_payload *payload); bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc); @@ -269,6 +273,20 @@ struct link_service { uint16_t psr_vtotal_su); void (*edp_get_psr_residency)( const struct dc_link *link, uint32_t *residency); + + bool (*edp_get_replay_state)( + const struct dc_link *link, uint64_t *state); + bool (*edp_set_replay_allow_active)(struct dc_link *dc_link, + const bool *enable, bool wait, bool force_static, + const unsigned int *power_opts); + bool (*edp_setup_replay)(struct dc_link *link, + const struct 
dc_stream_state *stream); + bool (*edp_set_coasting_vtotal)( + struct dc_link *link, uint16_t coasting_vtotal); + bool (*edp_replay_residency)(const struct dc_link *link, + unsigned int *residency, const bool is_start, + const bool is_alpm); + bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index eaeb684c8a48..e546b9c506c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -142,10 +142,6 @@ struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool); -struct pipe_ctx *resource_get_head_pipe_for_stream( - struct resource_context *res_ctx, - struct dc_stream_state *stream); - bool resource_attach_surfaces_to_context( struct dc_plane_state *const *plane_state, int surface_count, @@ -153,11 +149,232 @@ bool resource_attach_surfaces_to_context( struct dc_state *context, const struct resource_pool *pool); -struct pipe_ctx *find_idle_secondary_pipe( +#define FREE_PIPE_INDEX_NOT_FOUND -1 + +/* + * pipe types are identified based on MUXes in DCN front end that are capable + * of taking input from one DCN pipeline to another DCN pipeline. The name is + * in a form of XXXX_YYYY, where XXXX is the DCN front end hardware block the + * pipeline ends with and YYYY is the rendering role that the pipe is in. + * + * For instance OTG_MASTER is a pipe ending with OTG hardware block in its + * pipeline and it is in a role of a master pipe for timing generation. + * + * For quick reference a diagram of each pipe type's areas of responsibility + * for outputting timings on the screen is shown below: + * + * Timing Active for Stream 0 + * __________________________________________________ + * |OTG master 0 (OPP head 0)|OPP head 2 (DPP pipe 2) | + * | (DPP pipe 0)| | + * | Top Plane 0 | | + * | ______________|____ | + * | |DPP pipe 1 |DPP | | + * | | |pipe| | + * | | Bottom |3 | | + * | | Plane 1 | | | + * | | | | | + * | |______________|____| | + * | | | + * | | | + * | ODM slice 0 | ODM slice 1 | + * |_________________________|________________________| + * + * Timing Active for Stream 1 + * __________________________________________________ + * |OTG master 4 (OPP head 4) | + * | | + * | | + * | | + * | | + * | | + * | Blank Pixel Data | + * | (generated by DPG4) | + * | | + * | | + * | | + * | | + * | | + * |__________________________________________________| + * + * Inter-pipe Relation + * __________________________________________________ + * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER | + * | | plane 0 | slice 0 | | + * | 0 | -------------MPC---------ODM----------- | + * | | plane 1 | | | | | + * | 1 | ------------- | | | | + * | | plane 0 | slice 1 | | | + * | 2 | -------------MPC--------- | | + * | | plane 1 | | | | + * | 3 | ------------- | | | + * | | | blank | | + * | 4 | | ----------------------- | + * | | | | | + * | 5 | (FREE) | | | + * |________|_______________|___________|_____________| + */ +enum pipe_type { + /* free pipe - free pipe is an uninitialized pipe without a stream + * associated with it. It is a free DCN pipe resource. It can be + * acquired as any type of pipe. + */ + FREE_PIPE, + + /* OTG master pipe - the master pipe of its OPP head pipes with a + * functional OTG. It merges all its OPP head pipes pixel data in ODM + * block and output to backend DIG. 
OTG master pipe is responsible for + * generating entire crtc timing to backend DIG. An OTG master pipe may + * or may not have a plane. If it has a plane it blends it as the left + * most MPC slice of the top most layer. If it doesn't have a plane it + * can output pixel data from its OPP head pipes' test pattern + * generators (DPG) such as solid black pixel data to blank the screen. + */ + OTG_MASTER, + + /* OPP head pipe - the head pipe of an MPC blending tree with a + * functional OPP outputting to an OTG. OPP head pipe is responsible for + * processing output pixels in its own ODM slice. It may or may not have + * a plane. If it has a plane it blends it as the top most layer within + * its own ODM slice. If it doesn't have a plane it can output pixel + * data from its DPG such as solid black pixel data to blank the pixel + * data in its own ODM slice. OTG master pipe is also an OPP head pipe + * but with more responsibility. + */ + OPP_HEAD, + + /* DPP pipe - the pipe with a functional DPP outputting to an OPP head + * pipe's MPC. DPP pipe is responsible for processing pixel data from + * its own MPC slice of a plane. It must be connected to an OPP head + * pipe and it must have a plane associated with it. + */ + DPP_PIPE, +}; + +/* + * Determine if the input pipe ctx is of a pipe type. + * return - true if pipe ctx is of the input type. + */ +bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type); + +/* + * Determine if the input pipe ctx is used for rendering a plane with MPCC + * combine. MPCC combine is a hardware feature to combine multiple DPP pipes + * into a single plane. It is typically used for bypassing pipe bandwidth + * limitation for rendering a very large plane or saving power by reducing UCLK + * and DPPCLK speeds. + * + * For instance in the Inter-pipe Relation diagram shown below, both PIPE 0 and + * 1 are for MPCC combine for plane 0 + * + * Inter-pipe Relation + * __________________________________________________ + * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER | + * | | plane 0 | | | + * | 0 | -------------MPC----------------------- | + * | | plane 0 | | | | + * | 1 | ------------- | | | + * |________|_______________|___________|_____________| + * + * return - true if pipe ctx is used for mpcc combine. + */ +bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx); + +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in MPC blending tree associated with input OPP head pipe. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_in_cur_mpc_blending_tree( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct pipe_ctx *cur_opp_head); + +/* + * Look for a free pipe in new resource context that is not used in current + * resource context. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int recource_find_free_pipe_not_used_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in any MPCC combine in current resource context. 
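/*
 * [Editor's note: illustrative sketch, not part of the patch.] With the
 * pipe_type query added above, callers can replace open-coded topology
 * checks such as "!pipe->top_pipe && !pipe->prev_odm_pipe". A hypothetical
 * helper in the style of the dp_set_test_pattern change later in this diff:
 */
static struct pipe_ctx *find_otg_master_for_link(struct pipe_ctx pipes[],
		int pipe_count, const struct dc_link *link)
{
	int i;

	for (i = 0; i < pipe_count; i++) {
		if (resource_is_pipe_type(&pipes[i], OTG_MASTER) &&
				pipes[i].stream && pipes[i].stream->link == link)
			return &pipes[i];
	}

	return NULL;
}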
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Look for any free pipe in new resource context. + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_any_free_pipe(struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Legacy find free secondary pipe logic deprecated for newer DCNs as it doesn't + * find the most optimal free pipe to prevent from time consuming hardware state + * transitions. + */ +struct pipe_ctx *resource_find_free_secondary_pipe_legacy( struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe); +/* + * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice + * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it + * will have 4 pieces of slice. + * return - 0 if pipe is not used for a plane with MPCC combine. otherwise + * the number of MPC "cuts" for the plane. + */ +int resource_get_num_mpc_splits(const struct pipe_ctx *pipe); + +/* + * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice + * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it + * will have 4 pieces of slice. + * return - 0 if pipe is not used for ODM combine. otherwise + * the number of ODM "cuts" for the timing. + */ +int resource_get_num_odm_splits(const struct pipe_ctx *pipe); + +/* + * Get the OTG master pipe in resource context associated with the stream. + * return - NULL if not found. Otherwise the OTG master pipe associated with the + * stream. + */ +struct pipe_ctx *resource_get_otg_master_for_stream( + struct resource_context *res_ctx, + struct dc_stream_state *stream); + +/* + * Get the OTG master pipe for the input pipe context. + * return - the OTG master pipe for the input pipe + * context. + */ +struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx); + +/* + * Get the OPP head pipe for the input pipe context. + * return - the OPP head pipe for the input pipe + * context. 
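/*
 * [Editor's note: illustrative sketch, not part of the patch.] A hypothetical
 * way a res_pool implementation could chain the free-pipe finders declared
 * above, from most to least preferred, so the chosen pipe minimizes hardware
 * state transitions; note the second helper really is spelled "recource_..."
 * (sic) in this header:
 */
static int find_free_sec_dpp_pipe_idx(const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *cur_opp_head)
{
	int i;

	/* 1. the pipe already blending in this OPP head's current MPC tree */
	i = resource_find_free_pipe_used_in_cur_mpc_blending_tree(cur_res_ctx,
			new_res_ctx, cur_opp_head);
	if (i != FREE_PIPE_INDEX_NOT_FOUND)
		return i;

	/* 2. a pipe that is idle in both the current and the new context */
	i = recource_find_free_pipe_not_used_in_cur_res_ctx(cur_res_ctx,
			new_res_ctx, pool);
	if (i != FREE_PIPE_INDEX_NOT_FOUND)
		return i;

	/* 3. a pipe currently acting as a secondary DPP in an MPCC combine */
	i = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(cur_res_ctx,
			new_res_ctx, pool);
	if (i != FREE_PIPE_INDEX_NOT_FOUND)
		return i;

	/* 4. any free pipe at all */
	return resource_find_any_free_pipe(new_res_ctx, pool);
}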
+ */ +struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx); + + bool resource_validate_attach_surfaces( const struct dc_validation_set set[], int set_count, @@ -193,10 +410,6 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); -int get_num_mpc_splits(struct pipe_ctx *pipe); - -int get_num_odm_splits(struct pipe_ctx *pipe); - bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index c923b2af8510..37bc98faa7a0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -38,10 +38,9 @@ #define DCN_BASE__INST0_SEG2 0x000034C0 -static enum dc_irq_source to_dal_irq_source_dcn314( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index a52b56e2859e..6af8a97d4a77 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -42,7 +42,8 @@ AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES) ############################################################################### # hwss ############################################################################### -LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o +LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \ +link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ $(LINK_HWSS)) diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index db9f1baa27e5..fe4282771cd0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link, stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; - int width = pipe_ctx->stream->timing.h_addressable + + struct pipe_ctx *odm_pipe; + int odm_cnt = 1; + int h_active = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; - int height = pipe_ctx->stream->timing.v_addressable + + int v_active = pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; memset(¶ms, 0, sizeof(params)); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_cnt++; + + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); + switch (test_pattern) { case DP_TEST_PATTERN_COLOR_SQUARES: controller_test_pattern = @@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link, { /* disable bit depth reduction */ pipe_ctx->stream->bit_depth_params = params; - 
opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; + } else if (link->dc->hwss.set_disp_pattern_generator) { enum controller_dp_color_space controller_color_space; - int opp_cnt = 1; - int offset = 0; - int dpg_width = width; + struct output_pixel_processor *odm_opp; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -502,24 +508,9 @@ static void set_crtc_test_pattern(struct dc_link *link, break; } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - dpg_width = width / opp_cnt; - offset = dpg_width; - - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -527,11 +518,23 @@ static void set_crtc_test_pattern(struct dc_link *link, controller_color_space, color_depth, NULL, - dpg_width, - height, + odm_slice_width, + v_active, offset); - offset += offset; + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); } } break; @@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link, /* restore bitdepth reduction */ resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - int dpg_width; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - dpg_width = width / opp_cnt; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + struct output_pixel_processor *odm_opp; + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link, 
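/*
 * [Editor's note: illustrative sketch, not part of the patch.] The
 * set_crtc_test_pattern() hunks around this point split the active width
 * into one pattern-generator region per ODM slice, with the last slice
 * absorbing any division remainder. A standalone model of that arithmetic
 * with hypothetical values:
 */
#include <stdio.h>

int main(void)
{
	int h_active = 5120, odm_cnt = 3; /* hypothetical 3-way ODM combine */
	int odm_slice_width = h_active / odm_cnt;                              /* 1706 */
	int last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); /* 1708 */
	int offset = 0, i;

	/* all slices but the last are odm_slice_width wide; together the
	 * regions exactly cover h_active */
	for (i = 0; i < odm_cnt - 1; i++) {
		printf("slice %d: offset %d, width %d\n", i, offset, odm_slice_width);
		offset += odm_slice_width;
	}
	printf("slice %d: offset %d, width %d\n", i, offset, last_odm_slice_width);

	return 0;
}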
CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, + odm_pipe, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + last_odm_slice_width, + v_active, + offset); } } break; @@ -674,7 +675,8 @@ bool dp_set_test_pattern( if (pipes[i].stream == NULL) continue; - if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { + if (resource_is_pipe_type(&pipes[i], OTG_MASTER) && + pipes[i].stream->link == link) { pipe_ctx = &pipes[i]; break; } @@ -702,6 +704,7 @@ bool dp_set_test_pattern( /* Reset Test Pattern state */ link->test_pattern_enabled = false; + link->current_test_pattern = test_pattern; return true; } @@ -739,6 +742,7 @@ bool dp_set_test_pattern( if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { /* Set Test Pattern state */ link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; if (p_link_settings != NULL) dpcd_set_link_settings(link, p_link_settings); @@ -937,6 +941,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index bebf9c4c8702..1328a0ade342 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -46,6 +46,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); + if (stream_enc->funcs->map_stream_to_link) + stream_enc->funcs->map_stream_to_link(stream_enc, + stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); if (stream_enc->funcs->enable_fifo) stream_enc->funcs->enable_fifo(stream_enc); } @@ -163,7 +166,7 @@ void set_dio_dp_lane_settings(struct dc_link *link, link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings); } -static void update_dio_stream_allocation_table(struct dc_link *link, +void update_dio_stream_allocation_table(struct dc_link *link, const struct link_resource *link_res, const struct link_mst_stream_allocation_table *table) { diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 8b8a099feeb0..f4633d3cf9b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -55,5 +55,8 @@ void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, struct audio_output *audio_output, uint32_t audio_inst); void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx); void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx); +void update_dio_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); #endif /* __LINK_HWSS_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c 
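/*
 * [Editor's note: illustrative sketch, not part of the patch.] The new file
 * below exports requires_fixed_vs_pe_retimer_dio_link_hwss() and
 * get_dio_fixed_vs_pe_retimer_link_hwss(); a hypothetical caller would pick
 * the retimer-aware hwss table only when the link actually needs it, and
 * otherwise fall back to the plain DIO table:
 */
static const struct link_hwss *select_dio_link_hwss(const struct dc_link *link)
{
	if (requires_fixed_vs_pe_retimer_dio_link_hwss(link))
		return get_dio_fixed_vs_pe_retimer_link_hwss();

	return get_dio_link_hwss();
}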
new file mode 100644 index 000000000000..b659baa23147 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -0,0 +1,200 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_dio.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" +#include "link_enc_cfg.h" + +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link) +{ + // TODO: Get USB-C cable orientation + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + return 0xF2; + else + return 0x12; +} + +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link) +{ + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + 
&vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9)); +} + +static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07}; + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + + if (tp_params == NULL) + return false; + + if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previous test pattern + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + } + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_80BIT_CUSTOM: + if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern, + pltpat_custom, tp_params->custom_pattern_size) != 0) + return false; + break; + case DP_TEST_PATTERN_D102: + break; + default: + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + // Deprogram overrides from previous test pattern + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + + return false; + } + + hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + + return true; +} + +static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override( + link, link_res, tp_params, get_dio_link_hwss())) { + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link) +{ + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t 
vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); +} + +static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .setup_stream_attribute = setup_dio_stream_attribute, + .disable_link_output = disable_dio_link_output, + .setup_audio_output = setup_dio_audio_output, + .enable_audio_packet = enable_dio_audio_packet, + .disable_audio_packet = disable_dio_audio_packet, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + .update_stream_allocation_table = update_dio_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) +{ + return &dio_fixed_vs_pe_retimer_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h new file mode 100644 index 000000000000..9ac08a332540 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
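Every vendor access in the new DIO retimer file is a fixed four-byte record pushed through configure_fixed_vs_pe_retimer(), and the link-configuration writes take their third byte from dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(), which currently keys only off lane count (USB-C cable orientation is still a TODO in the patch). A small sketch of that selection; the 0xF2/0x12 values come from the hunk, while the field names for the record bytes are an assumption made for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: { opcode, register, cfg, value } -- the driver treats
 * these as opaque vendor bytes. */
struct retimer_msg {
        uint8_t bytes[4];
};

static uint8_t lane_cfg_to_hw_cfg(unsigned int lane_count)
{
        return lane_count == 4 ? 0xF2 : 0x12; /* values from the new helper */
}

int main(void)
{
        struct retimer_msg msg = { { 0x1, 0x50, lane_cfg_to_hw_cfg(4), 0x0 } };

        printf("vendor write: %02X %02X %02X %02X\n",
               msg.bytes[0], msg.bytes[1], msg.bytes[2], msg.bytes[3]);
        return 0;
}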
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ +#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ + +#include "link.h" + +uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link); +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link); +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link); +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link); +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link); +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void); + +#endif /* __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index edd7d026a762..e1257404357b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,25 +28,7 @@ #include "dccg.h" #include "clk_mgr.h" -static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) -{ - switch (link->link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - return PHYD32CLKA; - case TRANSMITTER_UNIPHY_B: - return PHYD32CLKB; - case TRANSMITTER_UNIPHY_C: - return PHYD32CLKC; - case TRANSMITTER_UNIPHY_D: - return PHYD32CLKD; - case TRANSMITTER_UNIPHY_E: - return PHYD32CLKE; - default: - return PHYD32CLKA; - } -} - -static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = @@ -59,7 +41,7 @@ static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, throttled_vcp_size); } -static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, const struct dc_link_settings *link_settings, struct fixed31_32 throttled_vcp_size) { @@ -87,7 +69,7 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, hblank_min_symbol_width); } -static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; @@ -96,14 +78,14 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } -static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; stream_enc->funcs->disable(stream_enc); } -static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = 
pipe_ctx->stream_res.hpo_dp_stream_enc; struct dc_stream_state *stream = pipe_ctx->stream; @@ -120,81 +102,36 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } -static void enable_hpo_dp_fpga_link_output(struct dc_link *link, +void enable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { - const struct dc *dc = link->dc; - enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link); - int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 : - link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 : - link_settings->link_rate == LINK_RATE_UHBR20 ? 625000 : 0; - - dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_PHYD32CLK, - true); - dc->res_pool->dccg->funcs->enable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst, - phyd32clk); - link_res->hpo_dp_link_enc->funcs->link_enable( + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + true); + link_res->hpo_dp_link_enc->funcs->enable_link_phy( link_res->hpo_dp_link_enc, - link_settings->lane_count); - -} - -static void enable_hpo_dp_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) - enable_hpo_dp_fpga_link_output(link, link_res, signal, - clock_source, link_settings); - else - link_res->hpo_dp_link_enc->funcs->enable_link_phy( - link_res->hpo_dp_link_enc, - link_settings, - link->link_enc->transmitter, - link->link_enc->hpd_source); -} - - -static void disable_hpo_dp_fpga_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal) -{ - const struct dc *dc = link->dc; - - link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); - dc->res_pool->dccg->funcs->disable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, - false); - dm_set_phyd32clk(dc->ctx, 0); + link_settings, + link->link_enc->transmitter, + link->link_enc->hpd_source); } -static void disable_hpo_dp_link_output(struct dc_link *link, +void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) { - disable_hpo_dp_fpga_link_output(link, link_res, signal); - } else { link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); - } + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + false); } static void set_hpo_dp_link_test_pattern(struct dc_link *link, @@ -217,7 +154,7 @@ static void set_hpo_dp_lane_settings(struct dc_link *link, lane_settings[0].FFE_PRESET.raw); } -static void 
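With the FPGA-only paths deleted, the rewritten enable/disable pair above brackets the PHY calls with the optional SYMCLK32 LE root-clock-gating hook: the clock is enabled before enable_link_phy() and only disabled after the PHY is torn down, and the hook may be NULL on hardware without the gating control. A stub sketch of that ordering, with the boolean's meaning (clock enabled) inferred from the two call sites:

#include <stdbool.h>
#include <stdio.h>

/* Optional hook, as in the hunk: NULL on DCNs without the control. */
static void (*set_symclk32_le_root_clock_gating)(bool enable);

static void root_clock(bool enable)
{
        printf("symclk32_le root clock %s\n", enable ? "on" : "off");
}

static void enable_hpo_link(void)
{
        if (set_symclk32_le_root_clock_gating)
                set_symclk32_le_root_clock_gating(true); /* clock on first */
        puts("enable_link_phy");
}

static void disable_hpo_link(void)
{
        puts("link_disable + disable_link_phy");
        if (set_symclk32_le_root_clock_gating)
                set_symclk32_le_root_clock_gating(false); /* clock off last */
}

int main(void)
{
        set_symclk32_le_root_clock_gating = root_clock;
        enable_hpo_link();
        disable_hpo_link();
        return 0;
}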
update_hpo_dp_stream_allocation_table(struct dc_link *link, +void update_hpo_dp_stream_allocation_table(struct dc_link *link, const struct link_resource *link_res, const struct link_mst_stream_allocation_table *table) { @@ -226,7 +163,7 @@ static void update_hpo_dp_stream_allocation_table(struct dc_link *link, table); } -static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, struct audio_output *audio_output, uint32_t audio_inst) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( @@ -235,13 +172,13 @@ static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, &pipe_ctx->stream->audio_info); } -static void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable( pipe_ctx->stream_res.hpo_dp_stream_enc); } -static void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 3cbb94b41a23..1d3ed8ca83b5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -28,9 +28,32 @@ #include "link_hwss.h" #include "link.h" +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx); +void enable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); +void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); +void update_hpo_dp_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst); +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +const struct link_hwss *get_hpo_dp_link_hwss(void); bool can_use_hpo_dp_link_hwss(const struct dc_link *link, const struct link_resource *link_res); -const struct link_hwss *get_hpo_dp_link_hwss(void); #endif /* __LINK_HWSS_HPO_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c new file mode 100644 index 000000000000..b621b97711b6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -0,0 +1,229 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc.
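The wave of de-statics in link_hwss_hpo_dp.c (and earlier in link_hwss_dio.c) exists so the two new fixed VS/PE retimer tables can be assembled mostly from the generic handlers, overriding only the hooks that need vendor programming. A toy version of that composition with simplified function-pointer types; the real tables wire up many more hooks:

#include <stdio.h>

struct link_hwss_ops {
        void (*setup_stream_encoder)(void);
        void (*set_dp_link_test_pattern)(void);
};

static void generic_setup_stream_encoder(void) { puts("generic HPO setup"); }
static void generic_set_test_pattern(void) { puts("generic HPO pattern"); }

static void retimer_set_test_pattern(void)
{
        puts("vendor retimer overrides");
        generic_set_test_pattern(); /* reuse the generic programming */
}

/* Analogous to hpo_fixed_vs_pe_retimer_dp_link_hwss below: share what
 * can be shared, override the rest. */
static const struct link_hwss_ops retimer_hwss = {
        .setup_stream_encoder = generic_setup_stream_encoder,
        .set_dp_link_test_pattern = retimer_set_test_pattern,
};

int main(void)
{
        retimer_hwss.setup_stream_encoder();
        retimer_hwss.set_dp_link_test_pattern();
        return 0;
}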
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_hpo_dp.h" +#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" + +static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, + const struct dc_lane_settings *hw_lane_settings) +{ + const uint8_t vendor_ffe_preset_table[16] = { + 0x01, 0x41, 0x61, 0x81, + 0xB1, 0x05, 0x35, 0x65, + 0x85, 0xA5, 0x09, 0x39, + 0x59, 0x89, 0x0F, 0x24}; + + const uint8_t ffe_mask[4] = { + (hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 
0xF1 : 0xFF)}; + + const uint8_t ffe_cfg[4] = { + vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0], + vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1], + vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2], + vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]}; + + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + + const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F}; + const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]}; + const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]}; + const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]}; + const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5)); +} + +static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21}; + const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21}; + const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20}; + const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20}; + const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20}; + const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6)); + + if 
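dp_hpo_fixed_vs_pe_retimer_set_tx_ffe() above reduces each lane to one byte: the FFE preset level indexes the 16-entry vendor table, and the no_deemphasis/no_preshoot flags then AND away parts of the result. A standalone sketch of exactly that masking; the table and the 0x0F/0xF1 masks are copied from the hunk, but which bits encode preshoot versus de-emphasis is not documented there:

#include <stdint.h>
#include <stdio.h>

static const uint8_t vendor_ffe_preset_table[16] = {
        0x01, 0x41, 0x61, 0x81, 0xB1, 0x05, 0x35, 0x65,
        0x85, 0xA5, 0x09, 0x39, 0x59, 0x89, 0x0F, 0x24,
};

static uint8_t ffe_cfg_for_lane(unsigned int level,
                                int no_deemphasis, int no_preshoot)
{
        uint8_t mask = 0xFF;

        if (no_deemphasis)
                mask &= 0x0F;
        if (no_preshoot)
                mask &= 0xF1;
        return vendor_ffe_preset_table[level & 0xF] & mask;
}

int main(void)
{
        printf("level 4, full:        0x%02X\n", ffe_cfg_for_lane(4, 0, 0));
        printf("level 4, no de-emph:  0x%02X\n", ffe_cfg_for_lane(4, 1, 0));
        printf("level 4, no preshoot: 0x%02X\n", ffe_cfg_for_lane(4, 0, 1));
        return 0;
}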
(link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8)); + + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10)); +} + +static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + if (tp_params == NULL) + return false; + + if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || + tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previously set square wave override + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + else + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + + return false; + } + + hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); + + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); + + return true; +} + +static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern( + link, link_res, tp_params, get_hpo_dp_link_hwss())) { + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); + + // FFE is programmed when retimer is programmed for SQ128, but explicit + // programming needed here as well in case FFE-only update is requested + if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); +} + +static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == 
LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = { + .setup_stream_encoder = setup_hpo_dp_stream_encoder, + .reset_stream_encoder = reset_hpo_dp_stream_encoder, + .setup_stream_attribute = setup_hpo_dp_stream_attribute, + .disable_link_output = disable_hpo_dp_link_output, + .setup_audio_output = setup_hpo_dp_audio_output, + .enable_audio_packet = enable_hpo_dp_audio_packet, + .disable_audio_packet = disable_hpo_dp_audio_packet, + .ext = { + .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size, + .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, + .enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings, + .update_stream_allocation_table = update_hpo_dp_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) +{ + return &hpo_fixed_vs_pe_retimer_dp_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h new file mode 100644 index 000000000000..82301187bc7c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ +#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ + +#include "link.h" + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link); +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void); + +#endif /* __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d471d58aba92..c9b6676eaf53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,6 +593,10 @@ static bool detect_dp(struct dc_link *link, /* DP SST branch */ link->type = dc_connection_sst_branch; } else { + if (link->dc->debug.disable_dp_plus_plus_wa && + link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE) + return false; + /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, sink_caps, @@ -872,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); - set_default_brightness_aux(link); - //TODO: use cached + set_cached_brightness_aux(link); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 2267fb097830..79aef205598b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -182,11 +182,8 @@ void link_resume(struct dc_link *link) static bool is_master_pipe_for_link(const struct dc_link *link, const struct pipe_ctx *pipe) { - return (pipe->stream && - pipe->stream->link && - pipe->stream->link == link && - pipe->top_pipe == NULL && - pipe->prev_odm_pipe == NULL); + return resource_is_pipe_type(pipe, OTG_MASTER) && + pipe->stream->link == link; } /* @@ -765,7 +762,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + if (dc_is_virtual_signal(stream->signal)) result = true; else result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); @@ -778,7 +775,6 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -816,8 +812,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
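is_master_pipe_for_link() above and the earlier pipe scan in dp_set_test_pattern() both open-coded the same test that resource_is_pipe_type(pipe, OTG_MASTER) now names. Judging from the removed conditions, the predicate is equivalent to the following sketch with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct stream { int id; };

struct pipe {
        struct stream *stream;
        struct pipe *top_pipe;      /* non-NULL: blended under another plane */
        struct pipe *prev_odm_pipe; /* non-NULL: secondary ODM segment */
};

static bool is_otg_master(const struct pipe *pipe)
{
        /* carries a stream and is neither a bottom plane nor an ODM follower */
        return pipe->stream && !pipe->top_pipe && !pipe->prev_odm_pipe;
}

int main(void)
{
        struct stream s = { 0 };
        struct pipe master = { &s, NULL, NULL };

        printf("otg master? %d\n", is_otg_master(&master));
        return 0;
}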
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !dp_is_128b_132b_signal(pipe_ctx)) { + if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -849,7 +844,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) false, NULL, true); - else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + else { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -1081,8 +1076,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { uint64_t kbps; + enum dc_link_encoding_format link_encoding; + + if (dp_is_128b_132b_signal(pipe_ctx)) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + else + link_encoding = DC_LINK_ENCODING_DP_8b_10b; - kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding); return get_pbn_from_bw_in_kbps(kbps); } @@ -1540,7 +1541,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); struct fixed31_32 timing_bw = dc_fixpt_from_int( - dc_bandwidth_in_kbps_from_timing(&stream->timing)); + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(link))); struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_div(timing_bw, timeslot_bw_effective); @@ -1973,6 +1975,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) bool is_vga_mode = (stream->timing.h_addressable == 640) && (stream->timing.v_addressable == 480); struct dc *dc = pipe_ctx->stream->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); if (stream->phy_pix_clk == 0) stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; @@ -2012,6 +2015,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) display_color_depth = COLOR_DEPTH_888; + /* Enable the stream encoder for TMDS first so the 1/4 TMDS + * character clock is applied when the pixel clock is beyond 340MHz. + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link_hwss->setup_stream_encoder(pipe_ctx); + dc->hwss.enable_tmds_link_output( link, &pipe_ctx->link_res, @@ -2131,7 +2140,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); // TODO: use cached if known + set_cached_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) msleep(bl_oled_enable_delay); edp_backlight_enable_aux(link, true); @@ -2209,9 +2219,8 @@ static enum dc_status enable_link( * link settings. Need to call disable first before enabling at * new link settings.
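dc_bandwidth_in_kbps_from_timing() growing an encoding argument, here and in the validation and capability files below, reflects the different line-coding overheads: 8b/10b delivers 80% of the raw symbol rate as payload, 128b/132b roughly 96.97%, so the same timing consumes a different share of the verified link. The figures in this sketch are the standard per-lane DP rates, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

static uint64_t effective_kbps(uint64_t lane_rate_mbps, unsigned int lanes,
                               int is_128b_132b)
{
        uint64_t raw = lane_rate_mbps * lanes * 1000ULL;

        /* 8b/10b: 8 payload bits per 10 line bits; 128b/132b: 128 per 132 */
        return is_128b_132b ? raw * 128 / 132 : raw * 8 / 10;
}

int main(void)
{
        printf("HBR3 x4, 8b/10b:      %llu kbps\n",
               (unsigned long long)effective_kbps(8100, 4, 0));
        printf("UHBR10 x4, 128b/132b: %llu kbps\n",
               (unsigned long long)effective_kbps(10000, 4, 1));
        return 0;
}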
*/ - if (link->link_status.link_active) { + if (link->link_status.link_active && !stream->skip_edp_power_down) disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: @@ -2271,8 +2280,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { @@ -2330,7 +2338,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); } else { dc->hwss.disable_stream(pipe_ctx); - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + if (!pipe_ctx->stream->skip_edp_power_down) { + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + } } if (pipe_ctx->stream->timing.flags.DSC) { @@ -2358,6 +2368,8 @@ void link_set_dpms_on( enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; ASSERT(is_master_pipe_for_link(link, pipe_ctx)); @@ -2375,8 +2387,7 @@ void link_set_dpms_on( } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; link_enc = link_enc_cfg_get_link_enc(link); @@ -2402,138 +2413,126 @@ void link_set_dpms_on( link_hwss->setup_stream_attribute(pipe_ctx); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - bool apply_edp_fast_boot_optimization = - pipe_ctx->stream->apply_edp_fast_boot_optimization; - - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - - // Enable VPG before building infoframe - if (vpg && vpg->funcs->vpg_poweron) - vpg->funcs->vpg_poweron(vpg); + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - - /* Do not touch link on seamless boot optimization. */ - if (pipe_ctx->stream->apply_seamless_boot_optimization) { - pipe_ctx->stream->dpms_off = false; + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); - /* Still enable stream features & audio on seamless boot for DP external displays */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - enable_stream_features(pipe_ctx); - dc->hwss.enable_audio_stream(pipe_ctx); - } + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - update_psp_stream_config(pipe_ctx, false); - return; - } + /* Do not touch link on seamless boot optimization. */ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; - /* eDP lit up by bios already, no need to enable again. 
*/ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC && - !pipe_ctx->next_odm_pipe) { - pipe_ctx->stream->dpms_off = false; - update_psp_stream_config(pipe_ctx, false); - return; + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + enable_stream_features(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); } - if (pipe_ctx->stream->dpms_off) - return; + update_psp_stream_config(pipe_ctx, false); + return; + } - /* Have to setup DSC before DIG FE and BE are connected (which happens before the - * link training). This is to make sure the bandwidth sent to DIG BE won't be - * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag - * will be automatically set at a later time when the video is enabled - * (DP_VID_STREAM_EN = 1). - */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + apply_edp_fast_boot_optimization && + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { + pipe_ctx->stream->dpms_off = false; + update_psp_stream_config(pipe_ctx, false); + return; + } - } + if (pipe_ctx->stream->dpms_off) + return; - status = enable_link(state, pipe_ctx); + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, true); - if (status != DC_OK) { - DC_LOG_WARNING("enabling link %u failed: %d\n", - pipe_ctx->stream->link->link_index, - status); + } - /* Abort stream enable *unless* the failure was due to - * DP link training - some DP monitors will recover and - * show the stream anyway. But MST displays can't proceed - * without link training. - */ - if (status != DC_FAIL_DP_LINK_TRAINING || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - if (false == stream->link->link_status.link_active) - disable_link(stream->link, &pipe_ctx->link_res, - pipe_ctx->stream->signal); - BREAK_TO_DEBUGGER(); - return; - } - } + status = enable_link(state, pipe_ctx); - /* turn off otg test pattern if enable */ - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - COLOR_DEPTH_UNDEFINED); + if (status != DC_OK) { + DC_LOG_WARNING("enabling link %u failed: %d\n", + pipe_ctx->stream->link->link_index, + status); - /* This second call is needed to reconfigure the DIG - * as a workaround for the incorrect value being applied - * from transmitter control. + /* Abort stream enable *unless* the failure was due to + * DP link training - some DP monitors will recover and + * show the stream anyway. But MST displays can't proceed + * without link training. 
*/ - if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || - dp_is_128b_132b_signal(pipe_ctx))) { - if (link_enc) - link_enc->funcs->setup( - link_enc, + if (status != DC_FAIL_DP_LINK_TRAINING || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } + BREAK_TO_DEBUGGER(); + return; + } + } - dc->hwss.enable_stream(pipe_ctx); + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); - /* Set DPS PPS SDP (AKA "info frames") */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) { - dp_set_dsc_on_rx(pipe_ctx, true); - link_set_dsc_pps_packet(pipe_ctx, true, true); - } + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. + */ + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + dp_is_128b_132b_signal(pipe_ctx))) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - dp_is_128b_132b_signal(pipe_ctx)) - update_sst_payload(pipe_ctx, true); + dc->hwss.enable_stream(pipe_ctx); - dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->link->cur_link_settings); + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) { + dp_set_dsc_on_rx(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + } - if (stream->sink_patches.delay_ignore_msa > 0) - msleep(stream->sink_patches.delay_ignore_msa); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + allocate_mst_payload(pipe_ctx); + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + dp_is_128b_132b_signal(pipe_ctx)) + update_sst_payload(pipe_ctx, true); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - enable_stream_features(pipe_ctx); - update_psp_stream_config(pipe_ctx, false); + dc->hwss.unblank_stream(pipe_ctx, + &pipe_ctx->stream->link->cur_link_settings); - dc->hwss.enable_audio_stream(pipe_ctx); + if (stream->sink_patches.delay_ignore_msa > 0) + msleep(stream->sink_patches.delay_ignore_msa); - } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - if (dp_is_128b_132b_signal(pipe_ctx)) - dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx); - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); - } + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + enable_stream_features(pipe_ctx); + update_psp_stream_config(pipe_ctx, false); + + dc->hwss.enable_audio_stream(pipe_ctx); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { set_avmute(pipe_ctx, false); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 1515c817f03b..0895742a3102 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -132,6 +132,7 @@ static void construct_link_service_ddc(struct 
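The large reindented region above is the FPGA/Maximus branch being deleted from link_set_dpms_on(): what survives is a single hardware path. Flattened to its call order, that path reads roughly as below; the stub names paraphrase the real steps rather than quote driver symbols:

#include <stdio.h>

static void step(const char *name) { printf("-> %s\n", name); }

static void dpms_on_sequence(void)
{
        step("power VPG, build and send infoframes");
        step("set up DSC before link training"); /* keep DIG BE bandwidth legal */
        step("enable_link() / link training");
        step("clear any OTG test pattern");
        step("reconfigure DIG (transmitter-control workaround)");
        step("enable_stream()");
        step("send DSC PPS SDP");
        step("allocate MST payload or 128b/132b SST payload");
        step("unblank_stream()");
        step("enable stream features, PSP config, audio");
}

int main(void)
{
        dpms_on_sequence();
        return 0;
}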
link_service *link_srv) link_srv->destroy_ddc_service = link_destroy_ddc_service; link_srv->query_ddc_data = link_query_ddc_data; link_srv->aux_transfer_raw = link_aux_transfer_raw; + link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer; link_srv->aux_transfer_with_retries_no_mutex = link_aux_transfer_with_retries_no_mutex; link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode; @@ -207,6 +208,13 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s link_srv->edp_set_sink_vtotal_in_psr_active = edp_set_sink_vtotal_in_psr_active; link_srv->edp_get_psr_residency = edp_get_psr_residency; + + link_srv->edp_get_replay_state = edp_get_replay_state; + link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; + link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; + link_srv->edp_replay_residency = edp_replay_residency; + link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = edp_is_ilr_optimization_required; @@ -563,11 +571,9 @@ static bool construct_phy(struct dc_link *link, goto create_fail; } - /* TODO: #DAL3 Implement id to str function.*/ - LINK_INFO("Connector[%d] description:" - "signal %d\n", + LINK_INFO("Connector[%d] description: signal: %s\n", init_params->connector_index, - link->connector_signal); + signal_type_to_string(link->connector_signal)); ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; @@ -785,6 +791,10 @@ static bool construct_dpia(struct dc_link *link, /* Set dpia port index : 0 to number of dpia ports */ link->ddc_hw_inst = init_params->connector_index; + // Assign Dpia preferred eng_id + if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia) + link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst); + /* TODO: Create link encoder */ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index e8b2fc4002a5..b45fda96eaf6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing( /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; #endif - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > + dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; } else { // DP to HDMI TMDS converter if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) @@ -285,7 +286,7 @@ static bool dp_validate_mode_timing( link_setting = &link->verified_link_cap; */ - req_bw = dc_bandwidth_in_kbps_from_timing(timing); + req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { @@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un for (uint8_t i = 0; i < num_streams; ++i) { link[i] = stream[i].link; - bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); + bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(link[i])); } ret = 
dpia_validate_usb4_bw(link, bw_needed, num_streams); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 0fa1228bc178..ecfd83299e75 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -412,6 +412,88 @@ int link_aux_transfer_raw(struct ddc_service *ddc, } } +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link) +{ + uint32_t vendor_lttpr_write_address = 0xF004F; + uint8_t offset; + + switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) { + case 0x80: // 1 lttpr repeater + offset = 1; + break; + case 0x40: // 2 lttpr repeaters + offset = 2; + break; + case 0x20: // 3 lttpr repeaters + offset = 3; + break; + case 0x10: // 4 lttpr repeaters + offset = 4; + break; + case 0x08: // 5 lttpr repeaters + offset = 5; + break; + case 0x04: // 6 lttpr repeaters + offset = 6; + break; + case 0x02: // 7 lttpr repeaters + offset = 7; + break; + case 0x01: // 8 lttpr repeaters + offset = 8; + break; + default: + offset = 0xFF; + } + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + return vendor_lttpr_write_address; +} + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link) +{ + return link_get_fixed_vs_pe_retimer_write_address(link) + 4; +} + +bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length) +{ + struct aux_payload write_payload = { + .i2c_over_aux = false, + .write = true, + .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link), + .length = length, + .data = (uint8_t *) data, + .reply = NULL, + .mot = I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &write_payload); +} + +bool link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length) +{ + struct aux_payload read_payload = { + .i2c_over_aux = false, + .write = false, + .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link), + .length = length, + .data = data, + .reply = NULL, + .mot = I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &read_payload); +} + bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload) { @@ -427,7 +509,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc, if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && - ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) { + ddc->ctx->dce_version == DCN_VERSION_3_1) { /* Fixed VS workaround for AUX timeout */ const uint32_t fixed_vs_address = 0xF004F; const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index 860ef15d7f1b..a3e25e55bed6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -72,6 +72,20 @@ bool link_query_ddc_data( bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload); +bool link_configure_fixed_vs_pe_retimer( + struct ddc_service *ddc, + const uint8_t *data, + uint32_t length); + +bool link_query_fixed_vs_pe_retimer( + struct ddc_service *ddc, + uint8_t 
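The switch in link_get_fixed_vs_pe_retimer_write_address() above decodes DPCD PHY_REPEATER_CNT, a one-hot field where 0x80 means one LTTPR and each shift right adds one, then steps the 0xF004F base by one per-repeater register block. A compact sketch of the same decode; the 0x50 stride matches the kernel's DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE define:

#include <stdint.h>
#include <stdio.h>

#define VENDOR_LTTPR_BASE 0xF004F
#define REPEATER_CONFIG_STATUS_SIZE 0x50

static unsigned int repeater_count(uint8_t phy_repeater_cnt)
{
        unsigned int n;

        for (n = 1; n <= 8; n++)
                if (phy_repeater_cnt == (0x80 >> (n - 1)))
                        return n;
        return 0; /* invalid or none: leave the base address untouched */
}

static uint32_t vendor_write_address(uint8_t phy_repeater_cnt)
{
        unsigned int n = repeater_count(phy_repeater_cnt);

        return n ? VENDOR_LTTPR_BASE + REPEATER_CONFIG_STATUS_SIZE * (n - 1)
                 : VENDOR_LTTPR_BASE;
}

int main(void)
{
        printf("2 repeaters -> 0x%X\n",
               (unsigned int)vendor_write_address(0x40));
        return 0;
}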
*data, + uint32_t length); + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link); +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link); + + void write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index ba98013fecd0..237e0ff955f3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -326,8 +326,7 @@ bool dp_is_fec_supported(const struct dc_link *link) return (dc_is_dp_signal(link->connector_signal) && link_enc && link_enc->features.fec_supported && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && - !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE); } bool dp_should_enable_fec(const struct dc_link *link) @@ -907,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream, struct dc_link_settings *link_setting) { struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link)); memset(link_setting, 0, sizeof(*link_setting)); @@ -940,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream, tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing, + dc_link_get_highest_encoding_format(link)); edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw); max_link_rate = tmp_link_setting.link_rate; } @@ -1043,9 +1043,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link) DP_SET_POWER, &dpcd_power_state, sizeof(dpcd_power_state)); - if (status < 0) - DC_LOG_DC("%s: Failed to power up sink: %s\n", __func__, - dpcd_power_state == DP_SET_POWER_D0 ? 
"D0" : "D3"); + DC_LOG_DC("%s: Failed to power up sink\n", __func__); return DC_ERROR_UNEXPECTED; } @@ -1396,7 +1394,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( link->dc, link->link_enc->transmitter); - if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.cable_id.header.ret_status == 1) { cable_id->raw = cmd.cable_id.data.output_raw; DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); @@ -1452,7 +1450,8 @@ bool read_is_mst_supported(struct dc_link *link) */ static bool dpcd_read_sink_ext_caps(struct dc_link *link) { - uint8_t dpcd_data; + uint8_t dpcd_data = 0; + uint8_t edp_general_cap2 = 0; if (!link) return false; @@ -1461,6 +1460,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; + + if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) + return false; + + link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0; + return true; } @@ -1554,6 +1559,9 @@ static bool retrieve_link_cap(struct dc_link *link) int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; const uint32_t post_oui_delay = 30; // 30ms + bool is_fec_supported = false; + bool is_dsc_basic_supported = false; + bool is_dsc_passthrough_supported = false; memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(&down_strm_port_count, @@ -1696,6 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link) /* TODO - decouple raw mst capability from policy decision */ link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); get_active_converter_info(ds_port.byte, link); @@ -1803,6 +1812,17 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); + if (status == DC_OK) { + is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE; + is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT; + is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT; + DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__, + str_yes_no(is_fec_supported)); + DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_basic_supported)); + DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_passthrough_supported)); + } if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( link, @@ -1931,6 +1951,9 @@ void detect_edp_sink_caps(struct dc_link *link) link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + supported_link_rates[entry]) * 200; + DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__, + entry / 2, link_rate_in_khz); + if (link_rate_in_khz != 0) { link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; @@ -1986,6 +2009,16 @@ void detect_edp_sink_caps(struct dc_link *link) core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, &link->dpcd_caps.alpm_caps.raw, sizeof(link->dpcd_caps.alpm_caps.raw)); + + /* + * Read REPLAY info + */ + 
core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE, + &link->dpcd_caps.pr_info.pixel_deviation_per_line, + sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line)); + core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, + &link->dpcd_caps.pr_info.max_deviation_line, + sizeof(link->dpcd_caps.pr_info.max_deviation_line)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) @@ -2143,7 +2176,9 @@ static bool dp_verify_link_cap( link, &irq_data)) (*fail_count)++; - + } else if (status == LINK_TRAINING_LINK_LOSS) { + success = true; + (*fail_count)++; } else { (*fail_count)++; } @@ -2166,6 +2201,7 @@ bool dp_verify_link_cap_with_retries( int i = 0; bool success = false; int fail_count = 0; + struct dc_link_settings last_verified_link_cap = fail_safe_link_settings; dp_trace_detect_lt_init(link); @@ -2182,10 +2218,14 @@ bool dp_verify_link_cap_with_retries( if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { link->verified_link_cap = fail_safe_link_settings; break; - } else if (dp_verify_link_cap(link, known_limit_link_setting, - &fail_count) && fail_count == 0) { - success = true; - break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { + last_verified_link_cap = link->verified_link_cap; + if (fail_count == 0) { + success = true; + break; + } + } else { + link->verified_link_cap = last_verified_link_cap; } fsleep(10 * 1000); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 4626fabc0a96..0bb749133909 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -90,7 +90,7 @@ bool dpia_query_hpd_status(struct dc_link *link) cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; /* Return HPD status reported by DMUB if query successfully executed. 
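The dp_verify_link_cap_with_retries() rework above snapshots the last successfully verified capability so a failed retry restores it instead of leaving a clobbered value. A minimal standalone sketch of that policy, with simplified types; verify_once() is a hypothetical stand-in for the per-attempt verification:

#include <stdbool.h>
#include <stdio.h>

#define ATTEMPTS 4

struct caps { int rate, lanes; };

/* Stub for the one-shot verify; pretends the limit verifies cleanly. */
static bool verify_once(struct caps limit, struct caps *out, int *fails)
{
	*out = limit;
	(void)fails;
	return true;
}

static struct caps verify_with_retries(struct caps limit)
{
	struct caps verified = {0};	/* fail-safe default */
	struct caps last_good = {0};
	int i, fails;

	for (i = 0; i < ATTEMPTS; i++) {
		fails = 0;
		if (verify_once(limit, &verified, &fails)) {
			last_good = verified;	/* remember known-good caps */
			if (fails == 0)
				return verified;	/* clean pass, done */
		} else {
			verified = last_good;	/* failed pass must not clobber caps */
		}
	}
	return verified;
}

int main(void)
{
	struct caps limit = { 810000, 4 };	/* illustrative: HBR3 x4 */
	struct caps v = verify_with_retries(limit);

	printf("%d kHz x%d lanes\n", v.rate, v.lanes);
	return 0;
}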
*/ - if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) is_hpd_high = cmd.query_hpd.data.result; DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index ba95facc4ee8..e047bbeaa49a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -82,8 +82,15 @@ bool dp_parse_link_loss_status( } /* Check interlane align.*/ - if (sink_status_changed || - !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && + (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) { + sink_status_changed = true; + } else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + sink_status_changed = true; + } + + if (sink_status_changed) { DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); @@ -175,6 +182,68 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) return false; } +static bool handle_hpd_irq_replay_sink(struct dc_link *link) +{ + union dpcd_replay_configuration replay_configuration; + /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/ + union psr_error_status replay_error_status; + + if (!link->replay_settings.replay_feature_enabled) + return false; + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS, + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = + replay_error_status.bits.LINK_CRC_ERROR; + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = + replay_configuration.bits.DESYNC_ERROR_STATUS; + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; + + if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + bool allow_active; + + /* Acknowledge and clear configuration bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + /* Acknowledge and clear error bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS,/*DpcdAddress_REPLAY_Error_Status*/ + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + /* Replay error, disable and re-enable Replay */ + if (link->replay_settings.replay_allow_active) { + allow_active = false; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + } + } + return true; +} + void dp_handle_link_loss(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; @@ -201,6 +270,25 @@ void 
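handle_hpd_irq_replay_sink() above recovers from a Replay error by turning the feature off and immediately back on through edp_set_replay_allow_active(). Condensed to its essence as a standalone sketch; the edp_* call is stubbed here:

#include <stdbool.h>

struct link_state { bool replay_allow_active; };

/* Stub standing in for edp_set_replay_allow_active() from this patch. */
static bool set_allow_active(struct link_state *l, const bool *allow)
{
	l->replay_allow_active = *allow;
	return true;
}

/* On any Replay error: drop out of Replay, then re-arm it. */
static void recover_from_replay_error(struct link_state *l)
{
	bool allow;

	if (!l->replay_allow_active)
		return;

	allow = false;
	set_allow_active(l, &allow);	/* exit Replay */
	allow = true;
	set_allow_active(l, &allow);	/* re-enable */
}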
dp_handle_link_loss(struct dc_link *link) } } +static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data) +{ + enum dc_status retval; + union lane_align_status_updated dpcd_lane_status_updated; + + retval = core_link_read_dpcd( + link, + DP_LANE_ALIGN_STATUS_UPDATED, + &dpcd_lane_status_updated.raw, + sizeof(union lane_align_status_updated)); + + if (retval == DC_OK) { + irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b; + irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b; + } +} + enum dc_status dp_read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) @@ -242,6 +330,13 @@ enum dc_status dp_read_hpd_rx_irq_data( irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + + /* + * This display doesn't have correct values in DPCD200Eh. + * Read and check DPCD204h instead. + */ + if (link->wa_flags.read_dpcd204h_on_irq_hpd) + read_dpcd204h_on_irq_hpd(link, irq_data); } return retval; @@ -327,6 +422,10 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, /* PSR-related error was detected and handled */ return true; + if (handle_hpd_irq_replay_sink(link)) + /* Replay-related error was detected and handled */ + return true; + /* If PSR-related error handled, Main link may be off, * so do not handle as a normal sink status change interrupt. */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 579fa222810d..90339c2dfd84 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1653,10 +1653,19 @@ bool perform_link_training_with_retries( break; } - DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", - __func__, link->link_index, (unsigned int)j + 1, attempts, - cur_link_settings.link_rate, cur_link_settings.lane_count, - cur_link_settings.link_spread, status); + if (j == (attempts - 1)) { + DC_LOG_WARNING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } else { + DC_LOG_HW_LINK_TRAINING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } dp_disable_link_phy(link, &pipe_ctx->link_res, signal); @@ -1690,13 +1699,20 @@ bool perform_link_training_with_retries( } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. 
*/ uint32_t req_bw; uint32_t link_bw; + enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; decide_fallback_link_setting(link, &max_link_settings, &cur_link_settings, status); + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to * minimum link bandwidth. */ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); is_link_bw_low = (req_bw > link_bw); is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c index 23d380f09a21..db87cfe37b5c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c @@ -211,11 +211,17 @@ enum link_training_result dp_perform_128b_132b_link_training( dpcd_set_link_settings(link, lt_settings); - if (result == LINK_TRAINING_SUCCESS) + if (result == LINK_TRAINING_SUCCESS) { result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); + } - if (result == LINK_TRAINING_SUCCESS) + if (result == LINK_TRAINING_SUCCESS) { result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__); + } return result; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 3889ebb2256b..2b4c15b0b407 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -388,6 +388,8 @@ enum link_training_result dp_perform_8b_10b_link_training( link_res, lt_settings, repeater_id); + if (status == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); repeater_training_done(link, repeater_id); @@ -409,6 +411,8 @@ enum link_training_result dp_perform_8b_10b_link_training( link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); } } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 5731c4b61f9f..fd8f6f198146 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -36,6 +36,7 @@ #include "link_dpcd.h" #include "link_dp_phy.h" #include "link_dp_capability.h" +#include "link_ddc.h" #define DC_LOGGER \ link->ctx->logger @@ -46,42 +47,20 @@ void dp_fixed_vs_pe_read_lane_adjust( { const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; - const uint8_t offset = 
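The fallback path above now recomputes the required bandwidth with the encoding it just decided on, then flags the link as too slow when the stream needs more than the reduced setting can carry. A standalone illustration of that comparison; the 8b/10b efficiency factor is standard DisplayPort, and the numbers are illustrative rather than taken from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative stream: 1080p60 RGB 8bpc needs ~3.56 Gbps. */
	uint32_t req_bw_kbps = 3564000;

	/* HBR (2.7 Gbps per lane) x 2 lanes, 8b/10b: 80% of the raw rate. */
	uint32_t link_bw_kbps = (2700000u * 2 * 8) / 10;

	bool is_link_bw_low = req_bw_kbps > link_bw_kbps;

	printf("link %u kbps, need %u kbps -> %s\n", link_bw_kbps,
	       req_bw_kbps, is_link_bw_low ? "too low" : "ok");
	return 0;
}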
dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - uint32_t vendor_lttpr_write_address = 0xF004F; - uint32_t vendor_lttpr_read_address = 0xF0053; uint8_t dprx_vs = 0; uint8_t dprx_pe = 0; uint8_t lane; - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - vendor_lttpr_read_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - /* W/A to read lane settings requested by DPRX */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_vs, - 1); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_pe, - 1); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1); for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; @@ -95,19 +74,11 @@ void dp_fixed_vs_pe_set_retimer_lane_settings( const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], uint8_t lane_count) { - const uint8_t offset = dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - uint32_t vendor_lttpr_write_address = 0xF004F; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint8_t lane = 0; - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); @@ -116,21 +87,14 @@ void dp_fixed_vs_pe_set_retimer_lane_settings( } /* Force LTTPR to output desired VS and PE */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); } static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( @@ -233,10 +197,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = 
link->dc->debug.fixed_vs_aux_delay_config_wa; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -254,37 +222,27 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( } if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) + } else if (offset > 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); + /* 1. set link rate, lane count and spread. */ @@ -335,6 +293,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. 
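Every open-coded core_link_write_dpcd() to the vendor retimer address is replaced above by link_configure_fixed_vs_pe_retimer()/link_query_fixed_vs_pe_retimer(), so call sites pass only a payload. A hedged sketch of one shape such a wrapper could take; the real helper lives in link_ddc.c and is not part of this hunk, so whether it still applies the per-repeater address offset (stride assumed 0x50 here) is an assumption:

#include <stdbool.h>
#include <stdint.h>

#define VENDOR_LTTPR_BASE	0xF004F
#define REPEATER_CFG_SIZE	0x50	/* assumed per-repeater stride */

struct retimer_ddc { uint8_t repeater_offset; };

/* Stub for the underlying DPCD/DDC transport. */
static bool dpcd_write(uint32_t addr, const uint8_t *buf, uint32_t len)
{
	(void)addr; (void)buf; (void)len;
	return true;
}

/* Sketch: one wrapper owns the offset math every call site used to repeat. */
static bool configure_fixed_vs_pe_retimer(struct retimer_ddc *ddc,
					  const uint8_t *data, uint32_t len)
{
	uint32_t addr = VENDOR_LTTPR_BASE;

	if (ddc->repeater_offset != 0xFF)
		addr += REPEATER_CFG_SIZE * (ddc->repeater_offset - 1);

	return dpcd_write(addr, data, len);
}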
Perform link training */ /* Perform Clock Recovery Sequence */ @@ -347,7 +318,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; uint8_t i = 0; retries_cr = 0; @@ -380,19 +350,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); + if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) + sizeof(vendor_lttpr_write_data_intercept_dis))) break; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } @@ -408,16 +373,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, @@ -513,16 +472,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. 
update DPCD*/ if (!retries_ch_eq) @@ -591,11 +544,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - - uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -613,37 +569,26 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( } if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) + } else if (offset > 2) { pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); /* 1. set link rate, lane count and spread. 
*/ @@ -694,6 +639,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. Perform link training */ /* Perform Clock Recovery Sequence */ @@ -706,7 +664,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; uint8_t i = 0; retries_cr = 0; @@ -739,19 +696,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); + if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) + sizeof(vendor_lttpr_write_data_intercept_dis))) break; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } @@ -767,16 +719,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, @@ -849,17 +795,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq1[0], sizeof(vendor_lttpr_write_data_adicora_eq1)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq2[0], sizeof(vendor_lttpr_write_data_adicora_eq2)); + /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; @@ -883,16 +826,10 @@ enum link_training_result 
dp_perform_fixed_vs_pe_training_sequence( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. update DPCD*/ if (!retries_ch_eq) { @@ -905,11 +842,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( lt_settings, tr_pattern, 0); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_adicora_eq3[0], - sizeof(vendor_lttpr_write_data_adicora_eq3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_adicora_eq3[0], + sizeof(vendor_lttpr_write_data_adicora_eq3)); + } else dpcd_set_lane_settings(link, lt_settings, 0); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 8d1df863659c..98e715aa6d8e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -34,9 +34,13 @@ #include "dm_helpers.h" #include "dal_asic_id.h" #include "dce/dmub_psr.h" +#include "dc/dc_dmub_srv.h" +#include "dce/dmub_replay.h" #include "abm.h" #define DC_LOGGER_INIT(logger) +#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B + /* Travis */ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; /* Nutmeg */ @@ -46,43 +50,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) { union dpcd_edp_config edp_config_set; bool panel_mode_edp = false; + enum dc_status result; memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - if (panel_mode != DP_PANEL_MODE_DEFAULT) { + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; - switch (panel_mode) { - case DP_PANEL_MODE_EDP: - case DP_PANEL_MODE_SPECIAL: - panel_mode_edp = true; - break; + default: + break; + } - default: - break; - } + /*set edp panel mode in receiver*/ + result = core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + + if (result == DC_OK && + edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { - /*set edp panel mode in receiver*/ - core_link_read_dpcd( + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( link, DP_EDP_CONFIGURATION_SET, &edp_config_set.raw, sizeof(edp_config_set.raw)); - if (edp_config_set.bits.PANEL_MODE_EDP - != panel_mode_edp) { - enum dc_status result; - - edp_config_set.bits.PANEL_MODE_EDP = - panel_mode_edp; - result = core_link_write_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - ASSERT(result == DC_OK); - } + ASSERT(result == DC_OK); } + link->panel_mode = panel_mode; DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " "eDP panel mode enabled: %d \n", @@ -164,15 +167,37 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms 
= (uint16_t)transition_time_in_ms; + link->backlight_settings.backlight_millinits = backlight_millinits; - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + if (!link->dpcd_caps.panel_luminance_control) { + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *)(&dpcd_backlight_set), sizeof(dpcd_backlight_set)) != DC_OK) - return false; + return false; - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, &backlight_control, 1) != DC_OK) - return false; + return false; + } else { + const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + struct target_luminance_value *target_luminance = NULL; + + //if target luminance value is greater than 24 bits, clip the value to 24 bits + if (backlight_millinits > 0xFFFFFF) + backlight_millinits = 0xFFFFFF; + + target_luminance = (struct target_luminance_value *)&backlight_millinits; + + if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, + sizeof(backlight_enable)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)(target_luminance), + sizeof(struct target_luminance_value)) != DC_OK) + return false; + } return true; } @@ -230,10 +255,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *) backlight_millinits, - sizeof(uint32_t))) - return false; + if (!link->dpcd_caps.panel_luminance_control) { + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)backlight_millinits, + sizeof(uint32_t))) + return false; + } else { + //setting to 0 as a precaution, since target_luminance_value is 3 bytes + memset(backlight_millinits, 0, sizeof(uint32_t)); + + if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)backlight_millinits, + sizeof(struct target_luminance_value))) + return false; + } return true; } @@ -255,6 +290,16 @@ bool set_default_brightness_aux(struct dc_link *link) return false; } +bool set_cached_brightness_aux(struct dc_link *link) +{ + if (link->backlight_settings.backlight_millinits) + return edp_set_backlight_level_nits(link, true, + link->backlight_settings.backlight_millinits, 0); + else + return set_default_brightness_aux(link); + return false; +} + bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { @@ -288,7 +333,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); - req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); if (!crtc_timing->flags.DSC) edp_decide_link_settings(link, &link_setting, req_bw); @@ -786,6 +831,167 @@ bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_ return true; } +bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (replay == NULL && force_static) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Set power optimization flag */ + if (power_opts && 
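When the panel reports luminance control, the code above clips the millinits value to 24 bits and sends only the low three bytes as the target luminance. A standalone illustration of that packing; the three-byte layout is an assumption matching the cast in the driver, which relies on a little-endian CPU:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct target_luminance_value { uint8_t bytes[3]; };	/* assumed layout */

static struct target_luminance_value pack_luminance(uint32_t millinits)
{
	struct target_luminance_value out;

	if (millinits > 0xFFFFFF)
		millinits = 0xFFFFFF;	/* clip to 24 bits */

	memcpy(out.bytes, &millinits, sizeof(out.bytes));	/* LE low 3 bytes */
	return out;
}

int main(void)
{
	struct target_luminance_value v = pack_luminance(0x00234567);

	printf("%02X %02X %02X\n", v.bytes[0], v.bytes[1], v.bytes[2]);
	/* prints 67 45 23: least significant byte first */
	return 0;
}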
link->replay_settings.replay_power_opt_active != *power_opts) { + if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { + replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); + link->replay_settings.replay_power_opt_active = *power_opts; + } + } + + /* Activate or deactivate Replay */ + if (allow_active && link->replay_settings.replay_allow_active != *allow_active) { + // TODO: Handle mux change case if force_static is set + // If force_static is set, just change the replay_allow_active state directly + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); + link->replay_settings.replay_allow_active = *allow_active; + } + + return true; +} + +bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + enum replay_state pr_state = REPLAY_STATE_0; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_get_state(replay, &pr_state, panel_inst); + *state = pr_state; + + return true; +} + +bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + /* To-do: Setup Replay */ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + int i; + unsigned int panel_inst; + struct replay_context replay_context = { 0 }; + unsigned int lineTimeInNs = 0; + + + union replay_enable_and_configuration replay_config; + + union dpcd_alpm_configuration alpm_config; + + replay_context.controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; + replay_context.digbe_inst = link->link_enc->transmitter; + replay_context.digfe_inst = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + replay_context.controllerId = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + lineTimeInNs = + ((stream->timing.h_total * 1000000) / + (stream->timing.pix_clk_100hz / 10)) + 1; + + replay_context.line_time_in_ns = lineTimeInNs; + + if (replay) + link->replay_settings.replay_feature_enabled = + replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); + if (link->replay_settings.replay_feature_enabled) { + + replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1; + replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION = + link->replay_settings.config.replay_timing_sync_supported; + replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1; + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); + + memset(&alpm_config, 0, sizeof(alpm_config)); + alpm_config.bits.ENABLE = 1; + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_RECEIVER_ALPM_CONFIG, + &alpm_config.raw, + sizeof(alpm_config.raw)); + } + return true; +} + +bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + 
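edp_setup_replay() above derives the line time from h_total and the pixel clock, which DC stores in units of 100 Hz. Dividing that clock by 10 yields kHz, which makes the quotient come out in nanoseconds; the trailing +1 rounds up. A worked standalone example with illustrative CEA 1080p60 numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t h_total = 2200;		/* 1080p60 CEA timing */
	uint32_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */

	/* Same arithmetic as the patch: h_total * 1e6 / clk_in_khz is in ns. */
	uint32_t line_time_ns = ((h_total * 1000000u) / (pix_clk_100hz / 10)) + 1;

	printf("line time = %u ns\n", line_time_ns);	/* prints 14815 */
	return 0;
}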
return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); + link->replay_settings.coasting_vtotal = coasting_vtotal; + } + + return true; +} + +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + else + *residency = 0; + + return true; +} + static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 28f552080558..0a5bbda8c739 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool set_cached_brightness_aux(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, @@ -52,6 +53,14 @@ bool edp_setup_psr(struct dc_link *link, bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency); +bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, + bool wait, bool force_static, const unsigned int *power_opts); +bool edp_setup_replay(struct dc_link *link, + const struct dc_stream_state *stream); +bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm); +bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index ba1715e2d25a..2d995c87fbb9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -271,7 +271,7 @@ struct dmub_srv_hw_params { */ struct dmub_diagnostic_data { uint32_t dmcub_version; - uint32_t scratch[16]; + uint32_t scratch[17]; uint32_t pc; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; @@ -282,6 +282,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_rptr; uint32_t inbox0_wptr; uint32_t inbox0_size; + uint32_t gpint_datain0; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; @@ -340,6 +341,8 @@ struct dmub_srv_hw_funcs { void (*setup_mailbox)(struct dmub_srv *dmub, const struct dmub_region *inbox1); + uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub); + 
uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub); void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); @@ -364,9 +367,10 @@ struct dmub_srv_hw_funcs { bool (*is_supported)(struct dmub_srv *dmub); + bool (*is_psrsu_supported)(struct dmub_srv *dmub); + bool (*is_hw_init)(struct dmub_srv *dmub); - bool (*is_phy_init)(struct dmub_srv *dmub); void (*enable_dmub_boot_options)(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params); @@ -374,6 +378,7 @@ struct dmub_srv_hw_funcs { union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub); + union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub); void (*set_gpint)(struct dmub_srv *dmub, union dmub_gpint_data_register reg); @@ -490,7 +495,7 @@ struct dmub_notification { * of a firmware to know if feature or functionality is supported or present. */ #define DMUB_FW_VERSION(major, minor, revision) \ - ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF)) + ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8)) /** * dmub_srv_create() - creates the DMUB service. @@ -602,6 +607,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); /** + * dmub_srv_sync_inbox1() - sync sw state with hw state + * @dmub: the dmub service + * + * Sync sw state with hw state when resume from S0i3 + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); + +/** * dmub_srv_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service * @cmd: the command to queue @@ -762,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb); enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, union dmub_fw_boot_status *status); +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option); + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip); + bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 598fa1de54ce..7afa78b918b5 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,95 @@ extern "C" { #endif #pragma pack(push, 1) +#define ABM_NUM_OF_ACE_SEGMENTS 5 + +union abm_flags { + struct { + /** + * @abm_enabled: Indicates if ABM is enabled. + */ + unsigned int abm_enabled : 1; + + /** + * @disable_abm_requested: Indicates if driver has requested ABM to be disabled. + */ + unsigned int disable_abm_requested : 1; + + /** + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled + * immediately. + */ + unsigned int disable_abm_immediately : 1; + + /** + * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM + * to be disabled immediately and keep gain. + */ + unsigned int disable_abm_immediate_keep_gain : 1; + + /** + * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled. 
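The DMUB_FW_VERSION change above moves the revision into the third byte; it previously occupied the low 16 bits. A standalone check of the new packing:

#include <stdio.h>

#define DMUB_FW_VERSION(major, minor, revision) \
	((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))

int main(void)
{
	/* New packing: 0x04003B00. The old macro produced 0x0400003B. */
	printf("0x%08X\n", (unsigned)DMUB_FW_VERSION(4, 0, 59));
	return 0;
}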
+ */ + unsigned int fractional_pwm : 1; + + /** + * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment + * of user backlight level. + */ + unsigned int abm_gradual_bl_change : 1; + } bitfields; + + unsigned int u32All; +}; + +struct abm_save_restore { + /** + * @flags: Misc. ABM flags. + */ + union abm_flags flags; + + /** + * @pause: true: pause ABM and get state + * false: unpause ABM after setting state + */ + uint32_t pause; + + /** + * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13) + */ + uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6) + */ + uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6) + */ + uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS]; + + + /** + * @knee_threshold: Current x-position of ACE knee (u0.16). + */ + uint32_t knee_threshold; + /** + * @current_gain: Current backlight reduction (u16.16). + */ + uint32_t current_gain; + /** + * @curr_bl_level: Current actual backlight level converging to target backlight level. + */ + uint16_t curr_bl_level; + + /** + * @curr_user_bl_level: Current nominal backlight level converging to level requested by user. + */ + uint16_t curr_user_bl_level; + +}; + /** * union dmub_addr - DMUB physical/virtual 64-bit address. */ @@ -249,6 +338,112 @@ union dmub_psr_debug_flags { }; /** + * Flags that can be set by driver to change some Replay behaviour. + */ +union replay_debug_flags { + struct { + /** + * Enable visual confirm in FW. + */ + uint32_t visual_confirm : 1; + + /** + * @skip_crc: Set if need to skip CRC. + */ + uint32_t skip_crc : 1; + + /** + * @force_link_power_on: Force disable ALPM control + */ + uint32_t force_link_power_on : 1; + + /** + * @force_phy_power_on: Force phy power on + */ + uint32_t force_phy_power_on : 1; + + /** + * @timing_resync_disabled: Disable Replay normal sleep mode timing resync + */ + uint32_t timing_resync_disabled : 1; + + /** + * @skip_crtc_disabled: CRTC disable skipped + */ + uint32_t skip_crtc_disabled : 1; + + /** + * @force_defer_one_frame_update: Force defer one frame update in ultra sleep mode + */ + uint32_t force_defer_one_frame_update : 1; + /** + * @disable_delay_alpm_on: Force disable delay alpm on + */ + uint32_t disable_delay_alpm_on : 1; + /** + * @disable_desync_error_check: Force disable desync error check + */ + uint32_t disable_desync_error_check : 1; + /** + * @disable_dmub_save_restore: Force disable DMUB save/restore + */ + uint32_t disable_dmub_save_restore : 1; + + uint32_t reserved : 22; + } bitfields; + + uint32_t u32All; +}; + +union replay_hw_flags { + struct { + /** + * @allow_alpm_fw_standby_mode: To indicate whether the + * ALPM FW standby mode is allowed + */ + uint32_t allow_alpm_fw_standby_mode : 1; + + /** + * @dsc_enable_status: DSC enable status in driver + */ + uint32_t dsc_enable_status : 1; + + /** + * @fec_enable_status: receive fec enable/disable status from driver + */ + uint32_t fec_enable_status : 1; + + /** + * @smu_optimizations_en: SMU power optimization. + * Only when active display is Replay capable and display enters Replay. + * Trigger interrupt to SMU to powerup/down. + */ + uint32_t smu_optimizations_en : 1; + + /** + * @otg_powered_down: Flag to keep track of OTG power state. 
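The ABM save/restore fields above are annotated with unsigned fixed-point formats: uM.N means M integer and N fractional bits, so the real value is the raw register value divided by 2^N. A small standalone converter as an aside:

#include <stdint.h>
#include <stdio.h>

static double from_fixed(uint32_t raw, unsigned frac_bits)
{
	return (double)raw / (double)(1u << frac_bits);
}

int main(void)
{
	printf("u3.13 0x2000 -> %.3f\n", from_fixed(0x2000, 13));	/* 1.000 */
	printf("u16.16 0x8000 -> %.3f\n", from_fixed(0x8000, 16));	/* 0.500 */
	return 0;
}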
+ */ + uint32_t otg_powered_down : 1; + + /** + * @phy_power_state: Indicates current phy power state + */ + uint32_t phy_power_state : 1; + + /** + * @link_power_state: Indicates current link power state + */ + uint32_t link_power_state : 1; + /** + * Use TPS3 signal when restore main link. + */ + uint32_t force_wakeup_by_tps3 : 1; + } bitfields; + + uint32_t u32All; +}; + +/** * DMUB visual confirm color */ struct dmub_feature_caps { @@ -257,7 +452,9 @@ struct dmub_feature_caps { */ uint8_t psr; uint8_t fw_assisted_mclk_switch; - uint8_t reserved[6]; + uint8_t reserved[4]; + uint8_t subvp_psr_support; + uint8_t gecc_enable; }; struct dmub_visual_confirm_color { @@ -360,7 +557,7 @@ union dmub_fw_boot_status { uint32_t optimized_init_done : 1; /**< 1 if optimized init done */ uint32_t restore_required : 1; /**< 1 if driver should call restore */ uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */ - uint32_t reserved : 1; + uint32_t fams_enabled : 1; /**< 1 if FAMS is enabled */ uint32_t detection_required: 1; /**< if detection need to be triggered by driver */ uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */ } bits; /**< status bits */ @@ -376,6 +573,7 @@ enum dmub_fw_boot_status_bit { DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */ DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */ DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */ + DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled */ DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/ DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */ }; @@ -395,6 +593,12 @@ enum dmub_lvtma_status_bit { DMUB_LVTMA_STATUS_BIT_EDP_ON = (1 << 1), }; +enum dmub_ips_disable_type { + DMUB_IPS_DISABLE_IPS1 = 1, + DMUB_IPS_DISABLE_IPS2 = 2, + DMUB_IPS_DISABLE_IPS2_Z10 = 3, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -419,7 +623,10 @@ union dmub_fw_boot_options { uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ - uint32_t reserved : 14; /**< reserved */ + uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ + uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ + uint32_t ips_disable: 2; /* options to disable ips support*/ + uint32_t reserved : 10; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -554,9 +761,42 @@ enum dmub_gpint_command { DMUB_GPINT__PSR_RESIDENCY = 9, /** + * DESC: Get REPLAY state from FW. + * RETURN: REPLAY state enum. This enum may need to be converted to the legacy REPLAY state value. + */ + DMUB_GPINT__GET_REPLAY_STATE = 13, + + /** + * DESC: Start REPLAY residency counter. Stop REPLAY residency counter and get value. + * ARGS: We can measure residency from various points. The argument will specify the residency mode. + * By default, it is measured from after we powerdown the PHY, to just before we powerup the PHY. + * RETURN: REPLAY residency in milli-percent. + */ + DMUB_GPINT__REPLAY_RESIDENCY = 14, + + + /** * DESC: Notifies DMCUB detection is done so detection required can be cleared. 
*/ DMUB_GPINT__NOTIFY_DETECTION_DONE = 12, + /** + * DESC: Updates the trace buffer lower 32-bit mask. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101, + /** + * DESC: Updates the trace buffer mask bit0~bit15. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102, + /** + * DESC: Updates the trace buffer mask bit16~bit31. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103, }; /** @@ -752,6 +992,11 @@ enum dmub_cmd_type { */ /** + * Command type used for all REPLAY commands. + */ + DMUB_CMD__REPLAY = 83, + + /** * Command type used for all SECURE_DISPLAY commands. */ DMUB_CMD__SECURE_DISPLAY = 85, @@ -988,16 +1233,25 @@ struct dmub_rb_cmd_mall { }; /** - * enum dmub_cmd_cab_type - TODO: + * enum dmub_cmd_cab_type - CAB command data. */ enum dmub_cmd_cab_type { + /** + * No idle optimizations (i.e. no CAB) + */ DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION = 0, + /** + * No DCN requests for memory + */ DMUB_CMD__CAB_NO_DCN_REQ = 1, + /** + * Fit surfaces in CAB (i.e. CAB enable) + */ DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2, }; /** - * struct dmub_rb_cmd_cab_for_ss - TODO: + * struct dmub_rb_cmd_cab_for_ss - CAB command data. */ struct dmub_rb_cmd_cab_for_ss { struct dmub_cmd_header header; @@ -1005,6 +1259,9 @@ struct dmub_rb_cmd_cab_for_ss { uint8_t debug_bits; /* debug bits */ }; +/** + * Enum for indicating which MCLK switch mode per pipe + */ enum mclk_switch_mode { NONE = 0, FPO = 1, @@ -1125,8 +1382,6 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t d3_entry; - uint8_t trigger; uint8_t pad[1]; }; @@ -1889,6 +2144,10 @@ enum dmub_phy_fsm_state { DMUB_PHY_FSM_PLL_EN, DMUB_PHY_FSM_TX_EN, DMUB_PHY_FSM_FAST_LP, + DMUB_PHY_FSM_P2_PLL_OFF_CPM, + DMUB_PHY_FSM_P2_PLL_OFF_PG, + DMUB_PHY_FSM_P2_PLL_OFF, + DMUB_PHY_FSM_P2_PLL_ON, }; /** @@ -2474,6 +2733,272 @@ struct dmub_cmd_psr_set_power_opt_data { uint32_t power_opt; }; +#define REPLAY_RESIDENCY_MODE_SHIFT (0) +#define REPLAY_RESIDENCY_ENABLE_SHIFT (1) + +#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) + +#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) +# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) + +enum replay_state { + REPLAY_STATE_0 = 0x0, + REPLAY_STATE_1 = 0x10, + REPLAY_STATE_1A = 0x11, + REPLAY_STATE_2 = 0x20, + REPLAY_STATE_3 = 0x30, + REPLAY_STATE_3INIT = 0x31, + REPLAY_STATE_4 = 0x40, + REPLAY_STATE_4A = 0x41, + REPLAY_STATE_4B = 0x42, + REPLAY_STATE_4C = 0x43, + REPLAY_STATE_4D = 0x44, + REPLAY_STATE_4B_LOCKED = 0x4A, + REPLAY_STATE_4C_UNLOCKED = 0x4B, + REPLAY_STATE_5 = 0x50, + REPLAY_STATE_5A = 0x51, + REPLAY_STATE_5B = 0x52, + REPLAY_STATE_5A_LOCKED = 0x5A, + REPLAY_STATE_5B_UNLOCKED = 0x5B, + REPLAY_STATE_6 = 0x60, + REPLAY_STATE_6A = 0x61, + REPLAY_STATE_6B = 0x62, + REPLAY_STATE_INVALID = 0xFF, +}; + +/** + * Replay command sub-types. + */ +enum dmub_cmd_replay_type { + /** + * Copy driver-calculated parameters to REPLAY state. + */ + DMUB_CMD__REPLAY_COPY_SETTINGS = 0, + /** + * Enable REPLAY. + */ + DMUB_CMD__REPLAY_ENABLE = 1, + /** + * Set Replay power option. 
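The residency masks above pack a mode select and an enable bit into the DMUB_GPINT__REPLAY_RESIDENCY argument. A standalone sketch of composing it; the GPINT transport itself is outside this hunk, so it is stubbed:

#include <stdint.h>

#define REPLAY_RESIDENCY_MODE_SHIFT	(0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT	(1)
#define REPLAY_RESIDENCY_MODE_ALPM	(0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
#define REPLAY_RESIDENCY_DISABLE	(0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
#define REPLAY_RESIDENCY_ENABLE		(0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)

/* Hypothetical stand-in for the GPINT send path. */
static void send_gpint(uint32_t arg)
{
	(void)arg;
}

static void measure_alpm_residency(void)
{
	/* Start: select ALPM measurement and set the enable bit. */
	send_gpint(REPLAY_RESIDENCY_MODE_ALPM | REPLAY_RESIDENCY_ENABLE);

	/* ... interval of interest ... */

	/* Stop: same mode, enable bit clear; FW replies in milli-percent. */
	send_gpint(REPLAY_RESIDENCY_MODE_ALPM | REPLAY_RESIDENCY_DISABLE);
}

int main(void)
{
	measure_alpm_residency();
	return 0;
}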
+ */ + DMUB_CMD__SET_REPLAY_POWER_OPT = 2, + /** + * Set coasting vtotal. + */ + DMUB_CMD__REPLAY_SET_COASTING_VTOTAL = 3, +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ +struct dmub_cmd_replay_copy_settings_data { + /** + * Flags that can be set by driver to change some replay behaviour. + */ + union replay_debug_flags debug; + + /** + * @flags: Flags used to determine feature functionality. + */ + union replay_hw_flags flags; + + /** + * DPP HW instance. + */ + uint8_t dpp_inst; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + /** + * AUX HW instance. + */ + uint8_t aux_inst; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * @pixel_deviation_per_line: Indicate the maximum pixel deviation per line compared + * to Source timing when Sink maintains coasting vtotal during the Replay normal sleep mode + */ + uint8_t pixel_deviation_per_line; + /** + * @max_deviation_line: The max number of deviation lines that can keep the timing + * synchronized between the Source and Sink during Replay normal sleep mode. + */ + uint8_t max_deviation_line; + /** + * Length of each horizontal line in ns. + */ + uint32_t line_time_in_ns; + /** + * PHY instance. + */ + uint8_t dpphy_inst; + /** + * Determines if SMU optimizations are enabled/disabled. + */ + uint8_t smu_optimizations_en; + /** + * Determines if timing sync is enabled/disabled. + */ + uint8_t replay_timing_sync_supported; + /** + * Use FSM state for Replay power up/down + */ + uint8_t use_phy_fsm; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ +struct dmub_rb_cmd_replay_copy_settings { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ + struct dmub_cmd_replay_copy_settings_data replay_copy_settings_data; +}; + +/** + * Replay disable / enable state for dmub_rb_cmd_replay_enable_data.enable + */ +enum replay_enable { + /** + * Disable REPLAY. + */ + REPLAY_DISABLE = 0, + /** + * Enable REPLAY. + */ + REPLAY_ENABLE = 1, +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command. + */ +struct dmub_rb_cmd_replay_enable_data { + /** + * Replay enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Phy state to enter. + * Values to use are defined in dmub_phy_fsm_state + */ + uint8_t phy_fsm_state; + /** + * Phy rate for DP - RBR/HBR/HBR2/HBR3. + * Set this using enum phy_link_rate. + * This does not support HDMI/DP2 for now. + */ + uint8_t phy_rate; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_ENABLE command. + * Replay enable/disable is controlled using action in data. + */ +struct dmub_rb_cmd_replay_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_rb_cmd_replay_enable_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ +struct dmub_cmd_replay_set_power_opt_data { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Explicit padding to 4 byte boundary. 
+	 */
+	uint8_t pad[3];
+	/**
+	 * REPLAY power option
+	 */
+	uint32_t power_opt;
+};
+
+/**
+ * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+ */
+struct dmub_rb_cmd_replay_set_power_opt {
+	/**
+	 * Command header.
+	 */
+	struct dmub_cmd_header header;
+	/**
+	 * Data passed from driver to FW in a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+	 */
+	struct dmub_cmd_replay_set_power_opt_data replay_set_power_opt_data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_coasting_vtotal_data {
+	/**
+	 * 16-bit value dictated by driver that indicates the coasting vtotal.
+	 */
+	uint16_t coasting_vtotal;
+	/**
+	 * REPLAY control version.
+	 */
+	uint8_t cmd_version;
+	/**
+	 * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+	 * Currently the support is only for 0 or 1
+	 */
+	uint8_t panel_inst;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_coasting_vtotal {
+	/**
+	 * Command header.
+	 */
+	struct dmub_cmd_header header;
+	/**
+	 * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+	 */
+	struct dmub_cmd_replay_set_coasting_vtotal_data replay_set_coasting_vtotal_data;
+};
+
 /**
  * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
  */
@@ -2565,6 +3090,10 @@ enum hw_lock_client {
 	 */
 	HW_LOCK_CLIENT_PSR_SU = 1,
 	/**
+	 * Replay is the client of HW Lock Manager.
+	 */
+	HW_LOCK_CLIENT_REPLAY = 4,
+	/**
 	 * Invalid client.
 	 */
 	HW_LOCK_CLIENT_INVALID = 0xFFFFFFFF,
@@ -2650,6 +3179,12 @@ enum dmub_cmd_abm_type {
 	 * unregister vertical interrupt after steady state is reached
 	 */
 	DMUB_CMD__ABM_PAUSE = 6,
+
+	/**
+	 * Save and restore ABM state. On save we save the parameters,
+	 * and on restore we update the state with the passed-in data.
+	 */
+	DMUB_CMD__ABM_SAVE_RESTORE = 7,
 };
 
 /**
@@ -3034,6 +3569,7 @@ struct dmub_cmd_abm_pause_data {
 	uint8_t pad[1];
 };
 
+
 /**
  * Definition of a DMUB_CMD__ABM_PAUSE command.
  */
@@ -3050,6 +3586,36 @@ struct dmub_rb_cmd_abm_pause {
 };
 
 /**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+struct dmub_rb_cmd_abm_save_restore {
+	/**
+	 * Command header.
+	 */
+	struct dmub_cmd_header header;
+
+	/**
+	 * OTG hw instance
+	 */
+	uint8_t otg_inst;
+
+	/**
+	 * Enable or disable ABM pause
+	 */
+	uint8_t freeze;
+
+	/**
+	 * Debug flag (also pads the struct to a 4 byte boundary).
+	 */
+	uint8_t debug;
+
+	/**
+	 * Data passed from driver to FW in a DMUB_CMD__ABM_INIT_CONFIG command.
+	 */
+	struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
+/**
  * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
  */
 struct dmub_cmd_query_feature_caps_data {
@@ -3487,6 +4053,11 @@ union dmub_rb_cmd {
 	struct dmub_rb_cmd_abm_pause abm_pause;
 
 	/**
+	 * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+	 */
+	struct dmub_rb_cmd_abm_save_restore abm_save_restore;
+
+	/**
 	 * Definition of a DMUB_CMD__DP_AUX_ACCESS command.
 	 */
 	struct dmub_rb_cmd_dp_aux_access dp_aux_access;
@@ -3550,6 +4121,26 @@ union dmub_rb_cmd {
 	 * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
 	 */
 	struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
+	/**
+	 * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
+	 */
+	struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
+	/**
+	 * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
+	 */
+	struct dmub_rb_cmd_replay_copy_settings replay_copy_settings;
+	/**
+	 * Definition of a DMUB_CMD__REPLAY_ENABLE command.
+ */ + struct dmub_rb_cmd_replay_enable replay_enable; + /** + * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ + struct dmub_rb_cmd_replay_set_power_opt replay_set_power_opt; + /** + * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command. + */ + struct dmub_rb_cmd_replay_set_coasting_vtotal replay_set_coasting_vtotal; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h deleted file mode 100644 index 21b02bad696f..000000000000 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef DMUB_SUBVP_STATE_H -#define DMUB_SUBVP_STATE_H - -#include "dmub_cmd.h" - -#define DMUB_SUBVP_INST0 0 -#define DMUB_SUBVP_INST1 1 -#define SUBVP_MAX_WATERMARK 0xFFFF - -struct dmub_subvp_hubp_state { - uint32_t CURSOR0_0_CURSOR_POSITION; - uint32_t CURSOR0_0_CURSOR_HOT_SPOT; - uint32_t CURSOR0_0_CURSOR_DST_OFFSET; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; - uint32_t CURSOR0_0_CURSOR_SIZE; - uint32_t CURSOR0_0_CURSOR_CONTROL; - uint32_t HUBPREQ0_CURSOR_SETTINGS; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; -}; - -enum subvp_error_code { - DMUB_SUBVP_INVALID_STATE, - DMUB_SUBVP_INVALID_TRANSITION, -}; - -enum subvp_state { - DMUB_SUBVP_DISABLED, - DMUB_SUBVP_IDLE, - DMUB_SUBVP_TRY_ACQUIRE_LOCKS, - DMUB_SUBVP_WAIT_FOR_LOCKS, - DMUB_SUBVP_PRECONFIGURE, - DMUB_SUBVP_PREPARE, - DMUB_SUBVP_ENABLE, - DMUB_SUBVP_SWITCHING, - DMUB_SUBVP_END, - DMUB_SUBVP_RESTORE, -}; - -/* Defines information for SUBVP to handle vertical interrupts. */ -struct dmub_subvp_vertical_interrupt_event { - /** - * @inst: Hardware instance of vertical interrupt. - */ - uint8_t otg_inst; - - /** - * @pad: Align structure to 4 byte boundary. 
- */ - uint8_t pad[3]; - - enum subvp_state curr_state; -}; - -struct dmub_subvp_vertical_interrupt_state { - /** - * @events: Event list. - */ - struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS]; -}; - -struct dmub_subvp_vline_interrupt_event { - - uint8_t hubp_inst; - uint8_t pad[3]; -}; - -struct dmub_subvp_vline_interrupt_state { - struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES]; -}; - -struct dmub_subvp_interrupt_ctx { - struct dmub_subvp_vertical_interrupt_state vertical_int; - struct dmub_subvp_vline_interrupt_state vline_int; -}; - -struct dmub_subvp_pipe_state { - uint32_t pix_clk_100hz; - uint16_t main_vblank_start; - uint16_t main_vblank_end; - uint16_t mall_region_lines; - uint16_t prefetch_lines; - uint16_t prefetch_to_mall_start_lines; - uint16_t processing_delay_lines; - uint8_t main_pipe_index; - uint8_t phantom_pipe_index; - uint16_t htotal; // htotal for main / phantom pipe - uint16_t vtotal; - uint16_t optc_underflow_count; - uint16_t hubp_underflow_count; - uint8_t pad[2]; -}; - -/** - * struct dmub_subvp_vblank_drr_info - Store DRR state when handling - * SubVP + VBLANK with DRR multi-display case. - * - * The info stored in this struct is only valid if drr_in_use = 1. - */ -struct dmub_subvp_vblank_drr_info { - uint8_t drr_in_use; - uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame - uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK - uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements - uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end) - uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end) - uint8_t use_ramping; // Use ramping or not - uint8_t pad[1]; -}; - -struct dmub_subvp_vblank_pipe_info { - uint32_t pix_clk_100hz; - uint16_t vblank_start; - uint16_t vblank_end; - uint16_t vstartup_start; - uint16_t vtotal; - uint16_t htotal; - uint8_t pipe_index; - uint8_t pad[1]; - struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case -}; - -enum subvp_switch_type { - DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE - DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays - DMUB_SUBVP_AND_VBLANK, - DMUB_SUBVP_AND_FPO, -}; - -/* SubVP state. */ -struct dmub_subvp_state { - struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS]; - struct dmub_subvp_interrupt_ctx int_ctx; - struct dmub_subvp_vblank_pipe_info vblank_info; - enum subvp_state state; // current state - enum subvp_switch_type switch_type; // enum take up 4 bytes (?) 
- uint8_t mclk_pending; - uint8_t num_subvp_streams; - uint8_t vertical_int_margin_us; - uint8_t pstate_allow_width_us; - uint32_t subvp_mclk_switch_count; - uint32_t subvp_wait_lock_count; - uint32_t driver_wait_lock_count; - uint32_t subvp_vblank_frame_count; - uint16_t watermark_a_cache; - uint8_t pad[2]; -}; - -#endif /* _DMUB_SUBVP_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile index 0589ad4778ee..caf095aca8f3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/Makefile +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -22,7 +22,7 @@ DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o -DMUB += dmub_dcn31.o dmub_dcn315.o dmub_dcn316.o +DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o DMUB += dmub_dcn32.o AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index a6540e27044d..98dad0d47e72 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index c2e5831ac52c..1df128e57ed3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c index 51bb9bceb1b1..2d212bc974cc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -54,9 +54,3 @@ const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { #undef DMUB_SF }; -/* Shared functions. */ - -bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub) -{ - return REG_READ(DMCUB_SCRATCH10) == 0; -} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h index 6fd5b0cd4ef3..8c4033ae4007 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -32,8 +32,4 @@ extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs; -/* Hardware functions. 
*/ - -bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub); - #endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index c90b9ee42e12..094e9f864557 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); @@ -297,6 +302,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub) return supported; } +bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub) +{ + return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59); +} + void dmub_dcn31_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg) { @@ -342,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub) return status; } +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub) +{ + union dmub_fw_boot_options option; + + option.all = REG_READ(DMCUB_SCRATCH14); + return option; +} + void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index f6db6f89d45d..4d520a893c7b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); @@ -219,6 +221,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub); bool dmub_dcn31_is_supported(struct dmub_srv *dmub); +bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub); + void dmub_dcn31_set_gpint(struct dmub_srv *dmub, union dmub_gpint_data_register reg); @@ -235,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip) union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub); +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub); + void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub, const struct dmub_region *outbox0); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c new file mode 100644 index 000000000000..f161aeb7e7c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c @@ -0,0 +1,67 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../dmub_srv.h" +#include "dmub_reg.h" +#include "dmub_dcn314.h" + +#include "dcn/dcn_3_1_4_offset.h" +#include "dcn/dcn_3_1_4_sh_mask.h" + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg +#define CTX dmub +#define REGS dmub->regs_dcn31 +#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name) + +/* Registers. */ + +const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = { +#define DMUB_SR(reg) REG_OFFSET_EXP(reg), + { + DMUB_DCN31_REGS() + DMCUB_INTERNAL_REGS() + }, +#undef DMUB_SR + +#define DMUB_SF(reg, field) FD_MASK(reg, field), + { DMUB_DCN31_FIELDS() }, +#undef DMUB_SF + +#define DMUB_SF(reg, field) FD_SHIFT(reg, field), + { DMUB_DCN31_FIELDS() }, +#undef DMUB_SF +}; + +bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub) +{ + return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h new file mode 100644 index 000000000000..f213bd82c911 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h @@ -0,0 +1,35 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN314_H_ +#define _DMUB_DCN314_H_ + +#include "dmub_dcn31.h" + +extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs; + +bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN314_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 9c20516be066..bf5994e292d9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -116,10 +116,6 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) break; } - /* Clear the GPINT command manually so we don't reset again. */ - cmd.all = 0; - dmub->hw_funcs.set_gpint(dmub, cmd); - /* Force reset in case we timed out, DMCUB is likely hung. */ } @@ -133,6 +129,10 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); + + /* Clear the GPINT command manually so we don't reset again. */ + cmd.all = 0; + dmub->hw_funcs.set_gpint(dmub, cmd); } void dmub_dcn32_reset_release(struct dmub_srv *dmub) @@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); @@ -434,6 +439,7 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); + diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); @@ -464,6 +470,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + + diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index 7d1a6eb4d665..d58a1e4b9f1c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -107,6 +107,7 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH15) \ DMUB_SR(DMCUB_SCRATCH16) \ DMUB_SR(DMCUB_SCRATCH17) \ + DMUB_SR(DMCUB_GPINT_DATAIN0) \ DMUB_SR(DMCUB_GPINT_DATAIN1) \ DMUB_SR(DMCUB_GPINT_DATAOUT) \ DMUB_SR(CC_DC_PIPE_DIS) \ @@ -206,6 +207,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 92c18bfb98b3..93624ffe4eb8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -32,6 +32,7 @@ #include "dmub_dcn302.h" #include "dmub_dcn303.h" #include "dmub_dcn31.h" +#include "dmub_dcn314.h" #include "dmub_dcn315.h" #include "dmub_dcn316.h" #include "dmub_dcn32.h" @@ -166,6 +167,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->backdoor_load = dmub_dcn20_backdoor_load; funcs->setup_windows = dmub_dcn20_setup_windows; funcs->setup_mailbox = dmub_dcn20_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; @@ -190,11 +192,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data; - if (asic == DMUB_ASIC_DCN21) { + if (asic == DMUB_ASIC_DCN21) dmub->regs = &dmub_srv_dcn21_regs; - funcs->is_phy_init = dmub_dcn21_is_phy_init; - } if (asic == DMUB_ASIC_DCN30) { dmub->regs = &dmub_srv_dcn30_regs; @@ -226,17 +226,23 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) case DMUB_ASIC_DCN314: case DMUB_ASIC_DCN315: case DMUB_ASIC_DCN316: - if (asic == DMUB_ASIC_DCN315) + if (asic == DMUB_ASIC_DCN314) { + dmub->regs_dcn31 = &dmub_srv_dcn314_regs; + funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported; + } else if (asic == DMUB_ASIC_DCN315) { dmub->regs_dcn31 = &dmub_srv_dcn315_regs; - else if (asic == DMUB_ASIC_DCN316) + } else if (asic == DMUB_ASIC_DCN316) { dmub->regs_dcn31 = &dmub_srv_dcn316_regs; - else + } else { dmub->regs_dcn31 = &dmub_srv_dcn31_regs; + funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported; + } funcs->reset = dmub_dcn31_reset; funcs->reset_release = dmub_dcn31_reset_release; funcs->backdoor_load = dmub_dcn31_backdoor_load; funcs->setup_windows = dmub_dcn31_setup_windows; funcs->setup_mailbox = dmub_dcn31_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox; @@ -249,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_gpint_response = dmub_dcn31_get_gpint_response; funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout; funcs->get_fw_status = dmub_dcn31_get_fw_boot_status; + funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option; funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence; //outbox0 call stacks @@ -275,6 +282,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode; funcs->setup_windows = dmub_dcn32_setup_windows; funcs->setup_mailbox = dmub_dcn32_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox; @@ -632,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, if (dmub->hw_funcs.enable_dmub_boot_options) dmub->hw_funcs.enable_dmub_boot_options(dmub, params); - if (dmub->hw_funcs.skip_dmub_panel_power_sequence) + if 
(dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, params->skip_panel_power_sequence); - if (dmub->hw_funcs.reset_release) + if (dmub->hw_funcs.reset_release && !dmub->is_virtual) dmub->hw_funcs.reset_release(dmub); dmub->hw_init = true; @@ -644,6 +652,20 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + } + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) { if (!dmub->sw_init) @@ -721,27 +743,6 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } -enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, - uint32_t timeout_us) -{ - uint32_t i = 0; - - if (!dmub->hw_init) - return DMUB_STATUS_INVALID; - - if (!dmub->hw_funcs.is_phy_init) - return DMUB_STATUS_OK; - - for (i = 0; i <= timeout_us; i += 10) { - if (dmub->hw_funcs.is_phy_init(dmub)) - return DMUB_STATUS_OK; - - udelay(10); - } - - return DMUB_STATUS_TIMEOUT; -} - enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { @@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option) +{ + option->all = 0; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_fw_boot_option) + *option = dmub->hw_funcs.get_fw_boot_option(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) + dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip); + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index f843fc497855..68dfc7968017 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -40,6 +40,7 @@ #define DP_BRANCH_HW_REV_20 0x20 #define DP_DEVICE_ID_38EC11 0x38EC11 +#define DP_DEVICE_ID_BA4159 0xBA4159 #define DP_FORCE_PSRSU_CAPABILITY 0x40F #define DP_SINK_PSR_ACTIVE_VTOTAL 0x373 diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index c062a44db078..914f28e9f224 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -172,6 +172,9 @@ enum dpcd_psr_sink_states { #define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 #define DP_SOURCE_BACKLIGHT_CONTROL 0x32E #define DP_SOURCE_BACKLIGHT_ENABLE 0x32F -#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SINK_PR_REPLAY_STATUS 0x378 +#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 +#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A #endif /* __DAL_DPCD_DEFS_H__ */ diff --git 
a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index ece97ae0e826..d4cf7ead1d87 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -525,7 +525,7 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne if (negative) arg.value = -arg.value; - arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); + arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); if (negative) arg.value = -arg.value; return arg; diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index cd870af5fd25..1b8ab20f1715 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -53,7 +53,7 @@ enum { BITS_PER_DP_BYTE = 10, DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */ DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */ - DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */ + DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */ }; enum lttpr_mode { diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 23a308c3eccb..325c5ba4c82a 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -44,6 +44,34 @@ enum signal_type { SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */ }; +static inline const char *signal_type_to_string(const int type) +{ + switch (type) { + case SIGNAL_TYPE_NONE: + return "No signal"; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + return "DVI: Single Link"; + case SIGNAL_TYPE_DVI_DUAL_LINK: + return "DVI: Dual Link"; + case SIGNAL_TYPE_HDMI_TYPE_A: + return "HDMI: TYPE A"; + case SIGNAL_TYPE_LVDS: + return "LVDS"; + case SIGNAL_TYPE_RGB: + return "RGB"; + case SIGNAL_TYPE_DISPLAY_PORT: + return "Display Port"; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + return "Display Port: MST"; + case SIGNAL_TYPE_EDP: + return "Embedded Display Port"; + case SIGNAL_TYPE_VIRTUAL: + return "Virtual"; + default: + return "Unknown"; + } +} + /* help functions for signal types manipulation */ static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal) { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 67a062af3ab0..ff8e5708735d 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -359,7 +359,7 @@ static struct fixed31_32 translate_from_linear_space( scratch_1 = dc_fixpt_add(one, args->a3); /* In the first region (first 16 points) and in the * region delimited by START/END we calculate with - * full precision to avoid error accumulation. + * full precision to avoid error accumulation. 
*/ if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START && cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) || @@ -379,8 +379,7 @@ static struct fixed31_32 translate_from_linear_space( scratch_1 = dc_fixpt_sub(scratch_1, args->a2); return scratch_1; - } - else + } else return dc_fixpt_mul(args->arg, args->a1); } diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 5c41a4751db4..ef3a67409021 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -1,5 +1,5 @@ /* - * Copyright 2016 Advanced Micro Devices, Inc. + * Copyright 2016-2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, * - Delta for CEIL: delta_from_mid_point_in_us_1 * - Delta for FLOOR: delta_from_mid_point_in_us_2 */ - if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) { + if (mid_point_frames_ceil && + (last_render_time_in_us / mid_point_frames_ceil) < + in_out_vrr->min_duration_in_us) { /* Check for out of range. * If using CEIL produces a value that is out of range, * then we are forced to use FLOOR. @@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Either we've calculated the number of frames to insert, * or we need to insert min duration frames */ - if (last_render_time_in_us / frames_to_insert < - in_out_vrr->min_duration_in_us){ + if (frames_to_insert && + (last_render_time_in_us / frames_to_insert) < + in_out_vrr->min_duration_in_us){ frames_to_insert -= (frames_to_insert > 1) ? 1 : 0; } @@ -989,6 +992,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, unsigned int refresh_range = 0; unsigned long long min_refresh_in_uhz = 0; unsigned long long max_refresh_in_uhz = 0; + unsigned long long min_hardware_refresh_in_uhz = 0; if (mod_freesync == NULL) return; @@ -999,7 +1003,13 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); - min_refresh_in_uhz = in_config->min_refresh_in_uhz; + if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) { + min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), + (stream->timing.h_total * stream->ctx->dc->caps.max_v_total)); + } + /* Limit minimum refresh rate to what can be supported by hardware */ + min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ? 
+ min_hardware_refresh_in_uhz : in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; /* Full range may be larger than current video timing, so cap at nominal */ @@ -1137,10 +1147,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, if (in_out_vrr->supported && in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) { - unsigned int oldest_index = plane->time.index + 1; - - if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX) - oldest_index = 0; last_render_time_in_us = curr_time_stamp_in_us - plane->time.prev_update_time_in_us; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index ec64f19e1786..84f9b412a4f1 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -149,6 +149,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->replay_settings.config.replay_supported) + vsc_packet_revision = vsc_packet_rev4; else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) vsc_packet_revision = vsc_packet_rev2; @@ -536,6 +538,9 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, case FREESYNC_TYPE_PCON_IN_WHITELIST: mod_build_adaptive_sync_infopacket_v1(info_packet); break; + case ADAPTIVE_SYNC_TYPE_EDP: + mod_build_adaptive_sync_infopacket_v1(info_packet); + break; case ADAPTIVE_SYNC_TYPE_NONE: case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST: default: diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 51e76bce92ea..73a2b37fbbd7 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -116,6 +116,27 @@ static const struct abm_parameters * const abm_settings[] = { abm_settings_config2, }; +static const struct dm_bl_data_point custom_backlight_curve0[] = { + {2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35}, + {20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62}, + {36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98}, + {52, 103}, {54, 108}, {56, 113}, {58, 118}, {60, 123}, {62, 129}, {64, 135}, {66, 140}, + {68, 146}, {70, 152}, {72, 158}, {74, 164}, {76, 171}, {78, 177}, {80, 183}, {82, 190}, + {84, 197}, {86, 204}, {88, 211}, {90, 218}, {92, 225}, {94, 232}, {96, 240}, {98, 247}}; + +struct custom_backlight_profile { + uint8_t ac_level_percentage; + uint8_t dc_level_percentage; + uint8_t min_input_signal; + uint8_t max_input_signal; + uint8_t num_data_points; + const struct dm_bl_data_point *data_points; +}; + +static const struct custom_backlight_profile custom_backlight_profiles[] = { + {100, 32, 12, 255, ARRAY_SIZE(custom_backlight_curve0), custom_backlight_curve0}, +}; + #define NUM_AMBI_LEVEL 5 #define NUM_AGGR_LEVEL 4 #define NUM_POWER_FN_SEGS 8 @@ -905,6 +926,11 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config, !link->dpcd_caps.psr_info.psr_dpcd_caps.bits.LINK_TRAINING_ON_EXIT_NOT_REQUIRED; } +void init_replay_config(struct dc_link *link, struct replay_config *pr_config) +{ + link->replay_settings.config = *pr_config; +} + bool mod_power_only_edp(const struct dc_state *context, 
 				const struct dc_stream_state *stream)
 {
 	return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
@@ -944,3 +970,25 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
 
 	return true;
 }
+
+bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
+{
+	unsigned int data_points_size;
+
+	if (config_no >= ARRAY_SIZE(custom_backlight_profiles))
+		return false;
+
+	data_points_size = custom_backlight_profiles[config_no].num_data_points
+			* sizeof(custom_backlight_profiles[config_no].data_points[0]);
+
+	caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
+	caps->flags = 0;
+	caps->error_code = 0;
+	caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;
+	caps->dc_level_percentage = custom_backlight_profiles[config_no].dc_level_percentage;
+	caps->min_input_signal = custom_backlight_profiles[config_no].min_input_signal;
+	caps->max_input_signal = custom_backlight_profiles[config_no].max_input_signal;
+	caps->num_data_points = custom_backlight_profiles[config_no].num_data_points;
+	memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
+	return true;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 1d3079e56799..d9e0d67d67f7 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -53,6 +53,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
 		struct dmcu_iram_parameters params,
 		unsigned int inst);
 
+void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+
 bool is_psr_su_specific_panel(struct dc_link *link);
 void mod_power_calc_psr_configs(struct psr_config *psr_config,
 		struct dc_link *link,
@@ -62,4 +64,7 @@ bool mod_power_only_edp(const struct dc_state *context,
 bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
 		struct dc_stream_state *stream,
 		struct psr_config *config);
+
+bool fill_custom_backlight_caps(unsigned int config_no,
+		struct dm_acpi_atif_backlight_caps *caps);
 #endif /* MODULES_POWER_POWER_HELPERS_H_ */
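
The REPLAY_RESIDENCY_* defines added to dmub_cmd.h pack two independent one-bit fields, a mode select and an enable, into the single 32-bit argument a GPINT command carries. A minimal standalone sketch of that packing follows; build_replay_residency_arg() is a hypothetical illustration helper, not part of the DMUB interface, and the defines are copied from the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Bit-field encoding copied from the dmub_cmd.h hunk above. */
#define REPLAY_RESIDENCY_MODE_SHIFT	(0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT	(1)
#define REPLAY_RESIDENCY_MODE_PHY	(0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
#define REPLAY_RESIDENCY_MODE_ALPM	(0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
#define REPLAY_RESIDENCY_DISABLE	(0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
#define REPLAY_RESIDENCY_ENABLE		(0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)

/* Hypothetical helper: OR the two fields into the 32-bit GPINT argument. */
static uint32_t build_replay_residency_arg(int alpm_mode, int enable)
{
	uint32_t arg = 0;

	arg |= alpm_mode ? REPLAY_RESIDENCY_MODE_ALPM : REPLAY_RESIDENCY_MODE_PHY;
	arg |= enable ? REPLAY_RESIDENCY_ENABLE : REPLAY_RESIDENCY_DISABLE;
	return arg;
}

int main(void)
{
	/* ALPM mode with residency measurement enabled -> 0x3. */
	printf("arg = 0x%x\n", build_replay_residency_arg(1, 1));
	return 0;
}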
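
The link_service_types.h hunk drops DATA_EFFICIENCY_128b_132b_x10000 from 9646 to 9641 so the constant matches its own comment: 96.71% data efficiency multiplied by the 99.7% downspread factor is 0.96419..., which truncates to 9641 in the driver's x10000 fixed-point convention. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* 96.71% data efficiency times 99.7% downspread, in x10000 fixed
	 * point; the integer division truncates, giving 9641. */
	unsigned int x10000 = (9671u * 9970u) / 10000u;

	printf("%u\n", x10000); /* prints 9641 */
	return 0;
}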
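
In the freesync.c hunk, mod_freesync_build_vrr_params() now clamps the configured minimum refresh rate to the slowest rate the hardware can actually reach: the pixel clock divided by h_total times the new max_v_total cap. The standalone program below re-derives that floor with made-up example numbers (594 MHz pixel clock, h_total of 4400, max_v_total of 4500); the values are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pix_clk_100hz = 5940000;   /* 594 MHz in 100 Hz units */
	uint64_t h_total = 4400;
	uint64_t max_v_total = 4500;        /* hardware VTOTAL stretch limit */
	uint64_t config_min_uhz = 24000000; /* 24 Hz requested by the config */

	/* Same formula as the patch: refresh = pixel clock / (htotal * vtotal),
	 * carried out in micro-hertz. */
	uint64_t min_hw_uhz = (pix_clk_100hz * 100000000ULL) /
			      (h_total * max_v_total);

	/* Limit minimum refresh rate to what can be supported by hardware. */
	uint64_t min_uhz = min_hw_uhz > config_min_uhz ? min_hw_uhz : config_min_uhz;

	printf("hw floor %llu uHz, effective minimum %llu uHz\n",
	       (unsigned long long)min_hw_uhz, (unsigned long long)min_uhz);
	return 0;
}

With these numbers the hardware floor works out to 30,000,000 uHz (30 Hz), so the requested 24 Hz minimum is raised to 30 Hz.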
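
The new dmub_srv_sync_inbox1() in dmub_srv.c realigns the driver's software copy of the INBOX1 ring state with whatever the firmware actually consumed while the driver was not involved, by re-reading both hardware pointers through the new get_inbox1_wptr hooks. A simplified sketch of that pattern, using stand-in types and register accessors rather than the real dmub_srv structures:

#include <stdint.h>

/* Stand-ins for REG_READ(DMCUB_INBOX1_RPTR) / REG_READ(DMCUB_INBOX1_WPTR);
 * fixed values keep the sketch self-contained. */
static uint32_t read_hw_inbox1_rptr(void) { return 0x40; }
static uint32_t read_hw_inbox1_wptr(void) { return 0x40; }

/* Simplified stand-in for struct dmub_rb; the real structure also tracks
 * the ring base address and capacity. */
struct inbox_rb {
	uint32_t rptr; /* next offset the firmware will consume */
	uint32_t wrpt; /* next offset the driver will fill */
};

/* Mirrors the dmub_srv_sync_inbox1() flow: adopt the hardware's view of
 * both pointers so stale software state cannot be replayed into the ring. */
static void sync_inbox1(struct inbox_rb *rb, uint32_t *last_wptr)
{
	rb->rptr = read_hw_inbox1_rptr();
	rb->wrpt = read_hw_inbox1_wptr();
	*last_wptr = rb->wrpt;
}

int main(void)
{
	struct inbox_rb rb = { 0, 0 };
	uint32_t last_wptr = 0;

	sync_inbox1(&rb, &last_wptr);
	return (int)(rb.wrpt - last_wptr); /* 0: pointers agree after sync */
}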
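
fill_custom_backlight_caps() in power_helpers.c reports a size that counts only the backlight curve points actually populated: the full structure size, minus the fixed-capacity data_points array, plus the bytes of the points in use. The reduced model below reproduces just that size computation; fake_caps is a made-up layout standing in for dm_acpi_atif_backlight_caps, whose real definition is not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* Made-up reduced layout: a couple of header fields plus a fixed-capacity
 * trailing array, the same shape the size formula relies on. */
struct fake_caps {
	uint16_t size;
	uint8_t num_data_points;
	struct { uint8_t luminance; uint8_t input_signal; } data_points[99];
};

int main(void)
{
	struct fake_caps caps;
	unsigned int used = 49; /* points actually populated (illustrative) */
	unsigned int data_points_size = used * sizeof(caps.data_points[0]);

	/* Same formula as the patch: whole struct, minus the unused capacity. */
	caps.size = (uint16_t)(sizeof(struct fake_caps) - sizeof(caps.data_points)
			+ data_points_size);
	caps.num_data_points = (uint8_t)used;

	printf("reported size: %u bytes\n", (unsigned int)caps.size);
	return 0;
}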