Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 130
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 43
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 194
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 88
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 247
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 201
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 86
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 107
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 522
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 400
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 239
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 238
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 617
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h | 67
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 784
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/os_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 84
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/dpcd_defs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/grph_object_id.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 76
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 38
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 38
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
142 files changed, 3136 insertions, 2238 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 127667e549c1..b4029c0d5d8c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -20,6 +20,7 @@ config DRM_AMD_DC_DCN
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
+ select DRM_DISPLAY_HDCP_HELPER
help
Choose this option if you want to support HDCP authentication.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b30656959fd8..70be67a56673 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -48,7 +48,7 @@
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
-#include <drm/drm_hdcp.h>
+#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -73,17 +73,17 @@
#include <linux/firmware.h>
#include <linux/component.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -92,7 +92,6 @@
#include "vega10_ip_offset.h"
#include "soc15_common.h"
-#endif
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -603,7 +602,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
* dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
@@ -771,7 +769,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
DRM_ERROR("DM: notify type %d invalid!", notify.type);
continue;
}
@@ -827,7 +825,6 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
if (count > DMUB_TRACE_MAX_READ)
DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@@ -1125,9 +1122,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
hw_params.dpia_supported = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
-#endif
break;
default:
break;
@@ -1189,7 +1184,6 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
uint64_t pt_base;
@@ -1244,8 +1238,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->is_hvm_enabled = 0;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
static void vblank_control_worker(struct work_struct *work)
{
struct vblank_control_work *vblank_work =
@@ -1282,8 +1275,6 @@ static void vblank_control_worker(struct work_struct *work)
kfree(vblank_work);
}
-#endif
-
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
struct hpd_rx_irq_offload_work *offload_work;
@@ -1410,9 +1401,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
spin_lock_init(&adev->dm.vblank_lock);
-#endif
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1505,12 +1494,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
init_data.flags.edp_no_power_sequencing = true;
-#ifdef CONFIG_DRM_AMD_DC_DCN
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
-#endif
init_data.flags.seamless_boot_edp_requested = false;
@@ -1566,7 +1553,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
@@ -1575,7 +1561,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
// Call the DC init_memory func
dc_setup_system_context(adev->dm.dc, &pa_config);
}
-#endif
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
@@ -1587,14 +1572,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.dc->caps.max_links > 0) {
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
-#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -1626,7 +1609,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
amdgpu_dm_outbox_init(adev);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
DRM_ERROR("amdgpu: fail to register dmub aux callback");
@@ -1640,7 +1622,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1687,12 +1668,10 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
int i;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.vblank_control_workqueue) {
destroy_workqueue(adev->dm.vblank_control_workqueue);
adev->dm.vblank_control_workqueue = NULL;
}
-#endif
for (i = 0; i < adev->dm.display_indexes_num; i++) {
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
@@ -2403,9 +2382,7 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_allow_idle_optimizations(adev->dm.dc, false);
-#endif
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
@@ -2714,7 +2691,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->mst_port)
+ if (aconnector->dc_link &&
+ aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3557,7 +3535,6 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -3746,7 +3723,6 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#endif
/*
* Acquires the lock for the atomic state object and returns
@@ -3972,7 +3948,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}
-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
@@ -4003,7 +3979,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
- return rc ? 0 : 1;
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -4249,7 +4226,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Use Outbox interrupt */
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 0, 0):
@@ -4282,7 +4258,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
break;
}
}
-#endif
/* Disable vblank IRQs aggressively for power-saving. */
adev_to_drm(adev)->vblank_disable_immediate = true;
@@ -4378,7 +4353,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
break;
default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
@@ -4404,7 +4378,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev->ip_versions[DCE_HWIP][0]);
goto fail;
}
-#endif
break;
}
@@ -4553,7 +4526,7 @@ static int dm_early_init(void *handle)
adev->mode_info.num_dig = 6;
break;
default:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 0, 2):
case IP_VERSION(3, 0, 0):
@@ -4590,7 +4563,6 @@ static int dm_early_init(void *handle)
adev->ip_versions[DCE_HWIP][0]);
return -EINVAL;
}
-#endif
break;
}
@@ -5409,17 +5381,19 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
- bool *per_pixel_alpha, bool *global_alpha,
- int *global_alpha_value)
+ bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+ bool *global_alpha, int *global_alpha_value)
{
*per_pixel_alpha = false;
+ *pre_multiplied_alpha = true;
*global_alpha = false;
*global_alpha_value = 0xff;
if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
return;
- if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
+ plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
static const uint32_t alpha_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
@@ -5434,6 +5408,9 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
break;
}
}
+
+ if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+ *pre_multiplied_alpha = false;
}
if (plane_state->alpha < 0xffff) {
@@ -5596,7 +5573,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
return ret;
fill_blending_from_plane_state(
- plane_state, &plane_info->per_pixel_alpha,
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
return 0;
@@ -5643,6 +5620,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->tiling_info = plane_info.tiling_info;
dc_plane_state->visible = plane_info.visible;
dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
@@ -6644,10 +6622,8 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct amdgpu_display_manager *dm = &adev->dm;
struct vblank_control_work *work;
-#endif
int rc = 0;
if (enable) {
@@ -6670,7 +6646,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (amdgpu_in_reset(adev))
return 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dm->vblank_control_workqueue) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
@@ -6688,7 +6663,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
queue_work(dm->vblank_control_workqueue, &work->work);
}
-#endif
return 0;
}
@@ -7581,9 +7555,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
- struct list_head list;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
uint32_t domain;
int r;
@@ -7596,18 +7567,19 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
- INIT_LIST_HEAD(&list);
- tv.bo = &rbo->tbo;
- tv.num_shared = 1;
- list_add(&tv.head, &list);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+ r = amdgpu_bo_reserve(rbo, true);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
}
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
+ if (r) {
+ dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ goto error_unlock;
+ }
+
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev, rbo->flags);
else
@@ -7617,19 +7589,16 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
+ goto error_unlock;
}
r = amdgpu_ttm_alloc_gart(&rbo->tbo);
if (unlikely(r != 0)) {
- amdgpu_bo_unpin(rbo);
- ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("%p bind failed\n", rbo);
- return r;
+ goto error_unpin;
}
- ttm_eu_backoff_reservation(&ticket, &list);
+ amdgpu_bo_unreserve(rbo);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -7661,6 +7630,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(rbo);
+
+error_unlock:
+ amdgpu_bo_unreserve(rbo);
+ return r;
}
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
@@ -7941,7 +7917,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
plane_cap && plane_cap->per_pixel_alpha) {
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
- BIT(DRM_MODE_BLEND_PREMULTI);
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane, blend_caps);
@@ -9236,7 +9213,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(abo->tbo.base.resv,
+ DMA_RESV_USAGE_WRITE, false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
@@ -9248,7 +9226,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&bundle->flip_addrs[planes_count].address,
afb->tmz_surface, false);
- DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
@@ -9282,7 +9260,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane,
bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
__func__,
bundle->flip_addrs[planes_count].address.grph.addr.high_part,
bundle->flip_addrs[planes_count].address.grph.addr.low_part);
@@ -9363,14 +9341,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming.
*/
if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
-#endif
bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
@@ -9624,7 +9600,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- DRM_DEBUG_ATOMIC(
+ drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
@@ -9703,21 +9679,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (dc_state) {
/* if there mode set or reset, disable eDP PSR */
if (mode_set_reset_required) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
-#endif
+
amdgpu_dm_psr_disable_all(dm);
}
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
- dc_allow_idle_optimizations(dm->dc,true);
-#endif
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
mutex_unlock(&dm->dc_lock);
}
@@ -9947,7 +9921,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
- (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
+ (dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif
@@ -10328,7 +10302,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- DRM_DEBUG_ATOMIC(
+ drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6a908d736d6a..aa34c0068f41 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -26,10 +26,10 @@
#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__
+#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
-#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_plane.h>
/*
@@ -358,14 +358,12 @@ struct amdgpu_display_manager {
*/
struct mutex audio_lock;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
* @vblank_lock:
*
* Guards access to deferred vblank work state.
*/
spinlock_t vblank_lock;
-#endif
/**
* @audio_component:
@@ -469,14 +467,12 @@ struct amdgpu_display_manager {
struct hdcp_workqueue *hdcp_workqueue;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
* @vblank_control_workqueue:
*
* Deferred work for vblank control events.
*/
struct workqueue_struct *vblank_control_workqueue;
-#endif
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
@@ -493,14 +489,12 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
* @active_vblank_irq_count:
*
* number of currently active vblank irqs
*/
uint32_t active_vblank_irq_count;
-#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
@@ -540,6 +534,12 @@ struct amdgpu_display_manager {
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+ /**
+ * @actual_brightness:
+ *
+ * last successfully applied backlight values.
+ */
+ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
};
enum dsc_clock_force_state {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index da17ece1a2c5..c7a592d68feb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -291,9 +291,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
case LINK_RATE_RBR2:
case LINK_RATE_HIGH2:
case LINK_RATE_HIGH3:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case LINK_RATE_UHBR10:
-#endif
break;
default:
valid_input = false;
@@ -3411,7 +3409,6 @@ static int disable_hpd_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
disable_hpd_set, "%llu\n");
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Temporary w/a to force sst sequence in M42D DP2 mst receiver
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
@@ -3459,7 +3456,6 @@ static int dp_ignore_cable_id_get(void *data, u64 *val)
}
DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get,
dp_ignore_cable_id_set, "%llu\n");
-#endif
/*
* Sets the DC visual confirm debug option from the given string.
@@ -3491,6 +3487,40 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo);
DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
visual_confirm_set, "%llu\n");
+
+/*
+ * Sets the DC skip_detection_link_training debug option from the given string.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_skip_detection_link_training
+ */
+static int skip_detection_link_training_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ if (val == 0)
+ adev->dm.dc->debug.skip_detection_link_training = false;
+ else
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
+ return 0;
+}
+
+/*
+ * Reads the DC skip_detection_link_training debug option value into the given buffer.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training
+ */
+static int skip_detection_link_training_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.skip_detection_link_training;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops,
+ skip_detection_link_training_get,
+ skip_detection_link_training_set, "%llu\n");
+
/*
* Dumps the DCC_EN bit for each pipe.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
@@ -3574,16 +3604,17 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
adev, &mst_topo_fops);
debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
&dtn_log_fops);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
&dp_set_mst_en_for_sst_ops);
debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev,
&dp_ignore_cable_id_ops);
-#endif
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev,
+ &skip_detection_link_training_fops);
+
debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
adev, &dmub_tracebuffer_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index bf0d50277f8f..15c0e3f2a9c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -27,7 +27,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dm_helpers.h"
-#include <drm/drm_hdcp.h>
+#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f5f39984702f..7c799ddc1d27 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -90,7 +90,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
- struct edid *edid_buf = (struct edid *) edid->raw_edid;
+ struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
int sadb_count = -1;
@@ -977,9 +977,7 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
// TODO
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
/* TODO: add periodic detection implementation */
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 4aba0e8c84f8..19f543ba7205 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -928,7 +928,11 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
dc_interrupt_set(adev->dm.dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 31ac1fce36f8..9221b6690a4a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,10 +23,10 @@
*
*/
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/dp/drm_dp_mst_helper.h>
-#include <drm/dp/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -45,12 +45,10 @@
#include "amdgpu_dm_debugfs.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
-#endif
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index c561e0d872d6..85628ad59e6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,8 +46,6 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
@@ -64,6 +62,5 @@ bool needs_dsc_aux_workaround(struct dc_link *link);
void pre_validate_dsc(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state_ptr,
struct dsc_mst_fairness_vars *vars);
-#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 0c923a90615c..141fd2721501 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -27,8 +27,8 @@
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
+#include "modules/power/power_helpers.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN
static bool link_supports_psrsu(struct dc_link *link)
{
struct dc *dc = link->ctx->dc;
@@ -39,6 +39,9 @@ static bool link_supports_psrsu(struct dc_link *link)
if (dc->ctx->dce_version < DCN_VERSION_3_1)
return false;
+ if (!is_psr_su_specific_panel(link))
+ return false;
+
if (!link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP ||
!link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED)
return false;
@@ -49,7 +52,6 @@ static bool link_supports_psrsu(struct dc_link *link)
return true;
}
-#endif
/*
* amdgpu_dm_set_psr_caps() - set link psr capabilities
@@ -69,17 +71,18 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = false;
} else {
-#ifdef CONFIG_DRM_AMD_DC_DCN
if (link_supports_psrsu(link))
link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
else
-#endif
link->psr_settings.psr_version = DC_PSR_VERSION_1;
link->psr_settings.psr_feature_enabled = true;
}
- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+ DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version);
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index f05ab2bf20c5..b4eca0236435 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -63,9 +63,7 @@ DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink
dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
-ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
-endif
AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
index a8cb039d2572..34e3a64f556e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
@@ -213,6 +213,9 @@ static enum connector_id connector_id_from_bios_object_id(
case CONNECTOR_OBJECT_ID_MXM:
id = CONNECTOR_ID_MXM;
break;
+ case CONNECTOR_OBJECT_ID_USBC:
+ id = CONNECTOR_ID_USBC;
+ break;
default:
id = CONNECTOR_ID_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 0e36cd800fc9..32efa92422e8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -522,7 +522,8 @@ static enum bp_result transmitter_control_v2(
*/
params.acConfig.ucEncoderSel = 1;
- if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+ if (CONNECTOR_ID_DISPLAY_PORT == connector_id
+ || CONNECTOR_ID_USBC == connector_id)
/* Bit4: DP connector flag
* =0 connector is none-DP connector
* =1 connector is DP connector
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index dd9704ef4ccc..f3792286f571 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -65,7 +65,6 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
case DCN_VERSION_2_0:
@@ -80,7 +79,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_16:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#endif
+
default:
/* Unsupported DCE */
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dfba6138f538..26feefbb8990 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
- if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+ if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index fbdd0a92d146..451e8d6cd8bd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -157,8 +157,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
} else {
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index f4dee0e48a67..cf1b5f354ae9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -88,11 +88,22 @@ static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
+ int display_count;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
- rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
- /* update power state */
- clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
}
static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
@@ -111,7 +122,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 8161a6ae410d..30c6f9cd717f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -94,6 +94,9 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
{
uint32_t result;
+ result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
+ ASSERT(result == VBIOSSMC_Result_OK);
+
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index d9920d91838d..1cae01a91a69 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -94,6 +94,8 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
{
uint32_t result;
+ result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
+
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index bc4ddc36fe58..f310b0d25a07 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -176,8 +176,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
if (update_dppclk || update_dispclk)
dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 59fdd7f0d609..ceb34376decb 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -615,13 +615,37 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
}
+static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ int display_count;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn31_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ union display_idle_optimization_u idle_info = { 0 };
+
+ idle_info.idle_info.df_request_disabled = 1;
+ idle_info.idle_info.phy_ref_clk_off = 1;
+ idle_info.idle_info.s0i2_rdy = 1;
+ dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+}
+
static struct clk_mgr_funcs dcn31_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn31_update_clocks,
.init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn31_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
- .notify_wm_ranges = dcn31_notify_wm_ranges
+ .notify_wm_ranges = dcn31_notify_wm_ranges,
+ .set_low_power_state = dcn31_set_low_power_state
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index edda572dc570..a2ade6e93f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -91,7 +91,8 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
@@ -219,8 +220,50 @@ static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *reg
static struct clk_bw_params dcn315_bw_params = {
.vram_type = Ddr4MemType,
- .num_channels = 1,
+ .num_channels = 2,
.clk_table = {
+ .entries = {
+ {
+ .voltage = 0,
+ .dispclk_mhz = 640,
+ .dppclk_mhz = 640,
+ .phyclk_mhz = 810,
+ .phyclk_d18_mhz = 667,
+ .dtbclk_mhz = 600,
+ },
+ {
+ .voltage = 1,
+ .dispclk_mhz = 739,
+ .dppclk_mhz = 739,
+ .phyclk_mhz = 810,
+ .phyclk_d18_mhz = 667,
+ .dtbclk_mhz = 600,
+ },
+ {
+ .voltage = 2,
+ .dispclk_mhz = 960,
+ .dppclk_mhz = 960,
+ .phyclk_mhz = 810,
+ .phyclk_d18_mhz = 667,
+ .dtbclk_mhz = 600,
+ },
+ {
+ .voltage = 3,
+ .dispclk_mhz = 1200,
+ .dppclk_mhz = 1200,
+ .phyclk_mhz = 810,
+ .phyclk_d18_mhz = 667,
+ .dtbclk_mhz = 600,
+ },
+ {
+ .voltage = 4,
+ .dispclk_mhz = 1372,
+ .dppclk_mhz = 1372,
+ .phyclk_mhz = 810,
+ .phyclk_d18_mhz = 667,
+ .dtbclk_mhz = 600,
+ },
+ },
.num_entries = 5,
},
@@ -300,8 +343,8 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_315_t dummy_clocks;
-
+/* Temporary Place holder until we can get them from fuse */
+static DpmClocks_315_t dummy_clocks = { 0 };
static struct dcn315_watermarks dummy_wms = { 0 };
static void dcn315_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn315_watermarks *table)
@@ -415,80 +458,105 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_315_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage)
- return clocks[i];
- }
-
- ASSERT(0);
- return 0;
-}
-
static void dcn315_clk_mgr_helper_populate_bw_params(
struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
const DpmClocks_315_t *clock_table)
{
- int i, j;
+ int i;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
-
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+
+ /* Find highest fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
+ /* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ int j;
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
- bw_params->clk_table.num_entries = j + 1;
-
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
- if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
- clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
- max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
- max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
- } else {
- ASSERT(0);
- }
-
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- int temp;
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
+ && clock_table->DfPstateTable[j].FClk < min_fclk) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
+ }
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i];
+ bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i];
+ bw_params->clk_table.entries[i].wck_ratio = 1;
+ };
+
+ /* Make sure to include at least one entry and highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].wck_ratio = 1;
- temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].dcfclk_mhz = temp;
- temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].socclk_mhz = temp;
- bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
- bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ i++;
}
-
+ bw_params->clk_table.num_entries = i--;
+
+ /* Make sure all highest clocks are included*/
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+
+ /* Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
+ if (!bw_params->num_channels)
+ bw_params->num_channels = 2;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 880ffea2afc6..2600313fea57 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
-#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
-#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ
+#define VBIOSSMC_MSG_GetDtbclkFreq 0x09 ///< Get DTB clock frequency in MHZ
+#define VBIOSSMC_MSG_SetDtbClk 0x0A ///< Enable/disable DTB clock
#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event
@@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
return (dprefclk_get_mhz * 1000);
}
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_GetFclkFrequency,
+ VBIOSSMC_MSG_GetDtbclkFreq,
0);
}
return (fclk_get_mhz * 1000);
}
+
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn315_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDtbClk,
+ enable);
+}
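The two new entry points are meant to be driven from the clock manager's update path. A hypothetical caller sketch (the new_clocks->dtbclk_en and clks.dtbclk_en fields here are illustrative assumptions, not part of this hunk) would only message the SMU when the requested DTBCLK state actually changes:

	/* illustrative sketch, not taken from this patch */
	if (new_clocks->dtbclk_en != clk_mgr_base->clks.dtbclk_en) {
		dcn315_smu_set_dtbclk(clk_mgr, new_clocks->dtbclk_en);
		clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
	}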
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
index 66fa42f8dd18..5aa3275ac7d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
@@ -37,6 +37,7 @@
#define NUM_SOC_VOLTAGE_LEVELS 4
#define NUM_DF_PSTATE_LEVELS 4
+
typedef struct {
uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
@@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
#endif /* DAL_DC_315_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 702d00ce7da4..fc3af81ed6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -122,7 +122,8 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
@@ -686,8 +687,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz =
- dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+ /*clk_mgr->base.dccg->ref_dtbclk_khz =
+ dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
clk_mgr->base.base.bw_params = &dcn316_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f6e19efea756..f14449401188 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -829,14 +829,12 @@ static void dc_destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DCN
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
kfree(dc->dcn_ip);
dc->dcn_ip = NULL;
-#endif
kfree(dc->vm_helper);
dc->vm_helper = NULL;
@@ -882,10 +880,8 @@ static bool dc_construct(struct dc *dc,
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
-#endif
dc->config = init_params->flags;
@@ -913,7 +909,6 @@ static bool dc_construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -929,7 +924,6 @@ static bool dc_construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
-#endif
if (!dc_construct_ctx(dc, init_params)) {
dm_error("%s: failed to create ctx\n", __func__);
@@ -1569,11 +1563,24 @@ bool dc_validate_boot_timing(const struct dc *dc,
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz;
+ uint32_t numOdmPipes = 1;
+ uint32_t id_src[4] = {0};
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
tg_inst, &pix_clk_100hz);
+ if (tg->funcs->get_optc_source)
+ tg->funcs->get_optc_source(tg,
+ &numOdmPipes, &id_src[0], &id_src[1]);
+
+ if (numOdmPipes == 2)
+ pix_clk_100hz *= 2;
+ if (numOdmPipes == 4)
+ pix_clk_100hz *= 4;
+
+ // Note: In rare cases, HW pixclk may differ from crtc's pixclk
+ // slightly due to rounding issues in 10 kHz units.
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
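When ODM combine is active at boot, the DP clock source reports a per-OPTC-segment pixel rate, so the hunk above scales that readback by the reported pipe count before comparing it with the CRTC timing. A small worked sketch with made-up numbers:

	/* illustrative values only */
	uint32_t crtc_pix_clk_100hz = 11880000;	/* 1188.0 MHz stream clock        */
	uint32_t hw_pix_clk_100hz   = 5940000;	/* per-segment readback           */
	uint32_t num_odm_pipes      = 2;	/* as reported by get_optc_source */

	if (num_odm_pipes == 2)
		hw_pix_clk_100hz *= 2;
	if (num_odm_pipes == 4)
		hw_pix_clk_100hz *= 4;

	/* 5940000 * 2 == 11880000, so this boot timing would be accepted */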
@@ -1667,7 +1674,6 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
return stream_mask;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(const struct dc *dc)
{
if (dc->hwss.z10_restore)
@@ -1679,7 +1685,7 @@ void dc_z10_save_init(struct dc *dc)
if (dc->hwss.z10_save_init)
dc->hwss.z10_save_init(dc);
}
-#endif
+
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
@@ -1693,10 +1699,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
-#endif
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1855,7 +1859,6 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
@@ -1891,7 +1894,7 @@ bool dc_acquire_release_mpc_3dlut(
}
return ret;
}
-#endif
+
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1912,7 +1915,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
return false;
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
/* Perform updates here which need to be deferred until next vupdate
*
* i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
@@ -1931,7 +1933,6 @@ static void process_deferred_updates(struct dc *dc)
dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
}
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
@@ -1958,9 +1959,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
process_deferred_updates(dc);
-#endif
dc->hwss.optimize_bandwidth(dc, context);
@@ -1974,9 +1973,7 @@ static void init_state(struct dc *dc, struct dc_state *context)
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-#endif
}
struct dc_state *dc_create_state(struct dc *dc)
@@ -2348,11 +2345,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc->idle_optimizations_allowed)
overall_type = UPDATE_TYPE_FULL;
-#endif
if (stream_status == NULL || stream_status->plane_count != surface_count)
overall_type = UPDATE_TYPE_FULL;
@@ -2389,6 +2384,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
+ if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+ su_flags->bits.crtc_timing_adjust = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2650,6 +2647,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_infopacket)
stream->vrr_infopacket = *update->vrr_infopacket;
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -2836,9 +2836,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
-#endif
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
@@ -2856,10 +2854,8 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_allow_idle_optimizations(dc, false);
-#endif
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
@@ -2877,7 +2873,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -2886,7 +2881,6 @@ static void commit_planes_for_stream(struct dc *dc,
for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
-#endif
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
@@ -2907,14 +2901,15 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, true);
- else
+ } else {
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
// Stream updates
if (stream_update)
@@ -2930,10 +2925,11 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2996,7 +2992,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
-#ifdef CONFIG_DRM_AMD_DC_DCN
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -3010,7 +3005,6 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx.pipe_ctx[i].ttu_regs);
}
}
-#endif
}
// Update Type FAST, Surface updates
@@ -3060,10 +3054,11 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
@@ -3301,9 +3296,8 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
-#endif
+
if (dc->ctx->dmub_srv)
dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
@@ -3565,8 +3559,6 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -3722,7 +3714,6 @@ void dc_hardware_release(struct dc *dc)
if (dc->hwss.hardware_release)
dc->hwss.hardware_release(dc);
}
-#endif
/*
*****************************************************************************
@@ -3742,13 +3733,12 @@ void dc_hardware_release(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
-#endif
+
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
@@ -4051,3 +4041,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
+/*
+ * dc_extended_blank_supported: Decide whether extended blank is supported
+ *
+ * Extended blank is a freesync optimization feature to be enabled in the future.
+ * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+ *
+ * @param [in] dc: Current DC state
+ * @return: Indicate whether extended blank is supported (true or false)
+ */
+bool dc_extended_blank_supported(struct dc *dc)
+{
+ return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+ && dc->caps.zstate_support && dc->caps.is_apu;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 643762542e4d..72376075db0c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -345,7 +345,6 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
@@ -363,7 +362,6 @@ void context_clock_trace(
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw_ctx.bw.dcn.clk.fclk_khz,
context->bw_ctx.bw.dcn.clk.socclk_khz);
-#endif
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index cb87dd643180..a789ea8af27f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "gpio_service_interface.h"
#include "core_status.h"
#include "dc_link_dp.h"
+#include "dc_link_dpia.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
#include "opp.h"
@@ -240,7 +241,7 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !link->hpd_status)
+ if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -345,6 +346,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
case CONNECTOR_ID_LVDS:
return SIGNAL_TYPE_LVDS;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
return SIGNAL_TYPE_DISPLAY_PORT;
case CONNECTOR_ID_EDP:
return SIGNAL_TYPE_EDP;
@@ -380,7 +382,8 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
bool present =
((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP));
+ (connector_id == CONNECTOR_ID_EDP) ||
+ (connector_id == CONNECTOR_ID_USBC));
ddc = dal_ddc_service_get_ddc_pin(link->ddc);
@@ -476,7 +479,8 @@ static enum signal_type link_detect_sink(struct dc_link *link,
result = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
break;
- case CONNECTOR_ID_DISPLAY_PORT: {
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC: {
/* DP HPD short pulse. Passive DP dongle will not
* have short pulse
*/
@@ -801,7 +805,6 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
* reports DSC support.
*/
@@ -812,7 +815,6 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
link->wa_flags.dpia_mst_dsc_always_on = true;
-#endif
}
static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
@@ -878,9 +880,7 @@ static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
-#endif
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
}
@@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
destrictive = false;
}
}
- } else if (dc_is_hdmi_signal(link->local_sink->sink_signal))
- destrictive = true;
+ }
return destrictive;
}
@@ -1592,6 +1591,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
if (link->hpd_gpio)
@@ -1605,8 +1605,25 @@ static bool dc_link_construct_legacy(struct dc_link *link,
if (link->hpd_gpio) {
if (!link->dc->config.allow_edp_hotplug_detection)
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case 1: // only the 1st eDP handles hotplug
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case 2: // only the 2nd eDP handles hotplug
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ break;
+ }
}
break;
@@ -3076,6 +3093,11 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
+ if (allow_active && link->type == dc_connection_none) {
+ // Don't enter PSR if panel is not connected
+ return false;
+ }
+
/* Set power optimization flag */
if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
link->psr_settings.psr_power_opt = *power_opts;
@@ -3084,18 +3106,18 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
}
+ if (psr != NULL && link->psr_settings.psr_feature_enabled &&
+ force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+
/* Enable or Disable PSR */
if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
link->psr_settings.psr_allow_active = *allow_active;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!link->psr_settings.psr_allow_active)
dc_z10_restore(dc);
-#endif
if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- if (force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
link->psr_settings.psr_feature_enabled)
@@ -3307,9 +3329,12 @@ bool dc_link_setup_psr(struct dc_link *link,
*/
psr_context->frame_delay = 0;
- if (psr)
+ if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
+ link->psr_settings.psr_power_opt = 0;
+ link->psr_settings.psr_allow_active = 0;
+ }
else
link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -3477,8 +3502,6 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
- struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct link_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
const struct dc_link_settings empty_link_settings = {0};
@@ -3512,7 +3535,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
pipe_ctx->pipe_idx);
}
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
ASSERT(proposed_table.stream_count == 1);
@@ -3525,8 +3548,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
proposed_table.stream_allocations[0].slot_count);
/* program DP source TX for payload */
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&proposed_table);
/* poll for ACT handled */
@@ -3536,7 +3558,8 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
}
/* slot X.Y for SST payload allocate */
- if (allocate) {
+ if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
@@ -3563,8 +3586,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = NULL;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
@@ -3574,9 +3595,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
DC_LOGGER_INIT(link->ctx->logger);
- link_encoder = link_enc_cfg_get_link_enc(link);
- ASSERT(link_encoder);
-
/* enable_link_dp_mst already check link->enabled_stream_count
* and stream is in link->stream[]. This is called during set mode,
* stream_enc is available.
@@ -3621,37 +3639,17 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
ASSERT(proposed_table.stream_count > 0);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- static enum dc_status status;
- uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
-
- for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
- mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
-
- status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
- mst_alloc_slots, &prev_mst_slots_in_use);
- ASSERT(status == DC_OK);
- DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
- status, mst_alloc_slots, prev_mst_slots_in_use);
- }
-
/* program DP source TX for payload */
- switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
- case DP_8b_10b_ENCODING:
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_128b_132b_ENCODING:
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_UNKNOWN_ENCODING:
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
DC_LOG_ERROR("Failure: unknown encoding format\n");
return DC_ERROR_UNEXPECTED;
}
+ link_hwss->ext.update_stream_allocation_table(link,
+ &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
/* send down message */
ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
@@ -3693,7 +3691,6 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct link_encoder *link_encoder = link->link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
enum act_return_status ret;
@@ -3757,8 +3754,13 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
ASSERT(proposed_table.stream_count > 0);
/* update mst stream allocation table hardware state */
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&link->mst_stream_alloc_table);
/* poll for immediate branch device ACT handled */
@@ -3853,8 +3855,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = NULL;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
int i;
@@ -3863,9 +3863,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct dc_link_settings empty_link_settings = {0};
DC_LOGGER_INIT(link->ctx->logger);
- link_encoder = link_enc_cfg_get_link_enc(link);
- ASSERT(link_encoder);
-
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -3923,36 +3920,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- enum dc_status status;
- uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
-
- for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
- mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
-
- status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
- mst_alloc_slots, &prev_mst_slots_in_use);
- ASSERT(status != DC_NOT_SUPPORTED);
- DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
- status, mst_alloc_slots, prev_mst_slots_in_use);
- }
-
- switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
- case DP_8b_10b_ENCODING:
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_128b_132b_ENCODING:
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_UNKNOWN_ENCODING:
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
DC_LOG_DEBUG("Unknown encoding format\n");
return DC_ERROR_UNEXPECTED;
}
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
if (mst_mode) {
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
@@ -4005,8 +3982,12 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
if (is_dp_128b_132b_signal(pipe_ctx))
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- /* dio output index */
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ /* dio output index is the dpia index for DPIA endpoints, and the dcio index otherwise */
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
+ else
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
/* phy index */
config.phy_idx = resource_transmitter_to_phy_idx(
@@ -4099,8 +4080,8 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
}
- pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table(
- pipe_ctx->link_res.hpo_dp_link_enc,
+ link_hwss->ext.update_stream_allocation_table(stream->link,
+ &pipe_ctx->link_res,
&proposed_table);
if (link_hwss->ext.set_throttled_vcp_size)
@@ -4120,6 +4101,7 @@ void core_link_enable_stream(
struct link_encoder *link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
if (is_dp_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
@@ -4148,56 +4130,19 @@ void core_link_enable_stream(
link_enc->funcs->setup(
link_enc,
pipe_ctx->stream->signal);
- pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.tg->inst,
- stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
- }
-
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &stream->timing,
- stream->output_color_space,
- stream->use_vsc_sdp_for_colorimetry,
- stream->timing.flags.DSC,
- false);
- otg_out_dest = OUT_MUX_HPO_DP;
- } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- stream->output_color_space,
- stream->use_vsc_sdp_for_colorimetry,
- stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
-
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- stream->phy_pix_clk,
- pipe_ctx->stream_res.audio != NULL);
-
pipe_ctx->stream->link->link_state_valid = true;
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ otg_out_dest = OUT_MUX_HPO_DP;
+ else
+ otg_out_dest = OUT_MUX_DIO;
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
+ }
- if (dc_is_dvi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
-
- if (dc_is_lvds_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->lvds_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing);
+ link_hwss->setup_stream_attribute(pipe_ctx);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
bool apply_edp_fast_boot_optimization =
@@ -4332,13 +4277,11 @@ void core_link_enable_stream(
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (is_dp_128b_132b_signal(pipe_ctx))
fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
-
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@@ -4684,22 +4627,22 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
bool dc_link_should_enable_fec(const struct dc_link *link)
{
- bool is_fec_disable = false;
- bool ret = false;
+ bool force_disable = false;
- if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ if (link->fec_state == dc_link_fec_enabled)
+ force_disable = false;
+ else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
- link->local_sink->edid_caps.panel_patch.disable_fec) ||
- (link->connector_signal == SIGNAL_TYPE_EDP
- // enable FEC for EDP if DSC is supported
- && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false
- ))
- is_fec_disable = true;
-
- if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
- ret = true;
-
- return ret;
+ link->local_sink->edid_caps.panel_patch.disable_fec)
+ force_disable = true;
+ else if (link->connector_signal == SIGNAL_TYPE_EDP
+ && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
+ dsc_support.DSC_SUPPORT == false
+ || link->dc->debug.disable_dsc_edp
+ || !link->dc->caps.edp_dsc_support))
+ force_disable = true;
+
+ return !force_disable && dc_link_is_fec_supported(link);
}
uint32_t dc_bandwidth_in_kbps_from_timing(
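Several hunks above (SST payload update, MST allocate/reduce/deallocate, and the FPGA path) drop the per-encoding switch between the 8b/10b DIO link encoder and the 128b/132b HPO encoder in favor of one hook. The shared shape of the new call sites, sketched here for reference, is:

	/* guard, then let the selected link_hwss program the table */
	if (link_hwss->ext.update_stream_allocation_table == NULL ||
	    dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
		DC_LOG_ERROR("Failure: unknown encoding format\n");
		return DC_ERROR_UNEXPECTED;
	}

	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
			&link->mst_stream_alloc_table);

The DIO-versus-HPO decision is made once, when get_link_hwss() selects the table for the link resource, instead of at every payload-programming site.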
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 1d4863763df9..2b09310965bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -543,15 +543,9 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
-
- if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
- return false;
-
if (!payloads_num)
return false;
- /*TODO: len of payload data for i2c and aux is uint8!!!!,
- * but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 351081f574cb..dc30ac366a50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -793,7 +793,7 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint8_t lane = 0;
@@ -823,7 +823,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint32_t lane;
@@ -2783,31 +2783,37 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings current_setting = *link_setting;
+ struct dc_link_settings cur_link_settings = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
dp_trace_commit_lt_init(link);
- if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+ if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
* pattern in SST mode will be sent right after the link training
*/
link_hwss->setup_stream_encoder(pipe_ctx);
dp_trace_set_lt_start_timestamp(link, false);
- for (j = 0; j < attempts; ++j) {
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_enable_link_phy(
link,
&pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
- &current_setting);
+ &cur_link_settings);
if (stream->sink_patches.dppowerup_delay > 0) {
int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -2832,30 +2838,30 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
}
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
return true;
}
@@ -2866,8 +2872,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
break;
- DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
@@ -2876,27 +2883,49 @@ bool perform_link_training_with_retries(
enum dc_connection_type type = dc_connection_none;
dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
- } else if (do_fallback) {
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by the stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(link, *link_setting, &current_setting, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
+ decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
+ /* Flag if reduced link bandwidth no longer meets stream requirements or has fallen back to
+ * minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &current_setting);
- if (req_bw > link_bw)
- break;
+ link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, req_bw, link_bw);
}
msleep(delay_between_attempts);
-
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
-
return false;
}
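The new is_link_bw_low / is_link_bw_min flags let training report success at a reduced rate while still recording that the stream no longer fits, which triggers another attempt at the original settings. Rough numbers for a common case (illustrative, 8b/10b coding overhead only):

	/* 1080p60: 148.5 MHz pixel clock x 24 bpp ~= 3,564,000 kbps required */
	uint32_t req_bw  = 3564000;
	/* RBR x 1 lane: 1.62 Gbps line rate x 8/10 ~= 1,296,000 kbps usable */
	uint32_t link_bw = 1296000;

	bool is_link_bw_low = (req_bw > link_bw);	/* true: retry at original settings */
	bool is_link_bw_min = true;			/* RBR x 1 lane is already the floor */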
@@ -4085,9 +4114,32 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
+static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
+{
+ switch (test_rate) {
+ case DP_TEST_LINK_RATE_RBR:
+ return LINK_RATE_LOW;
+ case DP_TEST_LINK_RATE_HBR:
+ return LINK_RATE_HIGH;
+ case DP_TEST_LINK_RATE_HBR2:
+ return LINK_RATE_HIGH2;
+ case DP_TEST_LINK_RATE_HBR3:
+ return LINK_RATE_HIGH3;
+ case DP_TEST_LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR10;
+ case DP_TEST_LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR20;
+ case DP_TEST_LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR13_5;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
static void dp_test_send_link_training(struct dc_link *link)
{
struct dc_link_settings link_settings = {0};
+ uint8_t test_rate = 0;
core_link_read_dpcd(
link,
@@ -4097,8 +4149,9 @@ static void dp_test_send_link_training(struct dc_link *link)
core_link_read_dpcd(
link,
DP_TEST_LINK_RATE,
- (unsigned char *)(&link_settings.link_rate),
+ &test_rate,
1);
+ link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
/* Set preferred link settings */
link->verified_link_cap.lane_count = link_settings.lane_count;
@@ -4440,7 +4493,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
- channel_count = dpcd_test_mode.bits.channel_count + 1;
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
@@ -4552,6 +4605,7 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
{
int i;
struct pipe_ctx *pipe_ctx;
+ struct dc_link_settings prev_link_settings = link->preferred_link_setting;
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
@@ -4562,6 +4616,10 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return;
+ /* toggle stream state with the preference for current link settings */
+ dc_link_set_preferred_training_settings((struct dc *)link->dc,
+ &link->cur_link_settings, NULL, link, true);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
@@ -4577,6 +4635,10 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
+
+ /* restore previous link settings preference */
+ dc_link_set_preferred_training_settings((struct dc *)link->dc,
+ &prev_link_settings, NULL, link, true);
}
bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
@@ -5100,6 +5162,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
else
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
}
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
@@ -5116,11 +5179,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
- if (status != DC_OK) {
- DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- return false;
- }
link->dpcd_caps.lttpr_caps.revision.raw =
lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
@@ -5145,18 +5203,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
link->dpcd_caps.lttpr_caps.max_ext_timeout =
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES -
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
if (is_lttpr_present) {
@@ -5216,6 +5272,62 @@ static void retrieve_cable_id(struct dc_link *link)
&link->dpcd_caps.cable_id, &usbc_cable_id);
}
+/* DPRX may take some time to respond to AUX messages after HPD is asserted.
+ * If the AUX read is unsuccessful, try to wake the unresponsive DPRX by toggling DPCD SET_POWER (0x600).
+ */
+static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint8_t dpcd_data = 0;
+ uint64_t start_ts = 0;
+ uint64_t current_ts = 0;
+ uint64_t time_taken_ms = 0;
+ enum dc_connection_type type = dc_connection_none;
+
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ if (status != DC_OK) {
+ DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
+ __func__,
+ timeout_ms);
+ start_ts = dm_get_timestamp(link->ctx);
+
+ do {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
+ break;
+
+ dpcd_data = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ dpcd_data = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ current_ts = dm_get_timestamp(link->ctx);
+ time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
+ } while (status != DC_OK && time_taken_ms < timeout_ms);
+
+ DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
+ __func__,
+ (status == DC_OK) ? "succeeded" : "failed",
+ time_taken_ms,
+ (type == dc_connection_none) ? ". Unplugged." : ".");
+ }
+
+ return status;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5363,15 @@ static bool retrieve_link_cap(struct dc_link *link)
dc_link_aux_try_to_configure_timeout(link->ddc,
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ /* Try to ensure AUX channel active before proceeding. */
+ if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
+ uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
+
+ if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
+ timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
+ status = wa_try_to_wake_dprx(link, timeout_ms);
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5497,9 +5618,7 @@ static bool retrieve_link_cap(struct dc_link *link)
* only if required.
*/
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
-#if defined(CONFIG_DRM_AMD_DC_DCN)
!link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around &&
-#endif
link->dpcd_caps.is_branch_dev &&
link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
@@ -5733,6 +5852,10 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_PSR_SUPPORT,
&link->dpcd_caps.psr_info.psr_version,
sizeof(link->dpcd_caps.psr_info.psr_version));
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
+ core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
+ &link->dpcd_caps.psr_info.force_psrsu_cap,
+ sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
core_link_read_dpcd(link, DP_PSR_CAPS,
&link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
@@ -7476,6 +7599,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u
DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
+ memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
if (is_dp_128b_132b_signal(pipe_ctx))
@@ -7493,6 +7617,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u
}
} else {
/* disable DSC PPS in stream encoder */
+ memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 48a18766f002..af110bf9470f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -27,8 +27,8 @@
#include <dc_link.h>
#include <inc/link_hwss.h>
#include <inc/link_dpcd.h>
-#include <drm/dp/drm_dp_helper.h>
#include <dc_dp_types.h>
+#include <drm/display/drm_dp_helper.h>
#include "dm_helpers.h"
#define END_ADDRESS(start, size) (start + size - 1)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 0e95bc5df4e7..1b7a8774b0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
#include "inc/link_dpcd.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER \
link->ctx->logger
@@ -69,6 +70,24 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ return is_hpd_high;
+}
+
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
* Abort link training if sink unplug detected.
@@ -547,11 +566,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check debug option for extending aux read interval. */
if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
-#endif
return wait_time_microsec;
}
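dc_link_dpia_query_hpd_status() pairs with the dc_link.c hunk earlier in this patch: DPIA endpoints have no physical HPD pin, so sink detection asks DMUB for the tunneled HPD state instead. The caller side, repeated here for context:

	/* from dc_link_detect_sink() for non-PHY (DPIA) endpoints */
	if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
		*type = dc_connection_none;
	else
		*type = dc_connection_single;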
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7af153434e9e..6774dd8bb53e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -56,7 +56,6 @@
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dce120/dce120_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
@@ -68,7 +67,6 @@
#include "dcn31/dcn31_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
-#endif
#define DC_LOGGER_INIT(logger)
@@ -124,7 +122,6 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
else
dc_version = DCE_VERSION_12_0;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
@@ -165,7 +162,6 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_10_3_7(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_16;
break;
-#endif
default:
dc_version = DCE_VERSION_UNKNOWN;
@@ -397,7 +393,6 @@ bool resource_construct(
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
for (i = 0; i < caps->num_mpc_3dlut; i++) {
pool->mpc_lut[i] = dc_create_3dlut_func();
if (pool->mpc_lut[i] == NULL)
@@ -406,7 +401,7 @@ bool resource_construct(
if (pool->mpc_shaper[i] == NULL)
DC_ERR("DC: failed to create MPC shaper!\n");
}
-#endif
+
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -1076,6 +1071,15 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ /* Invalid input */
+ if (!plane_state->dst_rect.width ||
+ !plane_state->dst_rect.height ||
+ !plane_state->src_rect.width ||
+ !plane_state->src_rect.height) {
+ ASSERT(0);
+ return false;
+ }
+
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
@@ -1360,7 +1364,6 @@ static struct pipe_ctx *acquire_free_pipe_for_head(
return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1395,7 +1398,6 @@ static int acquire_first_split_pipe(
}
return -1;
}
-#endif
bool dc_add_plane_to_context(
const struct dc *dc,
@@ -1438,13 +1440,12 @@ bool dc_add_plane_to_context(
while (head_pipe) {
free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
- #if defined(CONFIG_DRM_AMD_DC_DCN)
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
}
- #endif
+
if (!free_pipe) {
dc_plane_state_release(plane_state);
return false;
@@ -1685,8 +1686,8 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
- // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
- if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ /* compare audio info */
+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;
@@ -2111,6 +2112,8 @@ static int acquire_resource_from_hw_enabled_state(
{
struct dc_link *link = stream->link;
unsigned int i, inst, tg_inst = 0;
+ uint32_t numPipes = 1;
+ uint32_t id_src[4] = {0};
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
@@ -2140,37 +2143,67 @@ static int acquire_resource_from_hw_enabled_state(
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
- pipe_ctx->plane_res.mi = pool->mis[tg_inst];
- pipe_ctx->plane_res.hubp = pool->hubps[tg_inst];
- pipe_ctx->plane_res.ipp = pool->ipps[tg_inst];
- pipe_ctx->plane_res.xfm = pool->transforms[tg_inst];
- pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
- pipe_ctx->stream_res.opp = pool->opps[tg_inst];
-
- if (pool->dpps[tg_inst]) {
- pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
+ id_src[0] = tg_inst;
- // Read DPP->MPCC->OPP Pipe from HW State
- if (pool->mpc->funcs->read_mpcc_state) {
- struct mpcc_state s = {0};
+ if (pipe_ctx->stream_res.tg->funcs->get_optc_source)
+ pipe_ctx->stream_res.tg->funcs->get_optc_source(pipe_ctx->stream_res.tg,
+ &numPipes, &id_src[0], &id_src[1]);
- pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
-
- if (s.dpp_id < MAX_MPCC)
- pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+ if (id_src[0] == 0xf && id_src[1] == 0xf) {
+ id_src[0] = tg_inst;
+ numPipes = 1;
+ }
- if (s.bot_mpcc_id < MAX_MPCC)
- pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
- &pool->mpc->mpcc_array[s.bot_mpcc_id];
+ for (i = 0; i < numPipes; i++) {
+ /* Check if src id is invalid */
+ if (id_src[i] == 0xf)
+ return -1;
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
+ pipe_ctx->plane_res.mi = pool->mis[id_src[i]];
+ pipe_ctx->plane_res.hubp = pool->hubps[id_src[i]];
+ pipe_ctx->plane_res.ipp = pool->ipps[id_src[i]];
+ pipe_ctx->plane_res.xfm = pool->transforms[id_src[i]];
+ pipe_ctx->plane_res.dpp = pool->dpps[id_src[i]];
+ pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
+
+ if (pool->dpps[id_src[i]]) {
+ pipe_ctx->plane_res.mpcc_inst = pool->dpps[id_src[i]]->inst;
+
+ if (pool->mpc->funcs->read_mpcc_state) {
+ struct mpcc_state s = {0};
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+ if (s.dpp_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id =
+ s.dpp_id;
+ if (s.bot_mpcc_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+ &pool->mpc->mpcc_array[s.bot_mpcc_id];
+ if (s.opp_id < MAX_OPP)
+ pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ }
+ }
+ pipe_ctx->pipe_idx = id_src[i];
- if (s.opp_id < MAX_OPP)
- pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ if (id_src[i] >= pool->timing_generator_count) {
+ id_src[i] = pool->timing_generator_count - 1;
+ pipe_ctx->stream_res.tg = pool->timing_generators[id_src[i]];
+ pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
}
+
+ pipe_ctx->stream = stream;
}
- pipe_ctx->pipe_idx = tg_inst;
- pipe_ctx->stream = stream;
- return tg_inst;
+ if (numPipes == 2) {
+ stream->apply_boot_odm_mode = dm_odm_combine_policy_2to1;
+ res_ctx->pipe_ctx[id_src[0]].next_odm_pipe = &res_ctx->pipe_ctx[id_src[1]];
+ res_ctx->pipe_ctx[id_src[0]].prev_odm_pipe = NULL;
+ res_ctx->pipe_ctx[id_src[1]].next_odm_pipe = NULL;
+ res_ctx->pipe_ctx[id_src[1]].prev_odm_pipe = &res_ctx->pipe_ctx[id_src[0]];
+ } else
+ stream->apply_boot_odm_mode = dm_odm_combine_mode_disabled;
+
+ return id_src[0];
}
return -1;
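
Aside, illustrative only: the reworked boot path above asks the OTG (via get_optc_source) which source pipes feed it; when two pipes are reported they are wired up as an ODM 2:1 pair, with the first pipe as the master and the second pointing back through prev_odm_pipe. A self-contained sketch of that linking, using a simplified stand-in for pipe_ctx (the ex_ names are invented for illustration):

/* Minimal stand-in for ODM 2:1 pipe linking, illustration only. */
#include <stddef.h>
#include <stdio.h>

struct ex_pipe_ctx {
	int pipe_idx;
	struct ex_pipe_ctx *next_odm_pipe;
	struct ex_pipe_ctx *prev_odm_pipe;
};

static void ex_link_odm_pair(struct ex_pipe_ctx pipes[], int first, int second)
{
	pipes[first].next_odm_pipe = &pipes[second];
	pipes[first].prev_odm_pipe = NULL;
	pipes[second].next_odm_pipe = NULL;
	pipes[second].prev_odm_pipe = &pipes[first];
}

int main(void)
{
	struct ex_pipe_ctx pipes[4] = {
		{ .pipe_idx = 0 }, { .pipe_idx = 1 },
		{ .pipe_idx = 2 }, { .pipe_idx = 3 },
	};

	ex_link_odm_pair(pipes, 0, 1);
	printf("pipe 1 prev_odm_pipe -> pipe %d\n",
	       pipes[1].prev_odm_pipe->pipe_idx);
	return 0;
}

In the patch itself the same wiring is done directly on res_ctx->pipe_ctx[id_src[0]] and [id_src[1]], and apply_boot_odm_mode records whether such a pair was found.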
@@ -2218,10 +2251,8 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_DRM_AMD_DC_DCN
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
-#endif
if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
return DC_NO_CONTROLLER_RESOURCE;
@@ -2284,14 +2315,10 @@ enum dc_status resource_map_pool_resources(
/* Add ABM to the resource if on EDP */
if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal)) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (pool->abm)
pipe_ctx->stream_res.abm = pool->abm;
else
pipe_ctx->stream_res.abm = pool->multiple_abms[pipe_ctx->stream_res.tg->inst];
-#else
- pipe_ctx->stream_res.abm = pool->abm;
-#endif
}
for (i = 0; i < context->stream_count; i++)
@@ -2412,7 +2439,6 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
* TODO: Split out assignment and validation.
@@ -2420,7 +2446,6 @@ enum dc_status dc_validate_global_state(
if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
return result;
}
@@ -3148,10 +3173,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
-#endif
return 32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
@@ -3304,7 +3327,6 @@ uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter tr
/* TODO - get transmitter to phy idx mapping from DMUB */
uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
switch (transmitter) {
@@ -3328,7 +3350,7 @@ uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter tr
break;
}
}
-#endif
+
return phy_idx;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c4e871f358ab..de8b214132a2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -314,9 +314,7 @@ bool dc_stream_set_cursor_attributes(
const struct dc_cursor_attributes *attributes)
{
struct dc *dc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
-#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -335,7 +333,6 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
stream->cursor_attributes = *attributes;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
/* disable idle optimizations while updating cursor */
if (dc->idle_optimizations_allowed) {
@@ -343,15 +340,12 @@ bool dc_stream_set_cursor_attributes(
reset_idle_optimizations = true;
}
-#endif
program_cursor_attributes(dc, stream, attributes);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
dc_allow_idle_optimizations(dc, true);
-#endif
return true;
}
@@ -396,9 +390,7 @@ bool dc_stream_set_cursor_position(
const struct dc_cursor_position *position)
{
struct dc *dc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
-#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -411,7 +403,6 @@ bool dc_stream_set_cursor_position(
}
dc = stream->ctx->dc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
/* disable idle optimizations if enabling cursor */
@@ -420,16 +411,13 @@ bool dc_stream_set_cursor_position(
reset_idle_optimizations = true;
}
-#endif
stream->cursor_position = *position;
program_cursor_position(dc, stream, position);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
dc_allow_idle_optimizations(dc, true);
-#endif
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6b9c6a71841..5bc6ff2fa73e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -61,6 +61,8 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->blend_tf->type = TF_TYPE_BYPASS;
}
+ plane_state->pre_multiplied_alpha = true;
+
}
static void dc_plane_destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4ffab7bb1098..3960c74482be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.177"
+#define DC_VER "3.2.186"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -188,6 +188,7 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
+ bool zstate_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -221,7 +222,6 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
//These bitfields to be used starting with DCN
struct {
uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
@@ -229,7 +229,6 @@ struct dc_dcc_setting {
uint32_t dcc_256_128_128 : 1; //available starting with DCN
uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
} dcc_controls;
-#endif
};
struct dc_surface_dcc_cap {
@@ -330,15 +329,14 @@ struct dc_config {
bool disable_dmcu;
bool enable_4to1MPC;
bool enable_windowed_mpo_odm;
- bool allow_edp_hotplug_detection;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t allow_edp_hotplug_detection;
bool clamp_min_dcfclk;
-#endif
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool use_pipe_ctx_sync_logic;
+ bool ignore_dpref_ss;
};
enum visual_confirm {
@@ -357,6 +355,12 @@ enum dc_psr_power_opts {
psr_power_opt_ds_disable_allow = 0x100,
};
+enum dml_hostvm_override_opts {
+ DML_HOSTVM_NO_OVERRIDE = 0x0,
+ DML_HOSTVM_OVERRIDE_FALSE = 0x1,
+ DML_HOSTVM_OVERRIDE_TRUE = 0x2,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -387,14 +391,12 @@ enum dcn_pwr_state {
DCN_PWR_STATE_LOW_POWER = 3,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_UNKNOWN,
DCN_ZSTATE_SUPPORT_ALLOW,
DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
DCN_ZSTATE_SUPPORT_DISALLOW,
};
-#endif
/*
* For any clocks that may differ per pipe
* only the max is stored in this structure
@@ -412,10 +414,8 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
-#endif
enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
@@ -525,6 +525,22 @@ union dpia_debug_options {
uint32_t raw;
};
+/* AUX wake workaround options
+ * bit 0: enable/disable workaround
+ * bit 1: use default timeout LINK_AUX_WAKE_TIMEOUT_MS
+ * bits 15-2: reserved
+ * bits 31-16: timeout in ms
+ */
+union aux_wake_wa_options {
+ struct {
+ uint32_t enable_wa : 1;
+ uint32_t use_default_timeout : 1;
+ uint32_t rsvd: 14;
+ uint32_t timeout_ms : 16;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
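
Aside (sketch under stated assumptions): aux_wake_wa packs the AUX-wake workaround controls into one debug word, so either the driver default timeout (LINK_AUX_WAKE_TIMEOUT_MS) or an explicit value in the top 16 bits can be selected. A standalone example of decoding such a word; the ex_ names and the 10 ms default are invented for illustration:

/* Hypothetical, standalone sketch of decoding aux_wake_wa-style options. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DEFAULT_AUX_WAKE_TIMEOUT_MS 10 /* assumed default, illustration only */

union example_aux_wake_wa {
	struct {
		uint32_t enable_wa : 1;
		uint32_t use_default_timeout : 1;
		uint32_t rsvd : 14;
		uint32_t timeout_ms : 16;
	} bits;
	uint32_t raw;
};

static unsigned int example_aux_wake_timeout_ms(union example_aux_wake_wa wa)
{
	if (!wa.bits.enable_wa)
		return 0; /* workaround disabled */
	if (wa.bits.use_default_timeout)
		return EXAMPLE_DEFAULT_AUX_WAKE_TIMEOUT_MS;
	return wa.bits.timeout_ms;
}

int main(void)
{
	union example_aux_wake_wa wa = { .raw = 0 };

	wa.bits.enable_wa = 1;
	wa.bits.timeout_ms = 40;
	printf("timeout = %u ms\n", example_aux_wake_timeout_ms(wa));
	return 0;
}

The union layout mirrors the one added above; only the surrounding helper and the default constant are hypothetical.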
@@ -629,9 +645,7 @@ struct dc_debug_options {
bool disable_pplib_clock_request;
bool disable_clock_gate;
bool disable_mem_low_power;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool pstate_enabled;
-#endif
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
@@ -648,20 +662,17 @@ struct dc_debug_options {
uint32_t edid_read_retry_times;
bool remove_disconnect_edp;
unsigned int force_odm_combine; //bit vector based on otg inst
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
bool disable_z9_mpc;
-#endif
unsigned int force_fclk_khz;
bool enable_tri_buf;
bool dmub_offload_enabled;
bool dmcub_emulation;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_idle_power_optimizations;
unsigned int mall_size_override;
unsigned int mall_additional_timer_percent;
bool mall_error_as_fatal;
-#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
bool disable_fec;
@@ -670,9 +681,7 @@ struct dc_debug_options {
* watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
int dwb_fi_phase;
-#endif
bool disable_timing_sync;
bool cm_in_bypass;
int force_clock_mode;/*every mode change.*/
@@ -703,15 +712,15 @@ struct dc_debug_options {
bool enable_driver_sequence_debug;
enum det_size crb_alloc_policy;
int crb_alloc_policy_min_disp_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_z9_disable_interface;
bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
-#endif
bool apply_vendor_specific_lttpr_wa;
- bool ignore_dpref_ss;
+ bool extended_blank_optimization;
+ union aux_wake_wa_options aux_wake_wa;
uint8_t psr_power_use_phy_fsm;
+ enum dml_hostvm_override_opts dml_hostvm_override;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -740,11 +749,9 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
-#endif
/* HW functions */
struct hw_sequencer_funcs hwss;
@@ -753,12 +760,8 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
bool wm_optimized_required;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool idle_optimizations_allowed;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool enable_c20_dtm_b0;
-#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
@@ -808,9 +811,7 @@ struct dc_init_data {
uint64_t log_mask;
struct dpcd_vendor_signature vendor_signature;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool force_smu_not_present;
-#endif
};
struct dc_callback_init {
@@ -1003,15 +1004,14 @@ struct dc_plane_state {
struct dc_transfer_func *in_shaper_func;
struct dc_transfer_func *blend_tf;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_transfer_func *gamcor_tf;
-#endif
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
bool is_tiling_rotated;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool visible;
@@ -1046,6 +1046,7 @@ struct dc_plane_info {
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
@@ -1102,18 +1103,6 @@ struct dc_transfer_func *dc_create_transfer_func(void);
struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
-/*
- * This structure holds a surface address. There could be multiple addresses
- * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
- * as frame durations and DCC format can also be set.
- */
-struct dc_flip_addrs {
- struct dc_plane_address address;
- unsigned int flip_timestamp_in_us;
- bool flip_immediate;
- /* TODO: add flip duration for FreeSync */
- bool triplebuffer_flips;
-};
void dc_post_update_surfaces_to_stream(
struct dc *dc);
@@ -1154,13 +1143,11 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-#endif
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
@@ -1291,10 +1278,8 @@ struct hdcp_caps {
#include "dc_link.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
-#endif
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
******************************************************************************/
@@ -1369,6 +1354,8 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
+bool dc_extended_blank_supported(struct dc *dc);
+
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
@@ -1416,7 +1403,6 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
struct dc_cursor_attributes *cursor_attr);
@@ -1441,13 +1427,9 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable);
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc);
-#endif
-
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
-#endif
bool dc_is_dmub_outbox_supported(struct dc *dc);
bool dc_enable_dmub_notifications(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 36ac2a8746bd..2c54b6e0498b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -133,6 +133,16 @@ enum dp_link_encoding {
DP_128b_132b_ENCODING = 2,
};
+enum dp_test_link_rate {
+ DP_TEST_LINK_RATE_RBR = 0x06,
+ DP_TEST_LINK_RATE_HBR = 0x0A,
+ DP_TEST_LINK_RATE_HBR2 = 0x14,
+ DP_TEST_LINK_RATE_HBR3 = 0x1E,
+ DP_TEST_LINK_RATE_UHBR10 = 0x01,
+ DP_TEST_LINK_RATE_UHBR20 = 0x02,
+ DP_TEST_LINK_RATE_UHBR13_5 = 0x03,
+};
+
struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
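
Aside, illustrative only: the TEST_LINK_RATE codes above follow DPCD conventions, where the 8b/10b values are multiples of 0.27 Gbps per lane (0x06 is RBR 1.62 Gbps, 0x0A HBR 2.7, 0x14 HBR2 5.4, 0x1E HBR3 8.1), while the small UHBR values are discrete selectors. A hedged sketch of turning a test-request code into a per-lane rate; the function name and its Mbps return convention are invented:

/* Standalone sketch: interpret a DP_TEST_LINK_RATE-style value. */
#include <stdbool.h>
#include <stdio.h>

/* Returns the per-lane rate in Mbps, or 0 if the code is unknown. */
static unsigned int example_test_rate_to_mbps(unsigned int code, bool is_128b_132b)
{
	if (is_128b_132b) {
		switch (code) {
		case 0x01: return 10000; /* UHBR10 */
		case 0x02: return 20000; /* UHBR20 */
		case 0x03: return 13500; /* UHBR13.5 */
		default:   return 0;
		}
	}
	/* 8b/10b codes are multiples of 0.27 Gbps per lane. */
	return code * 270;
}

int main(void)
{
	printf("0x14 (8b/10b)    -> %u Mbps\n", example_test_rate_to_mbps(0x14, false));
	printf("0x03 (128b/132b) -> %u Mbps\n", example_test_rate_to_mbps(0x03, true));
	return 0;
}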
@@ -620,7 +630,7 @@ union test_request {
uint8_t LINK_TEST_PATTRN :1;
uint8_t EDID_READ :1;
uint8_t PHY_TEST_PATTERN :1;
- uint8_t RESERVED :1;
+ uint8_t PHY_TEST_CHANNEL_CODING_TYPE :2;
uint8_t AUDIO_TEST_PATTERN :1;
uint8_t TEST_AUDIO_DISABLED_VIDEO :1;
} bits;
@@ -993,8 +1003,8 @@ union dp_128b_132b_supported_link_rates {
union dp_128b_132b_supported_lttpr_link_rates {
struct {
uint8_t UHBR10 :1;
- uint8_t UHBR13_5:1;
uint8_t UHBR20 :1;
+ uint8_t UHBR13_5:1;
uint8_t RESERVED:5;
} bits;
uint8_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c964f598755a..aa7e3a07191d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -201,8 +201,9 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
+ SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010 =
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
- SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010,
SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102,
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
@@ -235,6 +236,22 @@ enum pixel_format {
PIXEL_FORMAT_UNKNOWN
};
+/*
+ * This structure holds a surface address. There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+#define DC_MAX_DIRTY_RECTS 3
+struct dc_flip_addrs {
+ struct dc_plane_address address;
+ unsigned int flip_timestamp_in_us;
+ bool flip_immediate;
+ /* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
+ unsigned int dirty_rect_count;
+ struct rect dirty_rects[DC_MAX_DIRTY_RECTS];
+};
+
enum tile_split_values {
DC_DISPLAY_MICRO_TILING = 0x0,
DC_THIN_MICRO_TILING = 0x1,
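
Aside (hypothetical sketch): dc_flip_addrs now carries up to DC_MAX_DIRTY_RECTS damage rectangles next to the flip address, so a flip can describe which regions of the surface actually changed. A standalone example of filling such a structure, with the driver types replaced by simplified ex_ stand-ins:

/* Simplified stand-ins for the driver types, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define EX_MAX_DIRTY_RECTS 3

struct ex_rect { int x, y, width, height; };

struct ex_flip_addrs {
	unsigned long long address;          /* stand-in for struct dc_plane_address */
	unsigned int flip_timestamp_in_us;
	bool flip_immediate;
	bool triplebuffer_flips;
	unsigned int dirty_rect_count;
	struct ex_rect dirty_rects[EX_MAX_DIRTY_RECTS];
};

int main(void)
{
	struct ex_flip_addrs flip = {
		.address = 0x100000,
		.flip_immediate = false,
		.dirty_rect_count = 1,
		.dirty_rects = { { .x = 0, .y = 0, .width = 256, .height = 64 } },
	};

	printf("flip with %u dirty rect(s), first %dx%d\n",
	       flip.dirty_rect_count,
	       flip.dirty_rects[0].width, flip.dirty_rects[0].height);
	return 0;
}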
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index aa818bf840eb..a3c37ee3f849 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -67,13 +67,9 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
-struct time_stamp {
- uint64_t edp_poweroff;
- uint64_t edp_poweron;
-};
-
-struct link_trace {
- struct time_stamp time_stamp;
+struct edp_trace_power_timestamps {
+ uint64_t poweroff;
+ uint64_t poweron;
};
struct dp_trace_lt_counts {
@@ -96,6 +92,7 @@ struct dp_trace {
struct dp_trace_lt commit_lt_trace;
unsigned int link_loss_count;
bool is_initialized;
+ struct edp_trace_power_timestamps edp_trace_power_timestamps;
};
/* PSR feature flags */
@@ -231,7 +228,6 @@ struct dc_link {
struct dc_link_status link_status;
struct dprx_states dprx_states;
- struct link_trace link_trace;
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 99a750f561f8..58941f4defb3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -131,6 +131,7 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
+ uint32_t crtc_timing_adjust : 1;
} bits;
uint32_t raw;
@@ -161,7 +162,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
-
+ uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -245,6 +246,7 @@ struct dc_stream_state {
bool apply_edp_fast_boot_optimization;
bool apply_seamless_boot_optimization;
+ uint32_t apply_boot_odm_mode;
uint32_t stream_id;
@@ -289,6 +291,7 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
+ struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 8e814000db62..9e39cd7b203e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -87,7 +87,8 @@ static void release_engine(
engine->ddc = NULL;
- REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1,
+ AUX_SW_USE_AUX_REG_REQ, 0);
}
#define SW_CAN_ACCESS_AUX 1
@@ -565,13 +566,11 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
struct aux_request_transaction_data aux_req;
- struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
int res = -1;
uint32_t status;
memset(&aux_req, 0, sizeof(aux_req));
- memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
if (!acquire(aux_engine, ddc_pin)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index cc5128e67daf..845aa8a1027d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -918,7 +918,6 @@ static bool dce112_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_100hz = 7000000;
@@ -932,7 +931,6 @@ static bool dce112_program_pix_clk(
REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
return true;
}
-#endif
/* First disable SS
* ATOMBIOS will enable by default SS on PLL for DP,
* do not disable it here
@@ -971,7 +969,6 @@ static bool dce112_program_pix_clk(
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool dcn31_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
@@ -985,7 +982,7 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
- if (dc_is_dp_signal(pix_clk_params->signal_type)) {
+ if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
/* Set DTO values: phase = target clock, modulo = reference clock*/
REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor);
@@ -1062,7 +1059,6 @@ static bool dcn31_program_pix_clk(
return true;
}
-#endif
static bool dce110_clock_source_power_down(
struct clock_source *clk_src)
@@ -1105,9 +1101,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
- *pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
- modulo_hz);
+ if (modulo_hz)
+ *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ modulo_hz);
+ else
+ *pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be
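
Aside, illustrative only: when MODULO is not programmed equal to DPREFCLK, the pixel clock is recovered as clock_hz * dprefclk_khz * 10 / modulo_hz, and the change above guards that division so an unprogrammed (zero) MODULO reads back as 0 kHz rather than faulting. A small standalone sketch of the guarded formula, with invented register values:

/* Guarded DTO pixel-clock recovery, standalone sketch with invented values. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_pixel_clk_khz(uint32_t clock_hz, uint32_t dprefclk_khz,
				 uint32_t modulo_hz)
{
	if (!modulo_hz)
		return 0; /* avoid divide-by-zero on an unprogrammed DTO */
	return (uint32_t)(((uint64_t)clock_hz * dprefclk_khz * 10) / modulo_hz);
}

int main(void)
{
	/* e.g. a 27 MHz PHASE readback against a 1 GHz MODULO, 100 MHz DPREFCLK */
	printf("pixel clk = %u kHz\n", ex_pixel_clk_khz(27000000, 100000, 1000000000));
	printf("pixel clk = %u kHz (modulo 0)\n", ex_pixel_clk_khz(27000000, 100000, 0));
	return 0;
}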
@@ -1121,7 +1120,6 @@ static bool get_pixel_clk_frequency_100hz(
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* this table is used to find *1.001 and /1.001 pixel rates from non-precise pixel rate */
const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
// /1.001 rates
@@ -1171,7 +1169,6 @@ const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
return NULL;
}
-#endif
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
@@ -1218,7 +1215,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
.override_dp_pix_clk = dcn20_override_dp_pix_clk
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool dcn3_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
@@ -1254,7 +1250,7 @@ static uint32_t dcn3_get_pix_clk_dividers(
struct pixel_clk_params *pix_clk_params,
struct pll_settings *pll_settings)
{
- unsigned long long actual_pix_clk_100Hz = pix_clk_params->requested_pix_clk_100hz;
+ unsigned long long actual_pix_clk_100Hz = pix_clk_params ? pix_clk_params->requested_pix_clk_100hz : 0;
struct dce110_clk_src *clk_src;
clk_src = TO_DCE110_CLK_SRC(cs);
@@ -1304,7 +1300,7 @@ static const struct clock_source_funcs dcn31_clk_src_funcs = {
.get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
-#endif
+
/*****************************************/
/* Constructor */
/*****************************************/
@@ -1690,7 +1686,6 @@ bool dcn20_clk_src_construct(
return ret;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn3_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -1706,9 +1701,7 @@ bool dcn3_clk_src_construct(
return ret;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn31_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -1724,9 +1717,7 @@ bool dcn31_clk_src_construct(
return ret;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn301_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -1742,4 +1733,3 @@ bool dcn301_clk_src_construct(
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 069de7649c8c..9eec3524335f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -100,7 +100,6 @@
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_COMMON_REG_LIST_DCN3_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -130,9 +129,7 @@
SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_COMMON_REG_LIST_DCN3_02(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -160,15 +157,13 @@
SRII(PIXEL_RATE_CNTL, OTG, 0),\
SRII(PIXEL_RATE_CNTL, OTG, 1)
-#endif
+
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -190,7 +185,6 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
-#endif
#define CS_REG_FIELD_LIST(type) \
type PLL_REF_DIV_SRC; \
@@ -274,7 +268,6 @@ bool dcn20_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn3_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -301,7 +294,6 @@ bool dcn31_clk_src_construct(
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#endif
/* this table is used to find *1.001 and /1.001 pixel rates from non-precise pixel rate */
struct pixel_rate_range_table_entry {
@@ -312,10 +304,8 @@ struct pixel_rate_range_table_entry {
unsigned short div_factor;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
extern const struct pixel_rate_range_table_entry video_optimized_pixel_rates[];
const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
unsigned int pixel_rate_khz);
-#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 8cd841320ded..7183ac5780a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -70,9 +70,7 @@
//Register access policy version
#define mmMP0_SMN_C2PMSG_91 0x1609B
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static const uint32_t abm_gain_stepsize = 0x0060;
-#endif
static bool dce_dmcu_init(struct dmcu *dmcu)
{
@@ -333,7 +331,6 @@ static void dce_get_psr_wait_loop(
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void dcn10_get_dmcu_version(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -930,7 +927,6 @@ static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset)
return false;
}
-#endif //(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dcn10_forward_crc_window(struct dmcu *dmcu,
@@ -1021,7 +1017,6 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -1065,7 +1060,6 @@ static const struct dmcu_funcs dcn21_funcs = {
.lock_phy = dcn20_lock_phy,
.unlock_phy = dcn20_unlock_phy
};
-#endif
static void dce_dmcu_construct(
struct dce_dmcu *dmcu_dce,
@@ -1085,7 +1079,6 @@ static void dce_dmcu_construct(
dmcu_dce->dmcu_mask = dmcu_mask;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void dcn21_dmcu_construct(
struct dce_dmcu *dmcu_dce,
struct dc_context *ctx,
@@ -1103,7 +1096,6 @@ static void dcn21_dmcu_construct(
dmcu_dce->base.psp_version = psp_version;
}
}
-#endif
struct dmcu *dce_dmcu_create(
struct dc_context *ctx,
@@ -1126,7 +1118,6 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -1189,7 +1180,6 @@ struct dmcu *dcn21_dmcu_create(
return &dmcu_dce->base;
}
-#endif
void dce_dmcu_destroy(struct dmcu **dmcu)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 779bc92a2968..a8c92b517df1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -136,7 +136,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -175,7 +175,6 @@ static void dce110_update_generic_info_packet(
break;
}
}
-#endif
}
static void dce110_update_hdmi_info_packet(
@@ -230,7 +229,6 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -259,7 +257,6 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#endif
default:
/* invalid HW packet index */
DC_LOG_WARNING(
@@ -277,7 +274,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -288,7 +284,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
uint8_t colorimetry_bpc;
uint8_t dynamic_range_rgb = 0; /*full range*/
uint8_t dynamic_range_ycbcr = 1; /*bt709*/
-#endif
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
@@ -329,10 +324,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
-#endif
break;
default:
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
@@ -340,10 +333,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
-#endif
/* set color depth */
@@ -374,7 +365,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (hw_crtc_timing.display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -454,7 +444,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -468,7 +457,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
DP_MSA_HTOTAL, hw_crtc_timing.h_total,
DP_MSA_VTOTAL, hw_crtc_timing.v_total);
-#endif
/* calculate from vesa timing parameters
* h_active_start related to leading edge of sync
@@ -489,7 +477,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.v_front_porch;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -514,9 +501,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right,
DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
-#endif
}
-#endif
}
static void dce110_stream_encoder_set_stream_attribute_helper(
@@ -787,7 +772,6 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring up, disable dp double TODO */
if (REG(HDMI_DB_CONTROL))
@@ -799,7 +783,6 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->spd);
dce110_update_hdmi_info_packet(enc110, 4, &info_frame->hdrsmd);
}
-#endif
}
static void dce110_stream_encoder_stop_hdmi_info_packets(
@@ -825,7 +808,6 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
@@ -844,7 +826,6 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_CONT, 0,
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#endif
}
static void dce110_stream_encoder_update_dp_info_packets(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 312c68172689..1d4f0c45b536 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -231,7 +231,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Set PSR power optimization flags.
*/
static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 248602c15f3a..7eff7811769d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -67,6 +67,7 @@
#include "dcn10/dcn10_hw_sequencer.h"
+#include "link/link_dp_trace.h"
#include "dce110_hw_sequencer.h"
#define GAMMA_HW_POINTS_NUM 256
@@ -819,19 +820,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweroff), 1000000);
+ dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweron), 1000000);
+ dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- link->link_trace.time_stamp.edp_poweroff,
- link->link_trace.time_stamp.edp_poweron,
+ dp_trace_get_edp_poweroff_timestamp(link),
+ dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -846,7 +847,7 @@ void dce110_edp_power_control(
link->local_sink->edid_caps.panel_patch.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (link->link_trace.time_stamp.edp_poweroff != 0) {
+ if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -904,17 +905,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- if (!power_up)
- /*save driver power off time stamp*/
- link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
- else
- link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
+ dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- link->link_trace.time_stamp.edp_poweroff,
- link->link_trace.time_stamp.edp_poweron);
+ dp_trace_get_edp_poweroff_timestamp(link),
+ dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -942,14 +939,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- link->link_trace.time_stamp.edp_poweroff != 0) {
+ dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweroff), 1000000);
+ dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
@@ -1368,11 +1365,9 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
-#endif
if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
get_surface_visual_confirm_color(pipe_ctx, &color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index fbff6beb78be..3a7f76e2c598 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1316,7 +1316,7 @@ void hubp1_set_flip_int(struct hubp *hubp)
*
* @hubp: hubp struct reference.
*/
-void hubp1_wait_pipe_read_start(struct hubp *hubp)
+static void hubp1_wait_pipe_read_start(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c3e141c19a77..e3a62873c0e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1259,6 +1259,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
+ struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
for (i = 0; i < context->stream_count; i++) {
@@ -1294,6 +1295,21 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
+ /* Reset DET size */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+
+ /* Do not need to reset for seamless boot */
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
+ continue;
+
+ if (hubbub && hubp) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+ }
+ }
+
/* num_opp will be equal to number of mpcc */
for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1359,6 +1375,11 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
+ if (tg->funcs->is_tg_enabled(tg)) {
+ if (tg->funcs->init_odm)
+ tg->funcs->init_odm(tg);
+ }
+
tg->funcs->tg_init(tg);
}
@@ -1493,20 +1514,20 @@ void dcn10_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
- }
-
- /* Power gate DSCs */
- if (!is_optimized_init_done) {
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1559,8 +1580,6 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -2056,7 +2075,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
- struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
+ struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
unsigned int pclk;
@@ -2067,6 +2086,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
+ hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
+ if (!hw_crtc_timing)
+ return master;
+
if (dc->config.vblank_alignment_dto_params &&
dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
embedded_h_total =
@@ -2130,6 +2153,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
}
}
+
+ kfree(hw_crtc_timing);
return master;
}
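
Aside (sketch, loosely related): the change above moves the MAX_PIPES-sized timing array off the stack to a kcalloc'd buffer, bails out early if the allocation fails, and frees it before returning, which keeps the stack frame small. A user-space analogue with calloc; EX_MAX_PIPES and the helper are invented for illustration:

/* User-space analogue of moving a large per-pipe array off the stack. */
#include <stdio.h>
#include <stdlib.h>

#define EX_MAX_PIPES 6 /* assumed value for illustration */

struct ex_crtc_timing { unsigned int h_total, v_total, pix_clk_100hz; };

static int ex_align_pixel_clocks(void)
{
	struct ex_crtc_timing *timings;
	int master = -1;

	timings = calloc(EX_MAX_PIPES, sizeof(*timings));
	if (!timings)
		return master; /* bail out, as the early return in the patch does */

	/* ... use timings[i] for each pipe ... */
	timings[0].h_total = 2200;

	free(timings);
	return master;
}

int main(void)
{
	printf("master = %d\n", ex_align_pixel_clocks());
	return 0;
}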
@@ -2522,27 +2547,32 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha) {
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && pipe_ctx->plane_state->pre_multiplied_alpha);
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
blnd_cfg.global_alpha = 0xff;
- /* DCN1.0 has output CM before MPC which seems to screw with
- * pre-multiplied alpha.
- */
- blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
- pipe_ctx->stream->output_color_space)
- && per_pixel_alpha;
-
-
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
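
Aside (illustrative sketch): with the rework above, pre_multiplied_alpha is only honoured when per-pixel alpha is in use, and a global alpha additionally switches the MPCC to the combined per-pixel/global-gain mode; on DCN1.0 the pre-multiply is further gated on an RGB output color space because the output CM sits before the MPC. A standalone sketch of the selection logic (without the DCN1.0 color-space gate), using invented ex_ types:

/* Blend-mode selection sketch; enum and struct names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum ex_alpha_mode {
	EX_PER_PIXEL_ALPHA,
	EX_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
	EX_GLOBAL_ALPHA,
};

struct ex_blnd_cfg {
	enum ex_alpha_mode alpha_mode;
	bool pre_multiplied_alpha;
	unsigned int global_gain;
};

static struct ex_blnd_cfg ex_pick_blend(bool per_pixel_alpha, bool pre_mult,
					bool global_alpha,
					unsigned int global_alpha_value)
{
	struct ex_blnd_cfg cfg = { .global_gain = 0xff };

	if (per_pixel_alpha) {
		cfg.pre_multiplied_alpha = pre_mult;
		if (global_alpha) {
			cfg.alpha_mode = EX_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			cfg.global_gain = global_alpha_value;
		} else {
			cfg.alpha_mode = EX_PER_PIXEL_ALPHA;
		}
	} else {
		cfg.pre_multiplied_alpha = false;
		cfg.alpha_mode = EX_GLOBAL_ALPHA;
	}
	return cfg;
}

int main(void)
{
	struct ex_blnd_cfg cfg = ex_pick_blend(true, true, true, 0x80);

	printf("mode=%d premult=%d gain=0x%x\n",
	       cfg.alpha_mode, cfg.pre_multiplied_alpha, cfg.global_gain);
	return 0;
}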
@@ -2979,8 +3009,11 @@ void dcn10_prepare_bandwidth(
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ DC_FP_START();
dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_END();
+ }
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3013,8 +3046,11 @@ void dcn10_optimize_bandwidth(
dcn10_stereo_hw_frame_pack_wa(dc, context);
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ DC_FP_START();
dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_END();
+ }
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3039,12 +3075,16 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
* as well.
*/
for (i = 0; i < num_pipes; i++) {
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+ pipe_ctx[i]->stream_res.tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx[i]->stream_res.tg,
+ event_triggers, num_frames);
+ }
}
}
@@ -3175,7 +3215,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 4048908dd265..bca049b2f867 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1141,6 +1141,20 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
+static bool dcn10_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+
+ DC_FP_START();
+ voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ DC_FP_END();
+
+ return voltage_supported;
+}
+
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
@@ -1492,6 +1506,7 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
+ DC_FP_START();
if (!dc->debug.disable_pplib_clock_request)
dcn_bw_update_from_pplib(dc);
dcn_bw_sync_calcs_and_dml(dc);
@@ -1499,6 +1514,7 @@ static bool dcn10_resource_construct(
dc->res_pool = &pool->base;
dcn_bw_notify_pplib_of_wm_ranges(dc);
}
+ DC_FP_END();
{
struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b0c08ee6bc2c..7608187751c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
+#include "dm_services.h"
#include "dc_bios_types.h"
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 687d7e4bf7ca..293595a33982 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -92,6 +92,8 @@
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_VID_TIMING, DP, id), \
SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_AUD_N_READBACK, DP, id), \
+ SRI(DP_SEC_AUD_M_READBACK, DP, id), \
SRI(DP_SEC_TIMESTAMP, DP, id), \
SRI(DIG_CLOCK_PATTERN, DIG, id)
@@ -140,6 +142,8 @@ struct dcn10_stream_enc_registers {
uint32_t DP_VID_STREAM_CNTL;
uint32_t DP_VID_TIMING;
uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_AUD_N_READBACK;
+ uint32_t DP_SEC_AUD_M_READBACK;
uint32_t DP_SEC_TIMESTAMP;
uint32_t HDMI_CONTROL;
uint32_t HDMI_GC;
@@ -256,6 +260,8 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N_READBACK, DP_SEC_AUD_N_READBACK, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_M_READBACK, DP_SEC_AUD_M_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
@@ -473,6 +479,8 @@ struct dcn10_stream_enc_registers {
type AFMT_60958_CS_CHANNEL_NUMBER_6;\
type AFMT_60958_CS_CHANNEL_NUMBER_7;\
type DP_SEC_AUD_N;\
+ type DP_SEC_AUD_N_READBACK;\
+ type DP_SEC_AUD_M_READBACK;\
type DP_SEC_TIMESTAMP_MODE;\
type DP_SEC_ASP_ENABLE;\
type DP_SEC_ATP_ENABLE;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index ef5c4c0f4d6c..136a9dc062bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,6 +23,8 @@
*
*/
+#include <drm/display/drm_dsc_helper.h>
+
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
@@ -45,6 +47,7 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct
static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
+static void dsc2_disconnect(struct display_stream_compressor *dsc);
const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
@@ -54,6 +57,7 @@ const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_packed_pps = dsc2_get_packed_pps,
.dsc_enable = dsc2_enable,
.dsc_disable = dsc2_disable,
+ .dsc_disconnect = dsc2_disconnect,
};
/* Macro definitios for REG_SET macros*/
@@ -276,6 +280,15 @@ static void dsc2_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
+static void dsc2_disconnect(struct display_stream_compressor *dsc)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ DC_LOG_DSC("disconnect DSC %d", dsc->inst);
+
+ REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 0);
+}
/* This module's internal functions */
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 1118e33aaa2c..c21ecedc4692 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -26,7 +26,7 @@
#include "dsc.h"
#include "dsc/dscc_types.h"
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc.h>
#define TO_DCN20_DSC(dsc)\
container_of(dsc, struct dcn20_dsc, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ab910deed481..ec6aa8d8b251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1593,7 +1593,6 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
@@ -1774,7 +1773,6 @@ void dcn20_post_unlock_program_front_end(
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
@@ -1857,6 +1855,7 @@ void dcn20_optimize_bandwidth(
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int i;
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
@@ -1873,6 +1872,17 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
+ if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+ && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+ pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ }
+ }
/* increase compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
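
Aside, illustrative only: the extended-blank programming above runs only when the feature is supported, Z-state entry is allowed, and the stream's DRR adjustment pins v_total to a single value above the nominal timing. A standalone sketch of that eligibility check with simplified fields:

/* Eligibility check sketch for extended blank; fields are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct ex_stream {
	unsigned int v_total_min;
	unsigned int v_total_max;
	unsigned int timing_v_total;
};

static bool ex_can_program_extended_blank(bool extended_blank_supported,
					  bool zstate_allowed,
					  const struct ex_stream *s)
{
	return extended_blank_supported && zstate_allowed &&
	       s->v_total_min == s->v_total_max &&
	       s->v_total_max > s->timing_v_total;
}

int main(void)
{
	struct ex_stream s = { .v_total_min = 2250, .v_total_max = 2250,
			       .timing_v_total = 2160 };

	printf("eligible: %d\n", ex_can_program_extended_blank(true, true, &s));
	return 0;
}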
@@ -2332,14 +2342,22 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
@@ -2350,7 +2368,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
- blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
if (pipe_ctx->plane_state->format
== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
blnd_cfg.pre_multiplied_alpha = false;
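
The dcn20_update_mpcc() hunks above rework how the MPCC alpha mode is picked when a plane carries both per-pixel alpha and a global alpha value. The decision reduces to the stand-alone sketch below; the enum and struct names are simplified stand-ins for the driver's mpcc_blnd_cfg and plane state, not the real DC types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's MPCC alpha blend modes. */
enum alpha_mode {
	ALPHA_GLOBAL,                    /* MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA */
	ALPHA_PER_PIXEL,                 /* MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA */
	ALPHA_PER_PIXEL_COMBINED_GLOBAL, /* ..._PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN */
};

struct blend_cfg {
	enum alpha_mode alpha_mode;
	bool pre_multiplied_alpha;
	uint8_t global_gain;
};

/* Mirrors the selection in the hunk above: a per-pixel-alpha plane that also
 * has a global alpha uses the combined mode with global_gain, while a plane
 * without per-pixel alpha falls back to global alpha only. */
static void pick_blend_mode(struct blend_cfg *cfg, bool per_pixel_alpha,
			    bool has_global_alpha, uint8_t global_alpha_value,
			    bool plane_pre_multiplied)
{
	cfg->global_gain = 0xff;
	if (per_pixel_alpha) {
		cfg->pre_multiplied_alpha = plane_pre_multiplied;
		if (has_global_alpha) {
			cfg->alpha_mode = ALPHA_PER_PIXEL_COMBINED_GLOBAL;
			cfg->global_gain = global_alpha_value;
		} else {
			cfg->alpha_mode = ALPHA_PER_PIXEL;
		}
	} else {
		cfg->pre_multiplied_alpha = false;
		cfg->alpha_mode = ALPHA_GLOBAL;
	}
}

int main(void)
{
	struct blend_cfg cfg;

	pick_blend_mode(&cfg, true, true, 0x80, true);
	printf("mode=%d gain=0x%02x premul=%d\n",
	       cfg.alpha_mode, cfg.global_gain, cfg.pre_multiplied_alpha);
	return 0;
}
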
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d473708d5399..7802d603f796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index fe22530242d2..05b3fba9ccce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -95,8 +95,6 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else if (hwseq->fb_offset.quad_part <= addr->quad_part &&
addr->quad_part <= hwseq->uma_top.quad_part) {
is_in_uma = true;
- } else if (addr->quad_part == 0) {
- is_in_uma = false;
} else {
is_in_uma = false;
}
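
The gpu_addr_to_uma() cleanup above drops the explicit addr == 0 branch: both that branch and the final else set is_in_uma to false, so the result is unchanged. A tiny sketch of the remaining two-range classification, assuming stand-in parameters rather than the driver's dce_hwseq fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Both the dropped "addr == 0" branch and the final else return false,
 * so only the two window checks matter. */
static bool addr_in_uma(uint64_t addr, uint64_t fb_base, uint64_t fb_top,
			uint64_t fb_offset, uint64_t uma_top)
{
	if (fb_base <= addr && addr <= fb_top)
		return true;
	if (fb_offset <= addr && addr <= uma_top)
		return true;
	return false;
}

int main(void)
{
	printf("%d %d\n",
	       addr_in_uma(0x0, 0x1000, 0x2000, 0x4000, 0x8000),
	       addr_in_uma(0x1800, 0x1000, 0x2000, 0x4000, 0x8000));
	return 0;
}
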
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 612732656772..faab59508d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
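
The dcn21_clock_source_create() change above frees the partially built object on the failure path before returning NULL, closing a leak. The shape of the fix, sketched with plain calloc/free in place of the kernel's kzalloc/kfree and a hypothetical construct() step:

#include <stdbool.h>
#include <stdlib.h>

struct clock_source { int dummy; };

/* Stand-in for the real construct step, which can fail. */
static bool construct(struct clock_source *cs) { (void)cs; return false; }

static struct clock_source *clock_source_create(void)
{
	struct clock_source *cs = calloc(1, sizeof(*cs));

	if (!cs)
		return NULL;

	if (construct(cs))
		return cs;

	free(cs);	/* the fix: release the allocation on the error path */
	return NULL;
}

int main(void)
{
	struct clock_source *cs = clock_source_create();

	free(cs);	/* free(NULL) is a no-op */
	return 0;
}
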
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
index e2c264ecb20f..42140e73c3b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -95,6 +95,8 @@
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_VID_TIMING, DP, id), \
SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_AUD_N_READBACK, DP, id), \
+ SRI(DP_SEC_AUD_M_READBACK, DP, id), \
SRI(DP_SEC_TIMESTAMP, DP, id), \
SRI(DP_DSC_CNTL, DP, id), \
SRI(DP_DSC_BYTES_PER_PIXEL, DP, id), \
@@ -157,6 +159,8 @@
SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N_READBACK, DP_SEC_AUD_N_READBACK, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_M_READBACK, DP_SEC_AUD_M_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index ed0a0e5fd805..782b8db451b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -535,8 +535,12 @@ void dcn30_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* Power gate DSCs */
@@ -547,6 +551,9 @@ void dcn30_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -624,8 +631,6 @@ void dcn30_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
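
The dcn30_init_hw() hunk above extends the boot-time link scan: a link whose DIG is already enabled is marked active, and if the encoder also reports FEC active the link's FEC state is recorded. Both callbacks are optional, hence the NULL-check-then-call pattern. A stand-alone sketch with hypothetical, simplified encoder/link types (not the real dc_link structures):

#include <stdbool.h>
#include <stdio.h>

struct link_encoder;
struct encoder_funcs {
	bool (*is_dig_enabled)(struct link_encoder *enc);
	bool (*fec_is_active)(struct link_encoder *enc);
};
struct link_encoder { const struct encoder_funcs *funcs; };
struct link {
	struct link_encoder *enc;
	bool link_active;
	bool fec_enabled;
};

/* Each optional hook is NULL-checked before the call, matching the
 * "funcs->x && funcs->x(enc)" tests in the hunk above. */
static void scan_boot_link(struct link *link)
{
	const struct encoder_funcs *f = link->enc->funcs;

	if (f->is_dig_enabled && f->is_dig_enabled(link->enc)) {
		link->link_active = true;
		if (f->fec_is_active && f->fec_is_active(link->enc))
			link->fec_enabled = true;
	}
}

static bool always_true(struct link_encoder *enc) { (void)enc; return true; }

int main(void)
{
	const struct encoder_funcs funcs = { always_true, always_true };
	struct link_encoder enc = { &funcs };
	struct link link = { &enc, false, false };

	scan_boot_link(&link);
	printf("active=%d fec=%d\n", link.link_active, link.fec_enabled);
	return 0;
}
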
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index b7dc78624963..34b9cedbd012 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -99,11 +99,6 @@
SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst),\
SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst)
-/*
- SRII(MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM, inst),\
- SRII(MPCC_OGAM_MODE, MPCC_OGAM, inst)
-*/
-
#define MPC_OUT_MUX_REG_LIST_DCN3_0(inst) \
MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\
SRII(CSC_MODE, MPC_OUT, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index f5e8916601d3..b604fb26f288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#define REG(reg)\
optc1->tg_regs->reg
@@ -184,6 +186,14 @@ void optc3_set_dsc_config(struct timing_generator *optc,
}
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+ DC_FP_START();
+ optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
+ DC_FP_END();
+}
+
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
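
optc3_set_vrr_m_const() above shows the pattern this series applies throughout: the floating-point math moves into a dml/<ip>/<ip>_fpu.c file built with hard-float flags (see the Makefile hunk later in this patch), and the caller becomes a thin wrapper that brackets the call with DC_FP_START()/DC_FP_END(), so no FP instructions are emitted outside the protected region. A minimal user-space sketch of that shape, with the kernel macros replaced by hypothetical printing stubs:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's DC_FP_START()/DC_FP_END(), which
 * save and restore the FPU context around floating-point code. */
#define DC_FP_START() printf("fp context saved\n")
#define DC_FP_END()   printf("fp context restored\n")

/* Stands in for a helper that would live in dml/<ip>/<ip>_fpu.c and be
 * compiled with hard-float flags; all double math stays in here. */
static void fpu_helper_set_vrr_m_const(double vtotal_avg)
{
	printf("programmed M constant for vtotal_avg=%f\n", vtotal_avg * 1.0);
}

/* The non-FPU caller is a thin wrapper: no FP work of its own, just the
 * protected call into the hard-float translation unit. */
static void set_vrr_m_const(double vtotal_avg)
{
	DC_FP_START();
	fpu_helper_set_vrr_m_const(vtotal_avg);
	DC_FP_END();
}

int main(void)
{
	set_vrr_m_const(1125.0);
	return 0;
}
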
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index 736e63bc80c2..97f11ef6e9f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -344,5 +344,5 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
void optc3_tg_init(struct timing_generator *optc);
-
+void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index e6a62cc75139..1c1a67c4cec1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -84,6 +84,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
@@ -91,137 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_0_ip = {
- .use_min_dcfclk = 0,
- .clamp_min_dcfclk = 0,
- .odm_capable = 1,
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
- .gpuvm_max_page_table_levels = 4,
- .hostvm_max_page_table_levels = 4,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 6,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 84,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .max_page_table_levels = 2,
- .pte_chunk_size_kbytes = 2, // ?
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 0, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 6,
- .max_num_dpp = 6,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 562.0,
- .dppclk_mhz = 300.0,
- .phyclk_mhz = 300.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 405.6,
- },
- },
- .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
- .num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 40.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 404,
- .dummy_pstate_latency_us = 5,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3650,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .do_urgent_latency_adjustment = true,
- .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
enum dcn30_clk_src_array_id {
DCN30_CLK_SRC_PLL0,
DCN30_CLK_SRC_PLL1,
@@ -1480,90 +1350,9 @@ int dcn30_populate_dml_pipes_from_context(
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
- int pipe_cnt, i, j;
- double max_calc_writeback_dispclk;
- double writeback_dispclk;
- struct writeback_st dout_wb;
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
-
- if (!stream)
- continue;
- max_calc_writeback_dispclk = 0;
-
- /* Set writeback information */
- pipes[pipe_cnt].dout.wb_enable = 0;
- pipes[pipe_cnt].dout.num_active_wb = 0;
- for (j = 0; j < stream->num_wb_info; j++) {
- struct dc_writeback_info *wb_info = &stream->writeback_info[j];
-
- if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
- (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
- pipes[pipe_cnt].dout.wb_enable = 1;
- pipes[pipe_cnt].dout.num_active_wb++;
- dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_height :
- wb_info->dwb_params.cnv_params.src_height;
- dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_width :
- wb_info->dwb_params.cnv_params.src_width;
- dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
- dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
-
- /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
- if (dc->dml.ip.writeback_max_hscl_taps > 1) {
- dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
- dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
- } else {
- dout_wb.wb_htaps_luma = 1;
- dout_wb.wb_vtaps_luma = 1;
- }
- dout_wb.wb_htaps_chroma = 0;
- dout_wb.wb_vtaps_chroma = 0;
- dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_width /
- (double)wb_info->dwb_params.dest_width :
- (double)wb_info->dwb_params.cnv_params.src_width /
- (double)wb_info->dwb_params.dest_width;
- dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_height /
- (double)wb_info->dwb_params.dest_height :
- (double)wb_info->dwb_params.cnv_params.src_height /
- (double)wb_info->dwb_params.dest_height;
- if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
- wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
- dout_wb.wb_pixel_format = dm_444_64;
- else
- dout_wb.wb_pixel_format = dm_444_32;
-
- /* Workaround for cases where multiple writebacks are connected to same plane
- * In which case, need to compute worst case and set the associated writeback parameters
- * This workaround is necessary due to DML computation assuming only 1 set of writeback
- * parameters per pipe
- */
- writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
- dout_wb.wb_pixel_format,
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
- dout_wb.wb_hratio,
- dout_wb.wb_vratio,
- dout_wb.wb_htaps_luma,
- dout_wb.wb_vtaps_luma,
- dout_wb.wb_src_width,
- dout_wb.wb_dst_width,
- pipes[pipe_cnt].pipe.dest.htotal,
- dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
-
- if (writeback_dispclk > max_calc_writeback_dispclk) {
- max_calc_writeback_dispclk = writeback_dispclk;
- pipes[pipe_cnt].dout.wb = dout_wb;
- }
- }
- }
-
- pipe_cnt++;
- }
-
+ DC_FP_START();
+ dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+ DC_FP_END();
}
unsigned int dcn30_calc_max_scaled_time(
@@ -1598,7 +1387,7 @@ void dcn30_set_mcif_arb_params(
enum mmhubbub_wbif_mode wbif_mode;
struct display_mode_lib *dml = &context->bw_ctx.dml;
struct mcif_arb_params *wb_arb_params;
- int i, j, k, dwb_pipe;
+ int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
@@ -1622,17 +1411,15 @@ void dcn30_set_mcif_arb_params(
else
wbif_mode = PACKED_444;
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- }
+ DC_FP_START();
+ dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
+ DC_FP_END();
wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
- wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
dwb_pipe++;
@@ -2111,178 +1898,11 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
- int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- /* Set B:
- * DCFCLK: 1GHz or min required above 1GHz
- * FCLK/UCLK: Max
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set D:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
- */
- /*
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- */
-
- /* Set C:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * pstate latency overridden to 5us
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- unsigned int min_dram_speed_mts_margin = 160;
-
- if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
-
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (!pstate_en) {
- /* The only difference between A and C is p-state latency, if p-state is not supported we want to
- * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- /* Make set D = set A until set D is enabled */
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_update_soc_for_wm_a(dc, context);
DC_FP_END();
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- }
}
void dcn30_calculate_wm_and_dlg(
@@ -2292,7 +1912,7 @@ void dcn30_calculate_wm_and_dlg(
int vlevel)
{
DC_FP_START();
- dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
@@ -2351,40 +1971,6 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
-{
- double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
- bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
- bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
- bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
- if (optimal_fclk)
- *optimal_fclk = bw_from_dram /
- (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
- if (optimal_dcfclk)
- *optimal_dcfclk = bw_from_dram /
- (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
@@ -2399,47 +1985,43 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
+ struct dc_bounding_box_max_clk dcn30_bb_max_clk;
+
+ memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
+
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ DC_FP_START();
+ dcn30_fpu_update_dram_channel_width_bytes(dc);
+ DC_FP_END();
if (bw_params->clk_table.entries[0].memclk_mhz) {
- int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
+ dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
+ dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
+ dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
+ dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
- if (!max_dcfclk_mhz)
- max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
- if (!max_dispclk_mhz)
- max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
- if (!max_dppclk_mhz)
- max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
- if (!max_phyclk_mhz)
- max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+ DC_FP_START();
+ dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
+ DC_FP_END();
- if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
num_dcfclk_sta_targets++;
- } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
- dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
break;
}
}
@@ -2452,7 +2034,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2479,7 +2061,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2494,33 +2076,15 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_0_soc.num_states = num_states;
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state = i;
- dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
- /* Fill all states with max values of all other clocks */
- dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
- dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
- dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
- /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
- dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
- dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
- }
- /* re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ DC_FP_START();
+ dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
+ DC_FP_END();
}
}
@@ -2602,9 +2166,9 @@ static bool dcn30_resource_construct(
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
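
The dcn30_update_bw_bounding_box() rework above collects the per-DPM maximum clocks into a dc_bounding_box_max_clk struct and hands the floating-point bounding-box update to dcn30_fpu_update_bw_bounding_box(). The remaining integer part, the max scan over the clock table, reduces to the pattern sketched below with stand-in types (not the driver's clk_bw_params):

#include <stdio.h>

#define MAX_NUM_DPM_LVL 8

/* Simplified stand-ins for the clock table entry and max-clock struct. */
struct clk_entry { unsigned dcfclk_mhz, dispclk_mhz, dppclk_mhz, phyclk_mhz; };
struct max_clk   { unsigned max_dcfclk_mhz, max_dispclk_mhz,
			    max_dppclk_mhz, max_phyclk_mhz; };

/* Scan every DPM level and keep the largest value of each clock; unused
 * (zero) entries simply never win the comparison. */
static void find_max_clocks(const struct clk_entry *tbl, struct max_clk *max)
{
	int i;

	for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
		if (tbl[i].dcfclk_mhz > max->max_dcfclk_mhz)
			max->max_dcfclk_mhz = tbl[i].dcfclk_mhz;
		if (tbl[i].dispclk_mhz > max->max_dispclk_mhz)
			max->max_dispclk_mhz = tbl[i].dispclk_mhz;
		if (tbl[i].dppclk_mhz > max->max_dppclk_mhz)
			max->max_dppclk_mhz = tbl[i].dppclk_mhz;
		if (tbl[i].phyclk_mhz > max->max_phyclk_mhz)
			max->max_phyclk_mhz = tbl[i].phyclk_mhz;
	}
}

int main(void)
{
	struct clk_entry tbl[MAX_NUM_DPM_LVL] = {
		{ 506, 562, 300, 300 }, { 902, 1217, 1217, 810 },
	};
	struct max_clk max = { 0 };

	find_max_clocks(tbl, &max);
	printf("dcfclk=%u dispclk=%u dppclk=%u phyclk=%u\n",
	       max.max_dcfclk_mhz, max.max_dispclk_mhz,
	       max.max_dppclk_mhz, max.max_phyclk_mhz);
	return 0;
}
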
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b92e4cc0232f..3330a1026fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -35,6 +35,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
+
struct dcn30_resource_pool {
struct resource_pool base;
};
@@ -96,4 +99,6 @@ enum dc_status dcn30_add_stream_to_ctx(
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
+
#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4daf8931aa7c..a5df74110284 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -81,6 +81,8 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dml/dcn30/display_mode_vba_30.h"
#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 88318e8ffca8..f537888f4fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -43,6 +43,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dce/dce_abm.h"
@@ -1219,9 +1221,9 @@ static bool dcn302_resource_construct(
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4fcbc0502808..76f863eb86ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -25,6 +25,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dc_link_ddc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index d20e3b8ccc30..ec041e3cda30 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -15,32 +15,6 @@ DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o
dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
dcn31_afmt.o dcn31_vpg.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN31)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 3e6d6ebd199e..51c5f3685470 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
hubbub31->detile_buf_size = det_size_kb * 1024;
hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ hubbub31->debug_test_index_pstate = 0x6;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 53b792b997b7..197a5cae068b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -54,6 +54,14 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
}
+static void hubp31_program_extended_blank(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -80,6 +88,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
.hubp_in_blank = hubp1_in_blank,
+ .program_extended_blank = hubp31_program_extended_blank,
};
bool hubp31_construct(
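
hubp31_program_extended_blank() above is the per-ASIC hook behind the new extended-blank path added to dcn20_optimize_bandwidth() earlier in this patch: the caller requires dc_extended_blank_supported(), zstate support, a hubp that implements the hook, and a stream with a fixed refresh rate (v_total_min == v_total_max, stretched beyond the nominal v_total) before programming the optimized MIN_DST_Y_NEXT_START. A compact sketch of that guard, using hypothetical simplified fields rather than the real pipe_ctx layout:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the stream timing/adjust fields. */
struct stream {
	unsigned v_total;       /* nominal timing */
	unsigned v_total_min;   /* adjusted vtotal range */
	unsigned v_total_max;
	bool     has_extended_blank_hook; /* funcs->program_extended_blank set */
};

/* Mirrors the condition in the dcn20_optimize_bandwidth() hunk: extended
 * blank is only programmed when the refresh rate is fixed (min == max) and
 * the stretched vtotal actually exceeds the nominal one. */
static bool can_program_extended_blank(const struct stream *s)
{
	return s->has_extended_blank_hook &&
	       s->v_total_min == s->v_total_max &&
	       s->v_total_max > s->v_total;
}

int main(void)
{
	struct stream fixed = { 1125, 1250, 1250, true };
	struct stream drr   = { 1125, 1125, 2250, true };

	printf("fixed: %d, drr: %d\n",
	       can_program_extended_blank(&fixed),
	       can_program_extended_blank(&drr));
	return 0;
}
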
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4be228680909..531dd2c65007 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -188,8 +188,12 @@ void dcn31_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* Enables outbox notifications for usb4 dpia */
@@ -199,6 +203,9 @@ void dcn31_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -248,8 +255,6 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +343,20 @@ void dcn31_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
force_on = true; /* disable power gating */
if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +364,11 @@ void dcn31_enable_power_gating_plane(
/* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
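
The dcn31_enable_power_gating_plane() change above reads DC_IP_REQUEST_CNTL first, temporarily raises IP_REQUEST_EN if it was clear so the DOMAIN*_PG_CONFIG updates take effect, restores it afterwards, and drops the blocking REG_WAIT polls. The save/enable/restore shape is sketched below with hypothetical register helpers standing in for REG_GET/REG_SET/REG_UPDATE, and with the debug-flag handling of force_on simplified away:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register helpers; the driver uses REG_GET/REG_SET/REG_UPDATE. */
static uint32_t ip_request_en;
static uint32_t read_ip_request_en(void)        { return ip_request_en; }
static void     write_ip_request_en(uint32_t v) { ip_request_en = v; }

static void update_domain_force_on(int domain, bool force_on)
{
	printf("DOMAIN%d_PG_CONFIG.DOMAIN_POWER_FORCEON = %d\n", domain, force_on);
}

static void enable_power_gating_plane(bool enable)
{
	/* Simplified: the driver also consults debug.disable_*_power_gate
	 * before clearing force_on. */
	bool force_on = !enable;
	uint32_t org = read_ip_request_en();

	/* Open the IP register-access window only if it was not already open. */
	if (org == 0)
		write_ip_request_en(1);

	update_domain_force_on(0, force_on);
	update_domain_force_on(1, force_on);
	update_domain_force_on(2, force_on);
	update_domain_force_on(3, force_on);

	/* Restore the original state so the window is not left open. */
	if (org == 0)
		write_ip_request_en(0);
}

int main(void)
{
	enable_power_gating_plane(true);
	return 0;
}
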
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index d7559e5a99ce..e708f07fe75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
- if (dc->debug.disable_z10) {
- /*hw not support z10 or sw disable it*/
- dc->hwss.z10_restore = NULL;
- dc->hwss.z10_save_init = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 8afe2130d7c5..c51f7dca94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -91,8 +91,7 @@ static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, i
optc1->opp_count = opp_cnt;
}
-/**
- * Enable CRTC
+/*
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
static bool optc31_enable_crtc(struct timing_generator *optc)
@@ -124,7 +123,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
static bool optc31_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
/* disable otg request until end of the first line
* in the vertical blank region
*/
@@ -138,6 +136,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ optc1_clear_optc_underflow(optc);
return true;
}
@@ -158,6 +157,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
OTG_BUSY, 0,
1, 100000);
+ /* clear the false state */
+ optc1_clear_optc_underflow(optc);
+
return true;
}
@@ -211,6 +213,26 @@ void optc31_set_drr(
}
}
+void optc3_init_odm(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0,
+ OPTC_SEG0_SRC_SEL, optc->inst,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf
+ );
+
+ REG_SET(OTG_H_TIMING_CNTL, 0,
+ OTG_H_TIMING_DIV_MODE, 0);
+
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
+}
+
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -270,6 +292,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .init_odm = optc3_init_odm,
};
void dcn31_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
index a37b16040c1d..9e881f2ce74b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -258,4 +258,6 @@ void dcn31_timing_generator_init(struct optc *optc1);
void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
+void optc3_init_odm(struct timing_generator *optc);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 89b7b6b7254a..3d9f07d4770b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -36,6 +36,8 @@
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
@@ -65,6 +67,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31_panel_cntl.h"
@@ -102,152 +105,6 @@
#define DC_LOGGER_INIT(logger)
-#define DCN3_1_DEFAULT_DET_SIZE 384
-
-struct _vcs_dpi_ip_params_st dcn3_1_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1030,10 +887,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
+ .disable_z10 = true,
.optimize_edp_link_rate = true,
.enable_sw_cntl_psr = true,
.apply_vendor_specific_lttpr_wa = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1391,12 +1250,6 @@ static struct stream_encoder *dcn31_stream_encoder_create(
return NULL;
}
- if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
- if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
- eng_id = eng_id + 3; // For B0 only. C->F, D->G.
- }
-
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1810,9 +1663,7 @@ int dcn31_populate_dml_pipes_from_context(
* intermittently experienced depending on peak b/w requirements.
*/
pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
@@ -1820,6 +1671,13 @@ int dcn31_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE)
+ pipes[pipe_cnt].pipe.src.hostvm = false;
+ else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE)
+ pipes[pipe_cnt].pipe.src.hostvm = true;
+
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
@@ -1869,151 +1727,35 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
-static void dcn31_calculate_wm_and_dlg_fp(
+void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- /* We don't recalculate clocks for 0 pipe configs, which can block
- * S0i3 as high clocks will block low power states
- * Override any clocks that can block S0i3 to min here
- */
- if (pipe_cnt == 0) {
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
- return;
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
- /* Set A:
- * All clocks min required
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
- context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
+ DC_FP_START();
+ dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+}
+void
+dcn31_populate_dml_writeback_from_context(struct dc *dc,
+ struct resource_context *res_ctx,
+ display_e2e_pipe_params_st *pipes)
+{
DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes);
DC_FP_END();
}
-void dcn31_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+void
+dcn31_set_mcif_arb_params(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
{
DC_FP_START();
- dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt);
DC_FP_END();
}
@@ -2032,7 +1774,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
@@ -2073,77 +1817,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
-
- dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_1_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_1_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_1_soc.num_states = clk_table->num_entries;
- }
- }
-
- dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn31_res_pool_funcs = {
.destroy = dcn31_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -2159,8 +1832,8 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
- .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
@@ -2221,9 +1894,9 @@ static bool dcn31_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
@@ -2232,6 +1905,7 @@ static bool dcn31_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ dc->caps.zstate_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 4b7ab21ea15b..393458015d6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN31_RES_POOL(pool)\
container_of(pool, struct dcn31_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
+
struct dcn31_resource_pool {
struct resource_pool base;
};
@@ -47,7 +50,15 @@ int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
-void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void
+dcn31_populate_dml_writeback_from_context(struct dc *dc,
+ struct resource_context *res_ctx,
+ display_e2e_pipe_params_st *pipes);
+void
+dcn31_set_mcif_arb_params(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
struct resource_pool *dcn31_create_resource_pool(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
index c831ad46e81c..59381d24800b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
@@ -25,32 +25,6 @@
DCN315 = dcn315_resource.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN315)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 06adb77c206b..2b42af030b33 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -66,6 +66,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -133,158 +134,9 @@
#include "link_enc_cfg.h"
-#define DC_LOGGER_INIT(logger)
-
-#define DCN3_15_DEFAULT_DET_SIZE 192
#define DCN3_15_MAX_DET_SIZE 384
-#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_15_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE,
- .min_comp_buffer_size_kbytes = DCN3_15_MIN_COMPBUF_SIZE_KB,
- .config_return_buffer_size_in_kbytes = 1024,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 49,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 9,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 50.0,
- .sr_enter_plus_exit_z8_time_us = 50.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.38,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1859,88 +1711,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-static void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_15_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_15_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_15_soc.num_states - 1;
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_limits[i].dcfclk_mhz < dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_limits[i].dcfclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_15_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_15_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_15_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_15_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_15_soc.num_states = clk_table->num_entries;
- }
- }
-
- if (max_dispclk_mhz) {
- dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn315_res_pool_funcs = {
.destroy = dcn315_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1956,8 +1726,8 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
- .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
@@ -1988,11 +1758,10 @@ static bool dcn315_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index f3a36820a31f..39929fa67a51 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN315_RES_POOL(pool)\
container_of(pool, struct dcn315_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc;
+
struct dcn315_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
index cd87b687c5e2..819d44a9439b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
@@ -25,32 +25,6 @@
DCN316 = dcn316_resource.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN316)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index 8decc3ccf8ca..ef16260b7f3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -66,6 +66,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -123,157 +124,10 @@
#include "link_enc_cfg.h"
-#define DC_LOGGER_INIT(logger)
-
-#define DCN3_16_DEFAULT_DET_SIZE 192
#define DCN3_16_MAX_DET_SIZE 384
#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_16_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1024,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 556.0,
- .dppclk_mhz = 556.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 445.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 625.0,
- .dppclk_mhz = 625.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 625.0,
- .dppclk_mhz = 625.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1112.0,
- .dppclk_mhz = 1112.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1250.0,
- .dppclk_mhz = 1250.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1859,89 +1713,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-static void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_16_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- // Ported from DCN315
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_16_soc.num_states - 1;
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_limits[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_limits[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_16_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_16_soc.num_states = clk_table->num_entries;
- }
- }
-
- if (max_dispclk_mhz) {
- dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn316_res_pool_funcs = {
.destroy = dcn316_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1957,8 +1728,8 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
- .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
@@ -1989,11 +1760,10 @@ static bool dcn316_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 9d0d60cb9482..0dc5a6c13ae7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN316_RES_POOL(pool)\
container_of(pool, struct dcn316_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc;
+
struct dcn316_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 31109db02e93..fb6a2d7b6470 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -160,9 +160,7 @@ void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable);
-#endif
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 28978ce62f87..a64b88ca01a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -71,6 +71,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
@@ -112,8 +114,9 @@ DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
-DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn31/dcn31_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index e447c74be713..db3b16b77034 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -639,7 +639,6 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
- DC_FP_START();
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
@@ -675,7 +674,6 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
- DC_FP_END();
return updated;
}
@@ -764,7 +762,7 @@ static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
return 4;
}
-bool dcn10_validate_bandwidth(
+bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -790,7 +788,6 @@ bool dcn10_validate_bandwidth(
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
- DC_FP_START();
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
@@ -1323,8 +1320,6 @@ bool dcn10_validate_bandwidth(
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
- DC_FP_END();
-
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
@@ -1495,8 +1490,6 @@ void dcn_bw_update_from_pplib(struct dc *dc)
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
- DC_FP_START();
-
if (res)
res = verify_clock_values(&fclks);
@@ -1526,13 +1519,9 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- DC_FP_END();
-
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
- DC_FP_START();
-
if (res)
res = verify_clock_values(&dcfclks);
@@ -1543,8 +1532,6 @@ void dcn_bw_update_from_pplib(struct dc *dc)
dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
} else
BREAK_TO_DEBUGGER();
-
- DC_FP_END();
}
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1559,11 +1546,9 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- DC_FP_START();
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
- DC_FP_END();
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1614,7 +1599,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
- DC_FP_START();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
@@ -1803,5 +1787,4 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
- DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2f6122153bdb..f79dd40f8d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -722,8 +722,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
+ unsigned int optimized_min_dst_y_next_start_us;
plane_count = 0;
+ optimized_min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -744,11 +746,22 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ if (dc_extended_blank_supported(dc)) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+ optimized_min_dst_y_next_start_us =
+ context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+ break;
+ }
+ }
+ }
/* zstate only supported on PWRSEQ0 and when there are fewer than 2 planes */
if (link->link_index != 0 || stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
-
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
@@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params(
&pipes[pipe_idx].pipe);
pipe_idx++;
}
+ context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
}
static void swizzle_to_dml_params(
@@ -1290,9 +1302,7 @@ int dcn20_populate_dml_pipes_from_context(
}
/* populate writeback information */
- DC_FP_START();
dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
- DC_FP_END();
return pipe_cnt;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
new file mode 100644
index 000000000000..574676a0711a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "dcn_calc_math.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+
+#include "display_mode_vba_30.h"
+#include "dcn30_fpu.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+ .use_min_dcfclk = 0,
+ .clamp_min_dcfclk = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 6,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 2,
+ .pte_chunk_size_kbytes = 2, // ?
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 0, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 0,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 6,
+ .max_num_dpp = 6,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 1,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 562.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 300.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 405.6,
+ },
+ },
+
+ .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ .num_states = 1,
+ .sr_exit_time_us = 15.5,
+ .sr_enter_plus_exit_time_us = 20,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404,
+ .dummy_pstate_latency_us = 5,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3650,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .do_urgent_latency_adjustment = true,
+ .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ double vtotal_min, vtotal_max;
+ double ratio, modulo, phase;
+ uint32_t vblank_start;
+ uint32_t v_total_mask_value = 0;
+
+ dc_assert_fp_enabled();
+
+ /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
+ * VTOTAL_MAX - VTOTAL_MIN = 1
+ */
+ v_total_mask_value = 16;
+ vtotal_min = dcn_bw_floor(vtotal_avg);
+ vtotal_max = dcn_bw_ceil(vtotal_avg);
+
+ /* Check that bottom VBLANK is at least 2 lines tall when running with
+ * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
+ * of lines in a frame - 1'.
+ */
+ REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
+ &vblank_start);
+ ASSERT(vtotal_min >= vblank_start + 1);
+
+ /* Special case where the average frame rate can be achieved
+ * without using the DTO
+ */
+ if (vtotal_min == vtotal_max) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+
+ ratio = vtotal_max - vtotal_avg;
+ modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
+ phase = ratio * modulo;
+
+ /* Special cases where the DTO phase gets rounded to 0 or
+ * to DTO modulo
+ */
+ if (phase <= 0 || phase >= modulo) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
+ phase <= 0 ?
+ (uint32_t)vtotal_max : (uint32_t)vtotal_min);
+ REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
+ REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+ REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
+ OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
+ OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+ optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
+}
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ int pipe_cnt, i, j;
+ double max_calc_writeback_dispclk;
+ double writeback_dispclk;
+ struct writeback_st dout_wb;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+ if (!stream)
+ continue;
+ max_calc_writeback_dispclk = 0;
+
+ /* Set writeback information */
+ pipes[pipe_cnt].dout.wb_enable = 0;
+ pipes[pipe_cnt].dout.num_active_wb = 0;
+ for (j = 0; j < stream->num_wb_info; j++) {
+ struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+ if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+ (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+ pipes[pipe_cnt].dout.wb_enable = 1;
+ pipes[pipe_cnt].dout.num_active_wb++;
+ dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+ dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+ dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+ /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+ if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+ dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+ } else {
+ dout_wb.wb_htaps_luma = 1;
+ dout_wb.wb_vtaps_luma = 1;
+ }
+ dout_wb.wb_htaps_chroma = 0;
+ dout_wb.wb_vtaps_chroma = 0;
+ dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ dout_wb.wb_pixel_format = dm_444_64;
+ else
+ dout_wb.wb_pixel_format = dm_444_32;
+
+ /* Workaround for cases where multiple writebacks are connected to the same plane.
+ * In that case, compute the worst case and set the associated writeback parameters.
+ * This workaround is necessary because the DML computation assumes only one set of
+ * writeback parameters per pipe.
+ */
+ writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+ dout_wb.wb_pixel_format,
+ pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+ dout_wb.wb_hratio,
+ dout_wb.wb_vratio,
+ dout_wb.wb_htaps_luma,
+ dout_wb.wb_vtaps_luma,
+ dout_wb.wb_src_width,
+ dout_wb.wb_dst_width,
+ pipes[pipe_cnt].pipe.dest.htotal,
+ dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+ if (writeback_dispclk > max_calc_writeback_dispclk) {
+ max_calc_writeback_dispclk = writeback_dispclk;
+ pipes[pipe_cnt].dout.wb = dout_wb;
+ }
+ }
+ }
+
+ pipe_cnt++;
+ }
+}
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe)
+{
+ int i;
+
+ dc_assert_fp_enabled();
+
+ for (i = 0; i < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); i++) {
+ wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ }
+
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+}
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+
+ dc_assert_fp_enabled();
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ /* Set B:
+ * DCFCLK: 1GHz or min required above 1GHz
+ * FCLK/UCLK: Max
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set D:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+ */
+ /*
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
+
+ /* Set C:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * pstate latency overridden to 5us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+ /* Find the largest dummy-p-state table entry that is lower than the DRAM speed; anything lower than DPM0 still uses DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (!pstate_en) {
+ /* The only difference between A and C is p-state latency, if p-state is not supported we want to
+ * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ /* Make set D = set A until set D is enabled */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ DC_FP_START();
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+
+}
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+}
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
+{
+ dc_assert_fp_enabled();
+
+ if (!dcn30_bb_max_clk->max_dcfclk_mhz)
+ dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+ if (!dcn30_bb_max_clk->max_dispclk_mhz)
+ dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+ if (!dcn30_bb_max_clk->max_dppclk_mhz)
+ dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+ if (!dcn30_bb_max_clk->max_phyclk_mhz)
+ dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+}
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ dc_assert_fp_enabled();
+
+ bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
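+
+/* Worked example with purely hypothetical numbers (the real values live in the
+ * dcn3_0_soc table): uclk_mts = 1600, num_chans = 8, dram_channel_width_bytes = 8
+ * and both max_avg bandwidth percentages at 60 give
+ * bw_from_dram = 1600 * 8 * 8 * 0.6 = 61440 MB/s; with return_bus_width_bytes = 64
+ * that yields optimal_dcfclk = 61440 / (64 * 0.6) = 1600 MHz, and with
+ * fabric_datapath_to_dcn_data_return_bytes = 32, optimal_fclk = 3200 MHz.
+ */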
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts)
+{
+ unsigned int i;
+
+ dc_assert_fp_enabled();
+
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state = i;
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = dcn30_bb_max_clk->max_dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = dcn30_bb_max_clk->max_phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+ dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
new file mode 100644
index 000000000000..dedfe7b5f173
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_FPU_H__
+#define __DCN30_FPU_H__
+
+#include "core_types.h"
+#include "dcn20/dcn20_optc.h"
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg);
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe);
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc);
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk);
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk);
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts);
+
+
+#endif /* __DCN30_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
new file mode 100644
index 000000000000..54db2eca9e6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -0,0 +1,784 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "resource.h"
+#include "clk_mgr.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn31_fpu.h"
+
+/**
+ * DOC: DCN31x FPU manipulation Overview
+ *
+ * The DCN architecture relies on FPU operations, which require special
+ * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
+ * want to avoid spreading FPU access across multiple files. With this idea in
+ * mind, this file aims to centralize all DCN3.1.x functions that require FPU
+ * access in a single place. Code in this file follows the following code
+ * pattern:
+ *
+ * 1. Functions that use FPU operations should be isolated in static functions.
+ * 2. The FPU functions should have the noinline attribute to ensure that
+ *   anything dealing with FP registers is contained within this call.
+ * 3. Any function that needs to be accessed outside this file requires a
+ *   public interface that does not use any FPU reference.
+ * 4. Developers **must not** use DC_FP_START/END in this file, but they need
+ *   to ensure that the caller invokes it before accessing any function
+ *   available in this file. For this reason, public functions in this file
+ *   must invoke dc_assert_fp_enabled(); see the illustrative caller sketch
+ *   below.
+ */
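+
+/* Illustrative caller sketch (not part of this patch): code outside this file
+ * is expected to wrap the helpers below in the FPU guards, roughly:
+ *
+ *   DC_FP_START();
+ *   dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ *   DC_FP_END();
+ *
+ * so that dc_assert_fp_enabled() inside the helper sees an already-open FPU
+ * context.
+ */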
+
+struct _vcs_dpi_ip_params_st dcn3_1_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 625.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+struct _vcs_dpi_ip_params_st dcn3_15_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE,
+ .min_comp_buffer_size_kbytes = DCN3_15_MIN_COMPBUF_SIZE_KB,
+ .config_return_buffer_size_in_kbytes = 1024,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 49,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 9,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 50.0,
+ .sr_enter_plus_exit_z8_time_us = 50.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.38,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+struct _vcs_dpi_ip_params_st dcn3_16_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1024,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 556.0,
+ .dppclk_mhz = 556.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 445.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 625.0,
+ .dppclk_mhz = 625.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 625.0,
+ .dppclk_mhz = 625.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1112.0,
+ .dppclk_mhz = 1112.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1250.0,
+ .dppclk_mhz = 1250.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 625.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn31_calculate_wm_and_dlg_fp(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+
+ dc_assert_fp_enabled();
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ /* Clocks are not recalculated for 0-pipe configs, and leaving them high
+ * would block low power states such as S0i3.
+ * Override any clocks that can block S0i3 to their minimum here.
+ */
+ if (pipe_cnt == 0) {
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
+ return;
+ }
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+#if 0 // TODO
+ /* Set B:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set C:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Set D:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
+
+ /* Set A:
+ * All clocks min required
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ /* TODO: remove: */
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+ /* end remove*/
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
+void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_1_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_1_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_1_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ int i, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dc_assert_fp_enabled();
+
+ dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Setup soc to always use max dispclk/dppclk to avoid odm-to-lower-voltage */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dcn3_15_soc.clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ dcn3_15_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ dcn3_15_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ dcn3_15_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ dcn3_15_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* These aren't actually read from smu, but rather set in clk_mgr defaults */
+ dcn3_15_soc.clock_limits[i].dtbclk_mhz = clk_table->entries[i].dtbclk_mhz;
+ dcn3_15_soc.clock_limits[i].phyclk_d18_mhz = clk_table->entries[i].phyclk_d18_mhz;
+ dcn3_15_soc.clock_limits[i].phyclk_mhz = clk_table->entries[i].phyclk_mhz;
+
+ /* Clocks independent of voltage level. */
+ dcn3_15_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+ dcn3_15_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
+ dcn3_15_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3.0;
+ }
+ dcn3_15_soc.num_states = clk_table->num_entries;
+
+
+ /* Set vco to max_dispclk * 2 to make sure the highest dispclk is always available for dml calcs,
+ * no impact outside of dml validation
+ */
+ dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+
+ dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_16_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ // Ported from DCN315
+ if (clk_table->num_entries == 1) {
+ /* SMU gives one DPM level, let's take the highest one */
+ closest_clk_lvl = dcn3_16_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_16_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_16_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
new file mode 100644
index 000000000000..24ac19c83687
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN31_FPU_H__
+#define __DCN31_FPU_H__
+
+#define DCN3_1_DEFAULT_DET_SIZE 384
+#define DCN3_15_DEFAULT_DET_SIZE 192
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_16_DEFAULT_DET_SIZE 192
+
+void dcn31_calculate_wm_and_dlg_fp(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index e0fecf127bd5..53d760e169e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
+ int blank_lines;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+ blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+ if (blank_lines < 0)
+ blank_lines = 0;
+ if (blank_lines != 0) {
+ disp_dlg_regs->optimized_min_dst_y_next_start_us =
+ ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+ disp_dlg_regs->optimized_min_dst_y_next_start =
+ (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2));
+ } else {
+ // use unoptimized value
+ disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+ }
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
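Illustration (not part of the patch): a minimal standalone sketch of the extended-blank arithmetic added above, using assumed example timing values (not taken from the patch) to show how the optimized min_dst_y_next_start and its microsecond variant follow from the blank line count.

#include <stdio.h>

int main(void)
{
	/* assumed example timings, for illustration only */
	unsigned int vblank_end = 40, vtotal_min = 1125, vblank_start = 1084;
	unsigned int vstartup_start = 26, hactive = 1920, pixel_rate_mhz = 148;
	unsigned int dlg_vblank_start = vblank_start;		/* progressive case */
	unsigned int min_dst_y_next_start = dlg_vblank_start * 4;	/* dml_pow(2, 2) */
	int blank_lines = (int)(vblank_end + vtotal_min - vblank_start - vstartup_start - 1);

	if (blank_lines < 0)
		blank_lines = 0;
	if (blank_lines != 0) {
		printf("optimized_min_dst_y_next_start_us = %u\n",
		       ((unsigned int)blank_lines * hactive) / pixel_rate_mhz);
		printf("optimized_min_dst_y_next_start    = %u\n",
		       (unsigned int)(dlg_vblank_start + blank_lines) * 4);
	} else {
		/* fall back to the unoptimized value, as the patch does */
		printf("optimized_min_dst_y_next_start    = %u\n", min_dst_y_next_start);
	}
	return 0;
}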
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 59f0a61c33cf..2df660cd8801 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
index cad244c023cd..d7cd8cc24758 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -27,7 +27,7 @@
#define __RC_CALC_FPU_H__
#include "os_types.h"
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc.h>
#define QP_SET_SIZE 15
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index efc2339f1fa0..fa39a06eed1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,10 +22,10 @@
* Author: AMD
*/
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dsc_helper.h>
#include "dc_hw_types.h"
#include "dsc.h"
-#include <drm/dp/drm_dp_helper.h>
#include "dc.h"
#include "rc_calc.h"
#include "fixed31_32.h"
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
+ is_dsc_possible = (min_slices_h <= max_slices_h);
+
if (pic_width % min_slices_h != 0)
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
if (min_slices_h == 0 && max_slices_h == 0)
is_dsc_possible = false;
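Illustration (not part of the patch): as I read the reordering above, recording is_dsc_possible before the divisibility check keeps a min-slices-exceeds-max case from being masked when min_slices_h is later zeroed. A standalone sketch with assumed values (1366-pixel width, minimum 8 slices, hardware maximum 4):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* assumed values: the panel needs more slices than the DSC HW offers */
	int pic_width = 1366, min_slices_h = 8, max_slices_h = 4;
	bool is_dsc_possible;

	/* new order from the patch: record feasibility before min_slices_h
	 * can be zeroed by the divisibility check below */
	is_dsc_possible = (min_slices_h <= max_slices_h);	/* false: 8 > 4 */

	if (pic_width % min_slices_h != 0)
		min_slices_h = 0;

	if (min_slices_h == 0 && max_slices_h == 0)
		is_dsc_possible = false;

	/* with the old order the comparison would have used the zeroed
	 * min_slices_h (0 <= 4) and reported true for this case */
	printf("is_dsc_possible = %d\n", is_dsc_possible);
	return 0;
}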
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index 9f70e87b3ecb..ad80bde9bc0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -26,7 +26,7 @@
#ifndef __DSCC_TYPES_H__
#define __DSCC_TYPES_H__
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc.h>
#ifndef NUM_BUF_RANGES
#define NUM_BUF_RANGES 15
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 7e306aa3e2b9..f0aea988fef0 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -22,7 +22,7 @@
* Authors: AMD
*
*/
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
#include "dscc_types.h"
#include "rc_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index d1c2ec58676d..0f4a22be8c40 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -73,7 +73,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
@@ -114,7 +113,7 @@ GPIO_DCN315 = hw_translate_dcn315.o hw_factory_dcn315.o
AMD_DAL_GPIO_DCN315 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn315/,$(GPIO_DCN315))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN315)
-endif
+
###############################################################################
# Diagnostics on FPGA
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 3b7df1ac26be..687d4f128480 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
@@ -266,4 +265,3 @@ void dal_hw_factory_dcn30_init(struct hw_factory *factory)
factory->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h
index 131e742b050a..e491af845b83 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#ifndef __DAL_HW_FACTORY_DCN30_H__
#define __DAL_HW_FACTORY_DCN30_H__
@@ -30,4 +29,3 @@
void dal_hw_factory_dcn30_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN30_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
index 6b6b7c7bd12f..3169c567475f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
@@ -26,7 +26,6 @@
/*
* Pre-requisites: headers required by header of this unit
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "hw_translate_dcn30.h"
#include "dm_services.h"
@@ -384,4 +383,3 @@ void dal_hw_translate_dcn30_init(struct hw_translate *tr)
tr->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h
index ed55410b7a4e..511a0dd49140 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#ifndef __DAL_HW_TRANSLATE_DCN30_H__
#define __DAL_HW_TRANSLATE_DCN30_H__
@@ -32,4 +31,3 @@ struct hw_translate;
void dal_hw_translate_dcn30_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN30_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 5c00ffde3996..ef4f69612097 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -48,13 +48,11 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
#include "dcn30/hw_factory_dcn30.h"
#include "dcn315/hw_factory_dcn315.h"
-#endif
#include "diagnostics/hw_factory_diag.h"
@@ -98,7 +96,6 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_factory_dcn10_init(factory);
@@ -121,7 +118,6 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_15:
dal_hw_factory_dcn315_init(factory);
return true;
-#endif
default:
ASSERT_CRITICAL(false);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 7a39cbc01f63..1db4f1414d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -46,13 +46,11 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
#include "dcn30/hw_translate_dcn30.h"
#include "dcn315/hw_translate_dcn315.h"
-#endif
#include "diagnostics/hw_translate_diag.h"
@@ -93,7 +91,6 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_translate_dcn10_init(translate);
@@ -116,7 +113,6 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_15:
dal_hw_translate_dcn315_init(translate);
return true;
-#endif
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 951c9b60917d..555d4d9e1454 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,9 +33,7 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "mpc.h"
-#endif
#include "dwb.h"
#include "mcif_wb.h"
#include "panel_cntl.h"
@@ -181,7 +179,6 @@ struct resource_funcs {
void (*update_bw_bounding_box)(
struct dc *dc,
struct clk_bw_params *bw_params);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool (*acquire_post_bldn_3dlut)(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -194,7 +191,7 @@ struct resource_funcs {
const struct resource_pool *pool,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-#endif
+
enum dc_status (*add_dsc_to_stream_resource)(
struct dc *dc, struct dc_state *state,
struct dc_stream_state *stream);
@@ -254,10 +251,9 @@ struct resource_pool {
struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_count;
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
-#endif
+
struct {
unsigned int xtalin_clock_inKhz;
unsigned int dccg_ref_clock_inKhz;
@@ -286,9 +282,7 @@ struct resource_pool {
struct dmcu *dmcu;
struct dmub_psr *psr;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct abm *multiple_abms[MAX_PIPES];
-#endif
const struct resource_funcs *funcs;
const struct resource_caps *res_cap;
@@ -380,7 +374,6 @@ struct pipe_ctx {
struct pipe_ctx *next_odm_pipe;
struct pipe_ctx *prev_odm_pipe;
-#ifdef CONFIG_DRM_AMD_DC_DCN
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -390,7 +383,7 @@ struct pipe_ctx {
struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
int det_buffer_size_kb;
bool unbounded_req;
-#endif
+
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -419,9 +412,7 @@ struct resource_context {
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
-#endif
};
struct dce_bw_output {
@@ -484,9 +475,7 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_bw_internal_vars dcn_bw_vars;
-#endif
struct clk_mgr *clk_mgr;
@@ -497,4 +486,11 @@ struct dc_state {
} perf_params;
};
+struct dc_bounding_box_max_clk {
+ int max_dcfclk_mhz;
+ int max_dispclk_mhz;
+ int max_dppclk_mhz;
+ int max_phyclk_mhz;
+};
+
#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index ab9939db8cea..a3c1e9c56d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,6 +33,7 @@
#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
+#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
struct dc_link;
struct dc_stream_state;
@@ -147,12 +148,12 @@ bool dp_is_max_vs_reached(
void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane dpcd_lane_settings[]);
void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane dpcd_lane_settings[]);
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 74dafd0f9d3d..39c1d1d07357 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -87,6 +87,11 @@ union dpia_set_config_data {
*/
enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
 * DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 337c0161e72d..806f3041db14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -619,7 +619,7 @@ struct dcn_ip_params {
};
extern const struct dcn_ip_params dcn10_ip_defaults;
-bool dcn10_validate_bandwidth(
+bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index c920c4b6077d..46ce5a0ee4ec 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -91,6 +91,7 @@ struct clk_limit_table_entry {
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
unsigned int phyclk_mhz;
+ unsigned int phyclk_d18_mhz;
unsigned int wck_ratio;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 346f0ba73e86..d7b8d586b523 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -104,6 +104,7 @@ struct dsc_funcs {
uint8_t *dsc_packed_pps);
void (*dsc_enable)(struct display_stream_compressor *dsc, int opp_pipe);
void (*dsc_disable)(struct display_stream_compressor *dsc);
+ void (*dsc_disconnect)(struct display_stream_compressor *dsc);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index e45b7993c5c5..ad69d78c4ac3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -195,6 +195,9 @@ struct hubp_funcs {
void (*hubp_set_flip_int)(struct hubp *hubp);
+ void (*program_extended_blank)(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized);
+
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 59a704781e34..554d2e33bd7f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -310,6 +310,8 @@ struct timing_generator_funcs {
uint32_t slave_pixel_clock_100Hz,
uint8_t master_clock_divider,
uint8_t slave_clock_divider);
+
+ void (*init_odm)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 3b3090e3d327..e6c49ef8b584 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -37,9 +37,12 @@ struct dc_link;
struct link_resource;
struct pipe_ctx;
struct encoder_set_dp_phy_pattern_param;
+struct link_mst_stream_allocation_table;
struct link_hwss_ext {
- /* function pointers below require check for NULL at all time
+	/* function pointers below may require a NULL check if the caller
+	 * considers a missing implementation expected in some cases, or not
+	 * critical enough to be investigated immediately
* *********************************************************************
*/
void (*set_hblank_min_symbol_width)(struct pipe_ctx *pipe_ctx,
@@ -62,6 +65,9 @@ struct link_hwss_ext {
const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+ void (*update_stream_allocation_table)(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
};
struct link_hwss {
@@ -72,6 +78,7 @@ struct link_hwss {
*/
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
+ void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
};
#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index f305d4c9a122..5f49048dde47 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -71,7 +71,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
@@ -136,7 +135,7 @@ IRQ_DCN31 = irq_service_dcn31.o
AMD_DAL_IRQ_DCN31= $(addprefix $(AMDDALPATH)/dc/irq/dcn31/,$(IRQ_DCN31))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN31)
-endif
+
###############################################################################
# DCN 315
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index ac0c6a62d17b..146cd1819912 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -22,8 +22,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#include "dm_services.h"
#include "include/logger_interface.h"
@@ -450,4 +448,3 @@ struct irq_service *dal_irq_service_dcn30_create(
return irq_service;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h
index 080e21239688..c6c7b184d3c1 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h
@@ -23,8 +23,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#ifndef __DAL_IRQ_SERVICE_DCN30_H__
#define __DAL_IRQ_SERVICE_DCN30_H__
@@ -34,4 +32,3 @@ struct irq_service *dal_irq_service_dcn30_create(
struct irq_service_init_data *init_data);
#endif /* __DAL_IRQ_SERVICE_DCN30_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index a2a4fbeb83f8..cb38d4c527d4 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -37,13 +37,8 @@
#endif
#include "dce80/irq_service_dce80.h"
-
#include "dce120/irq_service_dce120.h"
-
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/irq_service_dcn10.h"
-#endif
#include "reg_helper.h"
#include "irq_service.h"
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
index e7047391934b..2c1a3bfcdb50 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
@@ -144,3 +144,23 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
{
return link->dp_trace.link_loss_count;
}
+
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up)
+{
+ if (!power_up)
+ /*save driver power off time stamp*/
+ link->dp_trace.edp_trace_power_timestamps.poweroff = dm_get_timestamp(link->dc->ctx);
+ else
+ link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);
+}
+
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
+{
+ return link->dp_trace.edp_trace_power_timestamps.poweron;
+}
+
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
+{
+ return link->dp_trace.edp_trace_power_timestamps.poweroff;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
index 702f97c6ead0..26700e3cd65e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
@@ -54,4 +54,9 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up);
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
index 0f845113a6aa..776e822abcbb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
@@ -62,6 +62,46 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
}
+void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ if (!dc_is_virtual_signal(stream->signal))
+ stream_encoder->funcs->setup_stereo_sync(
+ stream_encoder,
+ pipe_ctx->stream_res.tg->inst,
+ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+
+ if (dc_is_dp_signal(stream->signal))
+ stream_encoder->funcs->dp_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ else if (dc_is_hdmi_tmds_signal(stream->signal))
+ stream_encoder->funcs->hdmi_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ stream->phy_pix_clk,
+ pipe_ctx->stream_res.audio != NULL);
+ else if (dc_is_dvi_signal(stream->signal))
+ stream_encoder->funcs->dvi_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+ true : false);
+ else if (dc_is_lvds_signal(stream->signal))
+ stream_encoder->funcs->lvds_set_stream_attribute(
+ stream_encoder,
+ &stream->timing);
+
+ if (dc_is_dp_signal(stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+}
+
void enable_dio_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
@@ -113,15 +153,27 @@ void set_dio_dp_lane_settings(struct dc_link *link,
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
+static void update_dio_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ ASSERT(link_enc);
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
+
static const struct link_hwss dio_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
.disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dio_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
index 680df20b1fa3..08f22b32df48 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
@@ -34,6 +34,7 @@ void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
+void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx);
void enable_dio_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
index 35b206225201..89d4e8159138 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
@@ -25,16 +25,44 @@
#include "link_hwss_dpia.h"
#include "core_types.h"
#include "link_hwss_dio.h"
+#include "link_enc_cfg.h"
+
+#define DC_LOGGER_INIT(logger)
+
+static void update_dpia_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ static enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+ int i;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ for (i = 0; i < table->stream_count; i++)
+ mst_alloc_slots += table->stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status == DC_OK);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+
+ ASSERT(link_enc);
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
.disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dpia_stream_allocation_table,
},
};
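Illustration (not part of the patch): update_dpia_stream_allocation_table() above sums the slot_count of every entry in the MST allocation table before passing the total to DMUB. A standalone sketch with assumed per-stream slot counts (struct stream_alloc is a stand-in, not the driver type):

#include <stdio.h>
#include <stdint.h>

struct stream_alloc { uint8_t slot_count; };

int main(void)
{
	/* assumed per-stream slot counts, for illustration only */
	struct stream_alloc allocations[] = { {4}, {3}, {2} };
	int stream_count = 3, i;
	uint8_t mst_alloc_slots = 0;

	for (i = 0; i < stream_count; i++)
		mst_alloc_slots += allocations[i].slot_count;

	/* this total is what the patch hands to dc_process_dmub_set_mst_slots() */
	printf("requesting %d MST slots from DMUB\n", mst_alloc_slots);
	return 0;
}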
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 74919491675f..87972dc8443d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -131,6 +131,22 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
}
+static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ stream_enc->funcs->set_stream_attribute(
+ stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->timing.flags.DSC,
+ false);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+}
+
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
@@ -228,9 +244,19 @@ static void set_hpo_dp_lane_settings(struct dc_link *link,
lane_settings[0].FFE_PRESET.raw);
}
+static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ link_res->hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ link_res->hpo_dp_link_enc,
+ table);
+}
+
static const struct link_hwss hpo_dp_link_hwss = {
.setup_stream_encoder = setup_hpo_dp_stream_encoder,
.reset_stream_encoder = reset_hpo_dp_stream_encoder,
+ .setup_stream_attribute = setup_hpo_dp_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
@@ -238,6 +264,7 @@ static const struct link_hwss hpo_dp_link_hwss = {
.disable_dp_link_output = disable_hpo_dp_link_output,
.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
.set_dp_lane_settings = set_hpo_dp_lane_settings,
+ .update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
index 9df273ca699b..4b5eccd994c4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
@@ -26,9 +26,28 @@
#include "core_types.h"
#include "virtual/virtual_link_hwss.h"
+static void setup_hpo_frl_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct hpo_frl_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_frl_stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe;
+ int odm_combine_num_segments = 1;
+
+ /* get number of ODM combine input segments */
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_combine_num_segments++;
+
+ stream_enc->funcs->hdmi_frl_set_stream_attribute(
+ stream_enc,
+ &stream->timing,
+ &stream->link->frl_link_settings.borrow_params,
+ odm_combine_num_segments);
+}
+
static const struct link_hwss hpo_frl_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
+ .setup_stream_attribute = setup_hpo_frl_stream_attribute,
};
bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 17d05071b809..981a9ed6fb61 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -35,8 +35,8 @@
#include <asm/byteorder.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
-#include <drm/dp/drm_dp_helper.h>
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 525eba2a3354..501173ce270e 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -29,12 +29,17 @@ void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
+void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+}
+
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
static const struct link_hwss virtual_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
+ .setup_stream_attribute = virtual_setup_stream_attribute,
};
const struct link_hwss *get_virtual_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
index e6bcb4bb0f3a..fbcbc5afb47d 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
@@ -28,6 +28,7 @@
#include "core_types.h"
void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx);
+void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx);
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx);
const struct link_hwss *get_virtual_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 56757a286a03..f5cb8932bd5c 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -243,6 +243,7 @@ struct dmub_srv_hw_params {
bool power_optimization;
bool dpia_supported;
bool disable_dpia;
+ bool usb4_cm_version;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 71214c7a60fc..385c28238beb 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -44,24 +44,6 @@
#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-/* Firmware versioning. */
-#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x929554ba
-#define DMUB_FW_VERSION_MAJOR 0
-#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 108
-#define DMUB_FW_VERSION_TEST 0
-#define DMUB_FW_VERSION_VBIOS 0
-#define DMUB_FW_VERSION_HOTFIX 0
-#define DMUB_FW_VERSION_UCODE (((DMUB_FW_VERSION_MAJOR & 0xFF) << 24) | \
- ((DMUB_FW_VERSION_MINOR & 0xFF) << 16) | \
- ((DMUB_FW_VERSION_REVISION & 0xFF) << 8) | \
- ((DMUB_FW_VERSION_TEST & 0x1) << 7) | \
- ((DMUB_FW_VERSION_VBIOS & 0x1) << 6) | \
- (DMUB_FW_VERSION_HOTFIX & 0x3F))
-
-#endif
-
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
@@ -368,8 +350,9 @@ union dmub_fw_boot_options {
uint32_t power_optimization: 1;
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
+ uint32_t usb4_cm_version: 1; /**< 1 CM support */
- uint32_t reserved : 18; /**< reserved */
+ uint32_t reserved : 17; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -672,6 +655,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD_GET_USBC_CABLE_ID = 81,
/**
+ * Command type used to query HPD state.
+ */
+ DMUB_CMD__QUERY_HPD_STATE = 82,
+ /**
* Command type used for all VBIOS interface commands.
*/
DMUB_CMD__VBIOS = 128,
@@ -1370,6 +1357,30 @@ struct dmub_rb_cmd_dp_set_config_reply {
struct set_config_reply_control_data set_config_reply_control;
};
+/**
+ * Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command.
+ */
+struct dmub_cmd_hpd_state_query_data {
+ uint8_t instance; /**< HPD instance or DPIA instance */
+ uint8_t result; /**< For returning HPD state */
+ enum aux_channel_type ch_type; /**< enum aux_channel_type */
+ enum aux_return_code_type status; /**< for returning the status of command */
+};
+
+/**
+ * Definition of a DMUB_CMD__QUERY_HPD_STATE command.
+ */
+struct dmub_rb_cmd_query_hpd_state {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command.
+ */
+ struct dmub_cmd_hpd_state_query_data data;
+};
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -1523,8 +1534,6 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_FAST_LP,
};
-
-
/**
* Data passed from driver to FW in a DMUB_CMD__PSR_COPY_SETTINGS command.
*/
@@ -1704,9 +1713,16 @@ struct dmub_rb_cmd_psr_enable_data {
*/
uint8_t panel_inst;
/**
- * Explicit padding to 4 byte boundary.
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
*/
- uint8_t pad[2];
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
};
/**
@@ -1772,16 +1788,9 @@ struct dmub_cmd_psr_force_static_data {
*/
uint8_t panel_inst;
/**
- * Phy state to enter.
- * Values to use are defined in dmub_phy_fsm_state
- */
- uint8_t phy_fsm_state;
- /**
- * Phy rate for DP - RBR/HBR/HBR2/HBR3.
- * Set this using enum phy_link_rate.
- * This does not support HDMI/DP2 for now.
+ * Explicit padding to 4 byte boundary.
*/
- uint8_t phy_rate;
+ uint8_t pad[2];
};
/**
@@ -2776,6 +2785,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD_GET_USBC_CABLE_ID command.
*/
struct dmub_rb_cmd_get_usbc_cable_id cable_id;
+
+ /**
+ * Definition of a DMUB_CMD__QUERY_HPD_STATE command.
+ */
+ struct dmub_rb_cmd_query_hpd_state query_hpd;
};
/**
@@ -3044,9 +3058,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);
- //uint64_t volatile *p = (uint64_t volatile *)data;
- uint64_t temp;
+ uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
/* Don't remove this.
@@ -3054,7 +3066,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
* for this function to be effective.
*/
for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
- temp = *data++;
+ (void)READ_ONCE(*data++);
rptr += DMUB_RB_CMD_SIZE;
if (rptr >= rb->capacity)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 82c651535628..7c9330a61ac1 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -340,6 +340,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
+ boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index f883d87791fe..73b9e0a87e54 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -39,6 +39,8 @@
#define DP_BRANCH_HW_REV_20 0x20
#define DP_DEVICE_ID_38EC11 0x38EC11
+#define DP_FORCE_PSRSU_CAPABILITY 0x40F
+
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index ac822181359c..b2df07f9e91c 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DPCD_DEFS_H__
#define __DAL_DPCD_DEFS_H__
-#include <drm/dp/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
#define DP_SINK_HW_REVISION_START 0x409
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index fed1edc038d8..c6bbd262f1ac 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -162,6 +162,7 @@ enum connector_id {
CONNECTOR_ID_MXM = 21,
CONNECTOR_ID_WIRELESS = 22,
CONNECTOR_ID_MIRACAST = 23,
+ CONNECTOR_ID_USBC = 24,
CONNECTOR_ID_VIRTUAL = 100
};
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bd1d1dc93629..03fa63d56fa6 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -46,6 +46,10 @@
/* Number of consecutive frames to check before entering/exiting fixed refresh */
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 10
+/* Flip interval workaround constants */
+#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
+#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
+#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500
struct core_freesync {
struct mod_freesync public;
@@ -466,6 +470,41 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
}
}
+static void determine_flip_interval_workaround_req(struct mod_vrr_params *in_vrr,
+ unsigned int curr_time_stamp_in_us)
+{
+ in_vrr->flip_interval.vsync_to_flip_in_us = curr_time_stamp_in_us -
+ in_vrr->flip_interval.v_update_timestamp_in_us;
+
+ /* Determine conditions for stopping workaround */
+ if (in_vrr->flip_interval.flip_interval_workaround_active &&
+ in_vrr->flip_interval.vsyncs_between_flip < VSYNCS_BETWEEN_FLIP_THRESHOLD &&
+ in_vrr->flip_interval.vsync_to_flip_in_us > FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
+ in_vrr->flip_interval.flip_interval_detect_counter = 0;
+ in_vrr->flip_interval.program_flip_interval_workaround = true;
+ in_vrr->flip_interval.flip_interval_workaround_active = false;
+ } else {
+ /* Determine conditions for starting workaround */
+ if (in_vrr->flip_interval.vsyncs_between_flip >= VSYNCS_BETWEEN_FLIP_THRESHOLD &&
+ in_vrr->flip_interval.vsync_to_flip_in_us < FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
+			/* Increase flip interval counter if we have 2 vsyncs between flips and
+			 * the vsync to flip interval is less than 500us
+ */
+ in_vrr->flip_interval.flip_interval_detect_counter++;
+ if (in_vrr->flip_interval.flip_interval_detect_counter > FREESYNC_CONSEC_FLIP_AFTER_VSYNC) {
+ /* Start workaround if we detect 5 consecutive instances of the above case */
+ in_vrr->flip_interval.program_flip_interval_workaround = true;
+ in_vrr->flip_interval.flip_interval_workaround_active = true;
+ }
+ } else {
+			/* Reset the flip interval counter if the condition is no longer met */
+ in_vrr->flip_interval.flip_interval_detect_counter = 0;
+ }
+ }
+
+ in_vrr->flip_interval.vsyncs_between_flip = 0;
+}
+
static bool vrr_settings_require_update(struct core_freesync *core_freesync,
struct mod_freesync_config *in_config,
unsigned int min_refresh_in_uhz,
@@ -1179,6 +1218,9 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
in_out_vrr);
}
+ determine_flip_interval_workaround_req(in_out_vrr,
+ curr_time_stamp_in_us);
+
}
}
@@ -1187,6 +1229,8 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
struct mod_vrr_params *in_out_vrr)
{
struct core_freesync *core_freesync = NULL;
+ unsigned int cur_timestamp_in_us;
+ unsigned long long cur_tick;
if ((mod_freesync == NULL) || (stream == NULL) || (in_out_vrr == NULL))
return;
@@ -1196,6 +1240,36 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
if (in_out_vrr->supported == false)
return;
+ cur_tick = dm_get_timestamp(core_freesync->dc->ctx);
+ cur_timestamp_in_us = (unsigned int)
+ div_u64(dm_get_elapse_time_in_ns(core_freesync->dc->ctx, cur_tick, 0), 1000);
+
+ in_out_vrr->flip_interval.vsyncs_between_flip++;
+ in_out_vrr->flip_interval.v_update_timestamp_in_us = cur_timestamp_in_us;
+
+ if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
+ (in_out_vrr->flip_interval.flip_interval_workaround_active ||
+ (!in_out_vrr->flip_interval.flip_interval_workaround_active &&
+ in_out_vrr->flip_interval.program_flip_interval_workaround))) {
+ // set freesync vmin vmax to nominal for workaround
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(
+ stream, in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max =
+ in_out_vrr->adjust.v_total_min;
+ in_out_vrr->flip_interval.program_flip_interval_workaround = false;
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = true;
+ return;
+ }
+
+ if (in_out_vrr->state != VRR_STATE_ACTIVE_VARIABLE &&
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup) {
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = false;
+ in_out_vrr->flip_interval.flip_interval_detect_counter = 0;
+ in_out_vrr->flip_interval.vsyncs_between_flip = 0;
+ in_out_vrr->flip_interval.vsync_to_flip_in_us = 0;
+ }
+
/* Below the Range Logic */
/* Only execute if in fullscreen mode */
@@ -1302,7 +1376,7 @@ unsigned long long mod_freesync_calc_field_rate_from_timing(
bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz)
+ uint32_t nominal_field_rate_in_uhz)
{
/* Typically nominal refresh calculated can have some fractional part.
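Illustration (not part of the patch): a standalone sketch of the flip interval workaround detection added above. The counter only arms the workaround after more than FREESYNC_CONSEC_FLIP_AFTER_VSYNC consecutive short vsync-to-flip intervals and resets as soon as the condition is not met; the sample sequence below is assumed, not taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500

int main(void)
{
	/* assumed sequence of (vsyncs_between_flip, vsync_to_flip_in_us) samples */
	unsigned int samples[][2] = {
		{2, 300}, {2, 250}, {2, 280}, {1, 900},
		{2, 310}, {2, 200}, {2, 260}, {2, 240}, {2, 220}, {2, 210},
	};
	unsigned int detect_counter = 0;
	bool workaround_active = false;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (samples[i][0] >= VSYNCS_BETWEEN_FLIP_THRESHOLD &&
		    samples[i][1] < FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
			if (++detect_counter > FREESYNC_CONSEC_FLIP_AFTER_VSYNC)
				workaround_active = true;
		} else {
			detect_counter = 0;
		}
		printf("sample %u: counter=%u active=%d\n", i, detect_counter, workaround_active);
	}
	return 0;
}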
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 3e81850a7ffe..5e01c6e24cbc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -251,6 +251,33 @@ out:
return status;
}
+static enum mod_hdcp_status update_display_adjustments(struct mod_hdcp *hdcp,
+ struct mod_hdcp_display *display,
+ struct mod_hdcp_display_adjustment *adj)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
+
+ if (is_in_authenticated_states(hdcp) &&
+ is_dp_mst_hdcp(hdcp) &&
+ display->adjust.disable == true &&
+ adj->disable == false) {
+ display->adjust.disable = false;
+ if (is_hdcp1(hdcp))
+ status = mod_hdcp_hdcp1_enable_dp_stream_encryption(hdcp);
+ else if (is_hdcp2(hdcp))
+ status = mod_hdcp_hdcp2_enable_dp_stream_encryption(hdcp);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ display->adjust.disable = true;
+ }
+
+ if (status == MOD_HDCP_STATUS_SUCCESS &&
+ memcmp(adj, &display->adjust,
+ sizeof(struct mod_hdcp_display_adjustment)) != 0)
+ status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
+
+ return status;
+}
/*
* Implementation of functions in mod_hdcp.h
*/
@@ -391,7 +418,7 @@ out:
return status;
}
-enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
+enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
uint8_t index,
struct mod_hdcp_link_adjustment *link_adjust,
struct mod_hdcp_display_adjustment *display_adjust,
@@ -419,6 +446,15 @@ enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
goto out;
}
+ if (memcmp(link_adjust, &hdcp->connection.link.adjust,
+ sizeof(struct mod_hdcp_link_adjustment)) == 0 &&
+ memcmp(display_adjust, &display->adjust,
+ sizeof(struct mod_hdcp_display_adjustment)) != 0) {
+ status = update_display_adjustments(hdcp, display, display_adjust);
+ if (status != MOD_HDCP_STATUS_NOT_IMPLEMENTED)
+ goto out;
+ }
+
/* stop current authentication */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 8502263d2968..55c7d873175f 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -29,8 +29,8 @@
#include "mod_hdcp.h"
#include "hdcp_log.h"
-#include <drm/drm_hdcp.h>
-#include <drm/dp/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
enum mod_hdcp_trans_input_result {
UNKNOWN = 0,
@@ -445,6 +445,14 @@ static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp)
current_state(hdcp) <= HDCP2_DP_STATE_END);
}
+static inline uint8_t is_in_authenticated_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) == D1_A4_AUTHENTICATED ||
+ current_state(hdcp) == H1_A45_AUTHENTICATED ||
+ current_state(hdcp) == D2_A5_AUTHENTICATED ||
+ current_state(hdcp) == H2_A5_AUTHENTICATED);
+}
+
static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
{
return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 6ec918af3bff..1ddb4f5eac8e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -497,9 +497,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
return status;
}
-extern enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp1 *input)
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 75a158a2514c..cf6bc9446244 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -105,6 +105,16 @@ struct mod_vrr_params_fixed_refresh {
uint32_t frame_counter;
};
+struct mod_vrr_params_flip_interval {
+ bool flip_interval_workaround_active;
+ bool program_flip_interval_workaround;
+ bool do_flip_interval_workaround_cleanup;
+ uint32_t flip_interval_detect_counter;
+ uint32_t vsyncs_between_flip;
+ uint32_t vsync_to_flip_in_us;
+ uint32_t v_update_timestamp_in_us;
+};
+
struct mod_vrr_params {
bool supported;
bool send_info_frame;
@@ -121,6 +131,8 @@ struct mod_vrr_params {
struct mod_vrr_params_fixed_refresh fixed;
struct mod_vrr_params_btr btr;
+
+ struct mod_vrr_params_flip_interval flip_interval;
};
struct mod_freesync *mod_freesync_create(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f7420c3f5672..3348bb97ef81 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -294,7 +294,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
uint8_t index, struct mod_hdcp_output *output);
/* called per display to apply new authentication adjustment */
-enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
+enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
uint8_t index,
struct mod_hdcp_link_adjustment *link_adjust,
struct mod_hdcp_display_adjustment *display_adjust,
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index b691aa45e84f..79bc207415bc 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -100,7 +100,8 @@ enum vsc_packet_revision {
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
-#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__QMS_EN 0x04
+#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
-#define MASK_VTEM_MD2__RESERVED3 0xF8
+#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 2b00f334e93d..97928d4c3b9a 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -784,3 +784,41 @@ bool dmcu_load_iram(struct dmcu *dmcu,
return result;
}
+/*
+ * is_psr_su_specific_panel() - check if sink is AMD vendor-specific PSR-SU
+ * supported eDP device.
+ *
+ * @link: dc link pointer
+ *
+ * Return: true if AMDGPU vendor specific PSR-SU eDP panel
+ */
+bool is_psr_su_specific_panel(struct dc_link *link)
+{
+ if (link->dpcd_caps.edp_rev >= DP_EDP_14) {
+ if (link->dpcd_caps.psr_info.psr_version >= DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
+ return true;
+ /*
+ * Some panels will report PSR capabilities over additional DPCD bits.
+ * Such panels are approved despite reporting only PSR v3, as long as
+ * the additional bits are reported.
+ */
+ if (link->dpcd_caps.psr_info.psr_version < DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)
+ return false;
+
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) {
+ /*
+ * FIXME:
+ * This is the temporary workaround to disable PSRSU when system turned on
+			 * This is a temporary workaround to disable PSR-SU when the system has
+			 * enabled DSC on this specific sink. Once PSR-SU + DSC is fixed, this
+ */
+ if (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)
+ return false;
+
+ if (link->dpcd_caps.psr_info.force_psrsu_cap == 0x1)
+ return true;
+ }
+ }
+
+ return false;
+}
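Illustration (not part of the patch): the decision order in is_psr_su_specific_panel() above, restated as a standalone sketch. struct panel_caps and the numeric PSR version encodings are assumed stand-ins for the DPCD-derived fields; DSC support on the DP_BRANCH_DEVICE_ID_001CF8 sink vetoes the force_psrsu_cap override.

#include <stdbool.h>
#include <stdio.h>

/* assumed stand-ins for the DPCD-derived fields used by the patch */
struct panel_caps {
	unsigned int edp_rev;		/* e.g. 0x14 for eDP 1.4 */
	unsigned int psr_version;
	bool is_001cf8_sink;
	bool dsc_supported;
	bool force_psrsu_cap;
};

#define EDP_14			0x14
#define PSR2_Y_COORD		0x03	/* assumed encodings, for illustration */
#define PSR2_Y_COORD_ET		0x04

static bool is_psr_su_panel(const struct panel_caps *c)
{
	if (c->edp_rev < EDP_14)
		return false;
	if (c->psr_version >= PSR2_Y_COORD_ET)
		return true;
	if (c->psr_version < PSR2_Y_COORD)
		return false;
	if (c->is_001cf8_sink) {
		if (c->dsc_supported)	/* temporary PSR-SU + DSC exclusion */
			return false;
		if (c->force_psrsu_cap)
			return true;
	}
	return false;
}

int main(void)
{
	struct panel_caps panel = { EDP_14, PSR2_Y_COORD, true, false, true };

	printf("PSR-SU specific panel: %d\n", is_psr_su_panel(&panel));
	return 0;
}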
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 2a9f8e2d8080..1a634d8c78c5 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -52,4 +52,5 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
struct dmcu_iram_parameters params,
unsigned int inst);
+bool is_psr_su_specific_panel(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */