Diffstat (limited to 'drivers/gpu/drm/amd/display')
260 files changed, 22091 insertions, 16762 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index f4f3d2665a6b..2efe93f74f84 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -7,7 +7,8 @@ config DRM_AMD_DC
 	default y
 	depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
 	select SND_HDA_COMPONENT if SND_HDA_CORE
-	select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128)
+	# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
+	select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
 	help
 	  Choose this option if you want to use the new display engine
 	  support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 512c32327eb1..c420bce47acb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -28,7 +28,6 @@
 #include "dm_services_types.h"
 #include "dc.h"
-#include "dc_link_dp.h"
 #include "link_enc_cfg.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
@@ -39,6 +38,9 @@
 #include "dc/dc_edid_parser.h"
 #include "dc/dc_stat.h"
 #include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
 #include "vid.h"
 #include "amdgpu.h"
@@ -66,7 +68,7 @@
 #include "ivsrcid/ivsrcid_vislands30.h"
-#include "i2caux_interface.h"
+#include <linux/backlight.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
@@ -82,7 +84,6 @@
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>
@@ -105,7 +106,6 @@
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
-#include "modules/inc/mod_info_packet.h"
 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
@@ -147,14 +147,6 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* Number of bytes in PSP footer for firmware.
*/  #define PSP_FOOTER_BYTES 0x100 -/* - * DMUB Async to Sync Mechanism Status - */ -#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 -#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 -#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 -#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4 -  /**   * DOC: overview   * @@ -219,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  				    struct amdgpu_dm_connector *amdgpu_dm_connector, -				    uint32_t link_index, +				    u32 link_index,  				    struct amdgpu_encoder *amdgpu_encoder);  static int amdgpu_dm_encoder_init(struct drm_device *dev,  				  struct amdgpu_encoder *aencoder, @@ -271,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)  static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,  				  u32 *vbl, u32 *position)  { -	uint32_t v_blank_start, v_blank_end, h_position, v_position; +	u32 v_blank_start, v_blank_end, h_position, v_position;  	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))  		return -EINVAL; @@ -370,7 +362,7 @@ static void dm_pflip_high_irq(void *interrupt_params)  	struct amdgpu_device *adev = irq_params->adev;  	unsigned long flags;  	struct drm_pending_vblank_event *e; -	uint32_t vpos, hpos, v_blank_start, v_blank_end; +	u32 vpos, hpos, v_blank_start, v_blank_end;  	bool vrr_active;  	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -657,7 +649,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,  	struct drm_connector *connector;  	struct drm_connector_list_iter iter;  	struct dc_link *link; -	uint8_t link_index = 0; +	u8 link_index = 0;  	struct drm_device *dev;  	if (adev == NULL) @@ -758,7 +750,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)  	struct amdgpu_device *adev = irq_params->adev;  	struct amdgpu_display_manager *dm = &adev->dm;  	struct dmcub_trace_buf_entry entry = { 0 }; -	uint32_t count = 0; +	u32 count = 0;  	struct dmub_hpd_work *dmub_hpd_wrk;  	struct dc_link *plink = NULL; @@ -1024,7 +1016,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)  	struct dmub_srv_hw_params hw_params;  	enum dmub_status status;  	const unsigned char *fw_inst_const, *fw_bss_data; -	uint32_t i, fw_inst_const_size, fw_bss_data_size; +	u32 i, fw_inst_const_size, fw_bss_data_size;  	bool has_hw_support;  	if (!dmub_srv) @@ -1105,7 +1097,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)  	/* Initialize hardware. 
*/  	memset(&hw_params, 0, sizeof(hw_params));  	hw_params.fb_base = adev->gmc.fb_start; -	hw_params.fb_offset = adev->gmc.aper_base; +	hw_params.fb_offset = adev->vm_manager.vram_base_offset;  	/* backdoor load firmware and trigger dmub running */  	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) @@ -1185,32 +1177,46 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)  static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)  { -	uint64_t pt_base; -	uint32_t logical_addr_low; -	uint32_t logical_addr_high; -	uint32_t agp_base, agp_bot, agp_top; +	u64 pt_base; +	u32 logical_addr_low; +	u32 logical_addr_high; +	u32 agp_base, agp_bot, agp_top;  	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;  	memset(pa_config, 0, sizeof(*pa_config)); -	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; -	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - -	if (adev->apu_flags & AMD_APU_IS_RAVEN2) -		/* -		 * Raven2 has a HW issue that it is unable to use the vram which -		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the -		 * workaround that increase system aperture high address (add 1) -		 * to get rid of the VM fault and hardware hang. -		 */ -		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); -	else -		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; -  	agp_base = 0;  	agp_bot = adev->gmc.agp_start >> 24;  	agp_top = adev->gmc.agp_end >> 24; +	/* AGP aperture is disabled */ +	if (agp_bot == agp_top) { +		logical_addr_low = adev->gmc.fb_start >> 18; +		if (adev->apu_flags & AMD_APU_IS_RAVEN2) +			/* +			 * Raven2 has a HW issue that it is unable to use the vram which +			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the +			 * workaround that increase system aperture high address (add 1) +			 * to get rid of the VM fault and hardware hang. +			 */ +			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; +		else +			logical_addr_high = adev->gmc.fb_end >> 18; +	} else { +		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; +		if (adev->apu_flags & AMD_APU_IS_RAVEN2) +			/* +			 * Raven2 has a HW issue that it is unable to use the vram which +			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the +			 * workaround that increase system aperture high address (add 1) +			 * to get rid of the VM fault and hardware hang. 
+			 */ +			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); +		else +			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; +	} + +	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);  	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;  	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); @@ -1227,15 +1233,30 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_  	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;  	pa_config->system_aperture.fb_base = adev->gmc.fb_start; -	pa_config->system_aperture.fb_offset = adev->gmc.aper_base; +	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;  	pa_config->system_aperture.fb_top = adev->gmc.fb_end;  	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;  	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;  	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; -	pa_config->is_hvm_enabled = 0; +	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; + +} + +static void force_connector_state( +	struct amdgpu_dm_connector *aconnector, +	enum drm_connector_force force_state) +{ +	struct drm_connector *connector = &aconnector->base; + +	mutex_lock(&connector->dev->mode_config.mutex); +	aconnector->base.force = force_state; +	mutex_unlock(&connector->dev->mode_config.mutex); +	mutex_lock(&aconnector->hpd_lock); +	drm_kms_helper_connector_hotplug_event(connector); +	mutex_unlock(&aconnector->hpd_lock);  }  static void dm_handle_hpd_rx_offload_work(struct work_struct *work) @@ -1246,6 +1267,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  	struct amdgpu_device *adev;  	enum dc_connection_type new_connection_type = dc_connection_none;  	unsigned long flags; +	union test_response test_response; + +	memset(&test_response, 0, sizeof(test_response));  	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);  	aconnector = offload_work->offload_wq->aconnector; @@ -1259,7 +1283,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  	dc_link = aconnector->dc_link;  	mutex_lock(&aconnector->hpd_lock); -	if (!dc_link_detect_sink(dc_link, &new_connection_type)) +	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))  		DRM_ERROR("KMS: Failed to detect connector\n");  	mutex_unlock(&aconnector->hpd_lock); @@ -1270,15 +1294,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  		goto skip;  	mutex_lock(&adev->dm.dc_lock); -	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) +	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {  		dc_link_dp_handle_automated_test(dc_link); + +		if (aconnector->timing_changed) { +			/* force connector disconnect and reconnect */ +			force_connector_state(aconnector, DRM_FORCE_OFF); +			msleep(100); +			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); +		} + +		test_response.bits.ACK = 1; + +		core_link_write_dpcd( +		dc_link, +		DP_TEST_RESPONSE, +		&test_response.raw, +		sizeof(test_response)); +	}  	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && -			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && +			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&  			dc_link_dp_allow_hpd_rx_irq(dc_link)) { -		dc_link_dp_handle_link_loss(dc_link); +		/* offload_work->data is from handle_hpd_rx_irq-> +		 * schedule_hpd_rx_offload_work.this is defer handle +	
	 * for hpd short pulse. upon here, link status may be +		 * changed, need get latest link status from dpcd +		 * registers. if link status is good, skip run link +		 * training again. +		 */ +		union hpd_irq_data irq_data; + +		memset(&irq_data, 0, sizeof(irq_data)); + +		/* before dc_link_dp_handle_link_loss, allow new link lost handle +		 * request be added to work queue if link lost at end of dc_link_ +		 * dp_handle_link_loss +		 */  		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);  		offload_work->offload_wq->is_handling_link_loss = false;  		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + +		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && +			dc_link_check_link_loss_status(dc_link, &irq_data)) +			dc_link_dp_handle_link_loss(dc_link);  	}  	mutex_unlock(&adev->dm.dc_lock); @@ -1442,9 +1500,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	memset(&init_params, 0, sizeof(init_params));  #endif +	mutex_init(&adev->dm.dpia_aux_lock);  	mutex_init(&adev->dm.dc_lock);  	mutex_init(&adev->dm.audio_lock); -	spin_lock_init(&adev->dm.vblank_lock);  	if(amdgpu_dm_irq_init(adev)) {  		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); @@ -1512,6 +1570,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  		case IP_VERSION(3, 0, 1):  		case IP_VERSION(3, 1, 2):  		case IP_VERSION(3, 1, 3): +		case IP_VERSION(3, 1, 4):  		case IP_VERSION(3, 1, 5):  		case IP_VERSION(3, 1, 6):  			init_data.flags.gpu_vm_support = true; @@ -1521,6 +1580,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  		}  		break;  	} +	if (init_data.flags.gpu_vm_support && +	    (amdgpu_sg_display == 0)) +		init_data.flags.gpu_vm_support = false;  	if (init_data.flags.gpu_vm_support)  		adev->mode_info.gpu_vm_support = true; @@ -1542,6 +1604,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)  		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; +	/* Disable SubVP + DRR config by default */ +	init_data.flags.disable_subvp_drr = true; +	if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR) +		init_data.flags.disable_subvp_drr = false; +  	init_data.flags.seamless_boot_edp_requested = false;  	if (check_seamless_boot_capability(adev)) { @@ -1597,6 +1664,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */  	adev->dm.dc->debug.ignore_cable_id = true; +	/* TODO: There is a new drm mst change where the freedom of +	 * vc_next_start_slot update is revoked/moved into drm, instead of in +	 * driver. This forces us to make sure to get vc_next_start_slot updated +	 * in drm function each time without considering if mst_state is active +	 * or not. Otherwise, next time hotplug will give wrong start_slot +	 * number. We are implementing a temporary solution to even notify drm +	 * mst deallocation when link is no longer of MST type when uncommitting +	 * the stream so we will have more time to work on a proper solution. +	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we +	 * should notify drm to do a complete "reset" of its states and stop +	 * calling further drm mst functions when link is no longer of an MST +	 * type. This could happen when we unplug an MST hubs/displays. When +	 * uncommit stream comes later after unplug, we should just reset +	 * hardware states only. 
+	 */ +	adev->dm.dc->debug.temp_mst_deallocation_sequence = true; + +	if (adev->dm.dc->caps.dp_hdmi21_pcon_support) +		DRM_INFO("DP-HDMI FRL PCON supported\n"); +  	r = dm_dmub_hw_init(adev);  	if (r) {  		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1650,7 +1737,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	}  #endif  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); +	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); +	if (!adev->dm.secure_display_ctxs) { +		DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n"); +	}  #endif  	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {  		init_completion(&adev->dm.dmub_aux_transfer_done); @@ -1738,17 +1828,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  		adev->dm.vblank_control_workqueue = NULL;  	} -	for (i = 0; i < adev->dm.display_indexes_num; i++) { -		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); -	} -  	amdgpu_dm_destroy_drm_device(&adev->dm);  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -	if (adev->dm.crc_rd_wrk) { -		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); -		kfree(adev->dm.crc_rd_wrk); -		adev->dm.crc_rd_wrk = NULL; +	if (adev->dm.secure_display_ctxs) { +		for (i = 0; i < adev->mode_info.num_crtc; i++) { +			if (adev->dm.secure_display_ctxs[i].crtc) { +				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); +				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); +			} +		} +		kfree(adev->dm.secure_display_ctxs); +		adev->dm.secure_display_ctxs = NULL;  	}  #endif  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -1807,6 +1898,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  	mutex_destroy(&adev->dm.audio_lock);  	mutex_destroy(&adev->dm.dc_lock); +	mutex_destroy(&adev->dm.dpia_aux_lock);  	return;  } @@ -1882,25 +1974,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)  		return 0;  	} -	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); -	if (r == -ENOENT) { +	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); +	if (r == -ENODEV) {  		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */  		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");  		adev->dm.fw_dmcu = NULL;  		return 0;  	}  	if (r) { -		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", -			fw_name_dmcu); -		return r; -	} - -	r = amdgpu_ucode_validate(adev->dm.fw_dmcu); -	if (r) {  		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",  			fw_name_dmcu); -		release_firmware(adev->dm.fw_dmcu); -		adev->dm.fw_dmcu = NULL; +		amdgpu_ucode_release(&adev->dm.fw_dmcu);  		return r;  	} @@ -1946,7 +2030,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	struct dmub_srv_fb_info *fb_info;  	struct dmub_srv *dmub_srv;  	const struct dmcub_firmware_header_v1_0 *hdr; -	const char *fw_name_dmub;  	enum dmub_asic dmub_asic;  	enum dmub_status status;  	int r; @@ -1954,73 +2037,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	switch (adev->ip_versions[DCE_HWIP][0]) {  	case IP_VERSION(2, 1, 0):  		dmub_asic = DMUB_ASIC_DCN21; -		fw_name_dmub = FIRMWARE_RENOIR_DMUB; -		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) -			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;  		break;  	case IP_VERSION(3, 0, 0): -		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { -			dmub_asic = DMUB_ASIC_DCN30; -			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; -		} else { -			dmub_asic = DMUB_ASIC_DCN30; -	
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; -		} +		dmub_asic = DMUB_ASIC_DCN30;  		break;  	case IP_VERSION(3, 0, 1):  		dmub_asic = DMUB_ASIC_DCN301; -		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;  		break;  	case IP_VERSION(3, 0, 2):  		dmub_asic = DMUB_ASIC_DCN302; -		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;  		break;  	case IP_VERSION(3, 0, 3):  		dmub_asic = DMUB_ASIC_DCN303; -		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;  		break;  	case IP_VERSION(3, 1, 2):  	case IP_VERSION(3, 1, 3):  		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; -		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;  		break;  	case IP_VERSION(3, 1, 4):  		dmub_asic = DMUB_ASIC_DCN314; -		fw_name_dmub = FIRMWARE_DCN_314_DMUB;  		break;  	case IP_VERSION(3, 1, 5):  		dmub_asic = DMUB_ASIC_DCN315; -		fw_name_dmub = FIRMWARE_DCN_315_DMUB;  		break;  	case IP_VERSION(3, 1, 6):  		dmub_asic = DMUB_ASIC_DCN316; -		fw_name_dmub = FIRMWARE_DCN316_DMUB;  		break;  	case IP_VERSION(3, 2, 0):  		dmub_asic = DMUB_ASIC_DCN32; -		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;  		break;  	case IP_VERSION(3, 2, 1):  		dmub_asic = DMUB_ASIC_DCN321; -		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;  		break;  	default:  		/* ASIC doesn't support DMUB. */  		return 0;  	} -	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); -	if (r) { -		DRM_ERROR("DMUB firmware loading failed: %d\n", r); -		return 0; -	} - -	r = amdgpu_ucode_validate(adev->dm.dmub_fw); -	if (r) { -		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); -		return 0; -	} -  	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;  	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); @@ -2087,7 +2140,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	 * TODO: Move this into GART.  	 
*/  	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, -				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, +				    AMDGPU_GEM_DOMAIN_VRAM | +				    AMDGPU_GEM_DOMAIN_GTT, +				    &adev->dm.dmub_bo,  				    &adev->dm.dmub_bo_gpu_addr,  				    &adev->dm.dmub_bo_cpu_addr);  	if (r) @@ -2142,11 +2197,8 @@ static int dm_sw_fini(void *handle)  		adev->dm.dmub_srv = NULL;  	} -	release_firmware(adev->dm.dmub_fw); -	adev->dm.dmub_fw = NULL; - -	release_firmware(adev->dm.fw_dmcu); -	adev->dm.fw_dmcu = NULL; +	amdgpu_ucode_release(&adev->dm.dmub_fw); +	amdgpu_ucode_release(&adev->dm.fw_dmcu);  	return 0;  } @@ -2172,6 +2224,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)  				DRM_ERROR("DM_MST: Failed to start MST\n");  				aconnector->dc_link->type =  					dc_connection_single; +				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, +								     aconnector->dc_link);  				break;  			}  		} @@ -2240,7 +2294,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)  	drm_for_each_connector_iter(connector, &iter) {  		aconnector = to_amdgpu_dm_connector(connector);  		if (aconnector->dc_link->type != dc_connection_mst_branch || -		    aconnector->mst_port) +		    aconnector->mst_root)  			continue;  		mgr = &aconnector->mst_mgr; @@ -2493,7 +2547,7 @@ struct amdgpu_dm_connector *  amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,  					     struct drm_crtc *crtc)  { -	uint32_t i; +	u32 i;  	struct drm_connector_state *new_con_state;  	struct drm_connector *connector;  	struct drm_crtc *crtc_from_state; @@ -2741,16 +2795,18 @@ static int dm_resume(void *handle)  	drm_for_each_connector_iter(connector, &iter) {  		aconnector = to_amdgpu_dm_connector(connector); +		if (!aconnector->dc_link) +			continue; +  		/*  		 * this is the case when traversing through already created  		 * MST connectors, should be skipped  		 */ -		if (aconnector->dc_link && -		    aconnector->dc_link->type == dc_connection_mst_branch) +		if (aconnector->dc_link->type == dc_connection_mst_branch)  			continue;  		mutex_lock(&aconnector->hpd_lock); -		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) +		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -2858,7 +2914,6 @@ const struct amdgpu_ip_block_version dm_ip_block =  static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {  	.fb_create = amdgpu_display_user_framebuffer_create,  	.get_format_info = amd_get_format_info, -	.output_poll_changed = drm_fb_helper_output_poll_changed,  	.atomic_check = amdgpu_dm_atomic_check,  	.atomic_commit = drm_atomic_helper_commit,  }; @@ -3029,6 +3084,10 @@ void amdgpu_dm_update_connector_after_detect(  						    aconnector->edid);  		} +		aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); +		if (!aconnector->timing_requested) +			dm_error("%s: failed to create aconnector->requested_timing\n", __func__); +  		drm_connector_update_edid_property(connector, aconnector->edid);  		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);  		update_connector_ext_caps(aconnector); @@ -3040,6 +3099,8 @@ void amdgpu_dm_update_connector_after_detect(  		dc_sink_release(aconnector->dc_sink);  		aconnector->dc_sink = NULL;  		aconnector->edid = NULL; +		kfree(aconnector->timing_requested); +		aconnector->timing_requested = NULL;  #ifdef 
CONFIG_DRM_AMD_DC_HDCP  		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */  		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) @@ -3084,7 +3145,9 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)  	if (aconnector->fake_enable)  		aconnector->fake_enable = false; -	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) +	aconnector->timing_changed = false; + +	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))  		DRM_ERROR("KMS: Failed to detect connector\n");  	if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3125,8 +3188,8 @@ static void handle_hpd_irq(void *param)  static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  { -	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; -	uint8_t dret; +	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; +	u8 dret;  	bool new_irq_handled = false;  	int dpcd_addr;  	int dpcd_bytes_to_read; @@ -3154,7 +3217,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  	while (dret == dpcd_bytes_to_read &&  		process_count < max_process_count) { -		uint8_t retry; +		u8 retry;  		dret = 0;  		process_count++; @@ -3173,7 +3236,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  				dpcd_bytes_to_read - 1;  			for (retry = 0; retry < 3; retry++) { -				uint8_t wret; +				u8 wret;  				wret = drm_dp_dpcd_write(  					&aconnector->dm_dp_aux.aux, @@ -3233,7 +3296,7 @@ static void handle_hpd_rx_irq(void *param)  	union hpd_irq_data hpd_irq_data;  	bool link_loss = false;  	bool has_left_work = false; -	int idx = aconnector->base.index; +	int idx = dc_link->link_index;  	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];  	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3287,7 +3350,7 @@ static void handle_hpd_rx_irq(void *param)  out:  	if (result && !is_mst_root_connector) {  		/* Downstream Port status changed. 
*/ -		if (!dc_link_detect_sink(dc_link, &new_connection_type)) +		if (!dc_link_detect_connection_type(dc_link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3375,7 +3438,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)  					(void *) aconnector);  			if (adev->dm.hpd_rx_offload_wq) -				adev->dm.hpd_rx_offload_wq[connector->index].aconnector = +				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =  					aconnector;  		}  	} @@ -3864,8 +3927,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)  	/* indicates support for immediate flip */  	adev_to_drm(adev)->mode_config.async_page_flip = true; -	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; -  	state = kzalloc(sizeof(*state), GFP_KERNEL);  	if (!state)  		return -ENOMEM; @@ -4189,15 +4250,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);  static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  {  	struct amdgpu_display_manager *dm = &adev->dm; -	int32_t i; +	s32 i;  	struct amdgpu_dm_connector *aconnector = NULL;  	struct amdgpu_encoder *aencoder = NULL;  	struct amdgpu_mode_info *mode_info = &adev->mode_info; -	uint32_t link_cnt; -	int32_t primary_planes; +	u32 link_cnt; +	s32 primary_planes;  	enum dc_connection_type new_connection_type = dc_connection_none;  	const struct dc_plane_cap *plane;  	bool psr_feature_enabled = false; +	int max_overlay = dm->dc->caps.max_slave_planes;  	dm->display_indexes_num = dm->dc->caps.max_streams;  	/* Update the actual used number of crtc */ @@ -4252,14 +4314,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		if (!plane->pixel_format_support.argb8888)  			continue; +		if (max_overlay-- == 0) +			break; +  		if (initialize_plane(dm, NULL, primary_planes + i,  				     DRM_PLANE_TYPE_OVERLAY, plane)) {  			DRM_ERROR("KMS: Failed to initialize overlay plane\n");  			goto fail;  		} - -		/* Only create one overlay plane. */ -		break;  	}  	for (i = 0; i < dm->dc->caps.max_streams; i++) @@ -4338,7 +4400,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		link = dc_get_link_at_index(dm->dc, i); -		if (!dc_link_detect_sink(link, &new_connection_type)) +		if (!dc_link_detect_connection_type(link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -4371,6 +4433,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		amdgpu_set_panel_orientation(&aconnector->base);  	} +	/* If we didn't find a panel, notify the acpi video detection */ +	if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0) +		acpi_video_report_nolcd(); +  	/* Software is initialized. Now we can register interrupt handlers. 
*/  	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4510,9 +4576,75 @@ DEVICE_ATTR_WO(s3_debug);  #endif +static int dm_init_microcode(struct amdgpu_device *adev) +{ +	char *fw_name_dmub; +	int r; + +	switch (adev->ip_versions[DCE_HWIP][0]) { +	case IP_VERSION(2, 1, 0): +		fw_name_dmub = FIRMWARE_RENOIR_DMUB; +		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) +			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; +		break; +	case IP_VERSION(3, 0, 0): +		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) +			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; +		else +			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; +		break; +	case IP_VERSION(3, 0, 1): +		fw_name_dmub = FIRMWARE_VANGOGH_DMUB; +		break; +	case IP_VERSION(3, 0, 2): +		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; +		break; +	case IP_VERSION(3, 0, 3): +		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; +		break; +	case IP_VERSION(3, 1, 2): +	case IP_VERSION(3, 1, 3): +		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; +		break; +	case IP_VERSION(3, 1, 4): +		fw_name_dmub = FIRMWARE_DCN_314_DMUB; +		break; +	case IP_VERSION(3, 1, 5): +		fw_name_dmub = FIRMWARE_DCN_315_DMUB; +		break; +	case IP_VERSION(3, 1, 6): +		fw_name_dmub = FIRMWARE_DCN316_DMUB; +		break; +	case IP_VERSION(3, 2, 0): +		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; +		break; +	case IP_VERSION(3, 2, 1): +		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; +		break; +	default: +		/* ASIC doesn't support DMUB. */ +		return 0; +	} +	r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); +	if (r) +		DRM_ERROR("DMUB firmware loading failed: %d\n", r); +	return r; +} +  static int dm_early_init(void *handle)  {  	struct amdgpu_device *adev = (struct amdgpu_device *)handle; +	struct amdgpu_mode_info *mode_info = &adev->mode_info; +	struct atom_context *ctx = mode_info->atom_context; +	int index = GetIndexIntoMasterTable(DATA, Object_Header); +	u16 data_offset; + +	/* if there is no object header, skip DM */ +	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { +		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; +		dev_info(adev->dev, "No object header, skipping DM\n"); +		return -ENOENT; +	}  	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4640,8 +4772,9 @@ static int dm_early_init(void *handle)  		adev_to_drm(adev)->dev,  		&dev_attr_s3_debug);  #endif +	adev->dc_enabled = true; -	return 0; +	return dm_init_microcode(adev);  }  static bool modereset_required(struct drm_crtc_state *crtc_state) @@ -4706,7 +4839,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,  static int  fill_dc_plane_info_and_addr(struct amdgpu_device *adev,  			    const struct drm_plane_state *plane_state, -			    const uint64_t tiling_flags, +			    const u64 tiling_flags,  			    struct dc_plane_info *plane_info,  			    struct dc_plane_address *address,  			    bool tmz_surface, @@ -4879,6 +5012,35 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,  	return 0;  } +static inline void fill_dc_dirty_rect(struct drm_plane *plane, +				      struct rect *dirty_rect, int32_t x, +				      s32 y, s32 width, s32 height, +				      int *i, bool ffu) +{ +	if (*i > DC_MAX_DIRTY_RECTS) +		return; + +	if (*i == DC_MAX_DIRTY_RECTS) +		goto out; + +	dirty_rect->x = x; +	dirty_rect->y = y; +	dirty_rect->width = width; +	dirty_rect->height = height; + +	if (ffu) +		drm_dbg(plane->dev, +			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", +			plane->base.id, width, height); +	else +		drm_dbg(plane->dev, +			"[PLANE:%d] 
PSR SU dirty rect at (%d, %d) size (%d, %d)", +			plane->base.id, x, y, width, height); + +out: +	(*i)++; +} +  /**   * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates   * @@ -4888,6 +5050,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,   * @new_plane_state: New state of @plane   * @crtc_state: New state of CRTC connected to the @plane   * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * @dirty_regions_changed: dirty regions changed   *   * For PSR SU, DC informs the DMUB uController of dirty rectangle regions   * (referred to as "damage clips" in DRM nomenclature) that require updating on @@ -4899,25 +5062,22 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,   * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -   * implicitly provide damage clips without any client support via the plane   * bounds. - * - * Today, amdgpu_dm only supports the MPO and cursor usecase. - * - * TODO: Also enable for FB_DAMAGE_CLIPS   */  static void fill_dc_dirty_rects(struct drm_plane *plane,  				struct drm_plane_state *old_plane_state,  				struct drm_plane_state *new_plane_state,  				struct drm_crtc_state *crtc_state, -				struct dc_flip_addrs *flip_addrs) +				struct dc_flip_addrs *flip_addrs, +				bool *dirty_regions_changed)  {  	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);  	struct rect *dirty_rects = flip_addrs->dirty_rects; -	uint32_t num_clips; +	u32 num_clips; +	struct drm_mode_rect *clips;  	bool bb_changed;  	bool fb_changed; -	uint32_t i = 0; - -	flip_addrs->dirty_rect_count = 0; +	u32 i = 0; +	*dirty_regions_changed = false;  	/*  	 * Cursor plane has it's own dirty rect update interface. See @@ -4926,20 +5086,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,  	if (plane->type == DRM_PLANE_TYPE_CURSOR)  		return; -	/* -	 * Today, we only consider MPO use-case for PSR SU. If MPO not -	 * requested, and there is a plane update, do FFU. -	 */ +	num_clips = drm_plane_get_damage_clips_count(new_plane_state); +	clips = drm_plane_get_damage_clips(new_plane_state); +  	if (!dm_crtc_state->mpo_requested) { -		dirty_rects[0].x = 0; -		dirty_rects[0].y = 0; -		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay; -		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay; -		flip_addrs->dirty_rect_count = 1; -		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", -				 new_plane_state->plane->base.id, -				 dm_crtc_state->base.mode.crtc_hdisplay, -				 dm_crtc_state->base.mode.crtc_vdisplay); +		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) +			goto ffu; + +		for (; flip_addrs->dirty_rect_count < num_clips; clips++) +			fill_dc_dirty_rect(new_plane_state->plane, +					   &dirty_rects[i], clips->x1, +					   clips->y1, clips->x2 - clips->x1, +					   clips->y2 - clips->y1, +					   &flip_addrs->dirty_rect_count, +					   false);  		return;  	} @@ -4950,7 +5110,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,  	 * If plane is moved or resized, also add old bounding box to dirty  	 * rects.  	 
*/ -	num_clips = drm_plane_get_damage_clips_count(new_plane_state);  	fb_changed = old_plane_state->fb->base.id !=  		     new_plane_state->fb->base.id;  	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || @@ -4958,36 +5117,53 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,  		      old_plane_state->crtc_w != new_plane_state->crtc_w ||  		      old_plane_state->crtc_h != new_plane_state->crtc_h); -	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", -			 new_plane_state->plane->base.id, -			 bb_changed, fb_changed, num_clips); +	drm_dbg(plane->dev, +		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", +		new_plane_state->plane->base.id, +		bb_changed, fb_changed, num_clips); -	if (num_clips || fb_changed || bb_changed) { -		dirty_rects[i].x = new_plane_state->crtc_x; -		dirty_rects[i].y = new_plane_state->crtc_y; -		dirty_rects[i].width = new_plane_state->crtc_w; -		dirty_rects[i].height = new_plane_state->crtc_h; -		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", -				 new_plane_state->plane->base.id, -				 dirty_rects[i].x, dirty_rects[i].y, -				 dirty_rects[i].width, dirty_rects[i].height); -		i += 1; -	} +	*dirty_regions_changed = bb_changed; -	/* Add old plane bounding-box if plane is moved or resized */  	if (bb_changed) { -		dirty_rects[i].x = old_plane_state->crtc_x; -		dirty_rects[i].y = old_plane_state->crtc_y; -		dirty_rects[i].width = old_plane_state->crtc_w; -		dirty_rects[i].height = old_plane_state->crtc_h; -		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", -				old_plane_state->plane->base.id, -				dirty_rects[i].x, dirty_rects[i].y, -				dirty_rects[i].width, dirty_rects[i].height); -		i += 1; -	} +		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], +				   new_plane_state->crtc_x, +				   new_plane_state->crtc_y, +				   new_plane_state->crtc_w, +				   new_plane_state->crtc_h, &i, false); + +		/* Add old plane bounding-box if plane is moved or resized */ +		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], +				   old_plane_state->crtc_x, +				   old_plane_state->crtc_y, +				   old_plane_state->crtc_w, +				   old_plane_state->crtc_h, &i, false); +	} + +	if (num_clips) { +		for (; i < num_clips; clips++) +			fill_dc_dirty_rect(new_plane_state->plane, +					   &dirty_rects[i], clips->x1, +					   clips->y1, clips->x2 - clips->x1, +					   clips->y2 - clips->y1, &i, false); +	} else if (fb_changed && !bb_changed) { +		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], +				   new_plane_state->crtc_x, +				   new_plane_state->crtc_y, +				   new_plane_state->crtc_w, +				   new_plane_state->crtc_h, &i, false); +	} + +	if (i > DC_MAX_DIRTY_RECTS) +		goto ffu;  	flip_addrs->dirty_rect_count = i; +	return; + +ffu: +	fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, +			   dm_crtc_state->base.mode.crtc_hdisplay, +			   dm_crtc_state->base.mode.crtc_vdisplay, +			   &flip_addrs->dirty_rect_count, true);  }  static void update_stream_scaling_settings(const struct drm_display_mode *mode, @@ -5049,7 +5225,7 @@ static enum dc_color_depth  convert_color_depth_from_display_info(const struct drm_connector *connector,  				      bool is_y420, int requested_bpc)  { -	uint8_t bpc; +	u8 bpc;  	if (is_y420) {  		bpc = 8; @@ -5278,8 +5454,6 @@ static void fill_stream_properties_from_drm_display_mode(  	timing_out->aspect_ratio = get_aspect_ratio(mode_in); -	stream->output_color_space = get_output_color_space(timing_out); -  	
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;  	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { @@ -5290,6 +5464,8 @@ static void fill_stream_properties_from_drm_display_mode(  			adjust_colour_depth_from_display_info(timing_out, info);  		}  	} + +	stream->output_color_space = get_output_color_space(timing_out);  }  static void fill_audio_info(struct audio_info *audio_info, @@ -5593,8 +5769,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,  				    uint32_t max_dsc_target_bpp_limit_override)  {  	const struct dc_link_settings *verified_link_cap = NULL; -	uint32_t link_bw_in_kbps; -	uint32_t edp_min_bpp_x16, edp_max_bpp_x16; +	u32 link_bw_in_kbps; +	u32 edp_min_bpp_x16, edp_max_bpp_x16;  	struct dc *dc = sink->ctx->dc;  	struct dc_dsc_bw_range bw_range = {0};  	struct dc_dsc_config dsc_cfg = {0}; @@ -5651,17 +5827,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,  					struct dsc_dec_dpcd_caps *dsc_caps)  {  	struct drm_connector *drm_connector = &aconnector->base; -	uint32_t link_bandwidth_kbps; -	uint32_t max_dsc_target_bpp_limit_override = 0; +	u32 link_bandwidth_kbps;  	struct dc *dc = sink->ctx->dc; -	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; -	uint32_t dsc_max_supported_bw_in_kbps; +	u32 max_supported_bw_in_kbps, timing_bw_in_kbps; +	u32 dsc_max_supported_bw_in_kbps; +	u32 max_dsc_target_bpp_limit_override = +		drm_connector->display_info.max_dsc_bpp;  	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,  							dc_link_get_link_cap(aconnector->dc_link)); -	if (stream->link && stream->link->local_sink) -		max_dsc_target_bpp_limit_override = -			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;  	/* Set DSC policy according to dsc_clock_en */  	dc_dsc_policy_set_enable_dsc_when_not_needed( @@ -5734,7 +5908,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  	const struct drm_connector_state *con_state =  		dm_state ? &dm_state->base : NULL;  	struct dc_stream_state *stream = NULL; -	struct drm_display_mode mode = *drm_mode; +	struct drm_display_mode mode;  	struct drm_display_mode saved_mode;  	struct drm_display_mode *freesync_mode = NULL;  	bool native_mode_found = false; @@ -5742,12 +5916,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  	bool scale = dm_state ? 
(dm_state->scaling != RMX_OFF) : false;  	int mode_refresh;  	int preferred_refresh = 0; +	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	struct dsc_dec_dpcd_caps dsc_caps;  #endif  	struct dc_sink *sink = NULL; +	drm_mode_init(&mode, drm_mode);  	memset(&saved_mode, 0, sizeof(saved_mode));  	if (aconnector == NULL) { @@ -5802,7 +5978,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		 */  		DRM_DEBUG_DRIVER("No preferred mode found\n");  	} else { -		recalculate_timing = is_freesync_video_mode(&mode, aconnector); +		recalculate_timing = amdgpu_freesync_vid_mode && +				 is_freesync_video_mode(&mode, aconnector);  		if (recalculate_timing) {  			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);  			drm_mode_copy(&saved_mode, &mode); @@ -5833,6 +6010,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  			stream, &mode, &aconnector->base, con_state, old_stream,  			requested_bpc); +	if (aconnector->timing_changed) { +		DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", +				__func__, +				stream->timing.display_color_depth, +				aconnector->timing_requested->display_color_depth); +		stream->timing = *aconnector->timing_requested; +	} +  #if defined(CONFIG_DRM_AMD_DC_DCN)  	/* SST DSC determination policy */  	update_dsc_caps(aconnector, sink, stream, &dsc_caps); @@ -5865,7 +6050,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)  				stream->use_vsc_sdp_for_colorimetry = true;  		} -		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space); +		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) +			tf = TRANSFER_FUNC_GAMMA_22; +		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);  		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;  	} @@ -6023,15 +6210,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)  	if (aconnector->mst_mgr.dev)  		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ -	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)  	for (i = 0; i < dm->num_of_edps; i++) {  		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {  			backlight_device_unregister(dm->backlight_dev[i]);  			dm->backlight_dev[i] = NULL;  		}  	} -#endif  	if (aconnector->dc_em_sink)  		dc_sink_release(aconnector->dc_em_sink); @@ -6158,7 +6342,6 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)  				aconnector->base.name);  		aconnector->base.force = DRM_FORCE_OFF; -		aconnector->base.override_edid = false;  		return;  	} @@ -6193,11 +6376,72 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)  		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;  	} - -	aconnector->base.override_edid = true;  	create_eml_sink(aconnector);  } +static enum dc_status dm_validate_stream_and_context(struct dc *dc, +						struct dc_stream_state *stream) +{ +	enum dc_status dc_result = DC_ERROR_UNEXPECTED; +	struct dc_plane_state *dc_plane_state = NULL; +	struct dc_state *dc_state = NULL; + +	if (!stream) +		goto cleanup; + +	dc_plane_state = dc_create_plane_state(dc); +	if (!dc_plane_state) +		goto cleanup; + +	dc_state = dc_create_state(dc); +	if (!dc_state) +		goto cleanup; + +	/* populate stream to plane */ +	dc_plane_state->src_rect.height  = stream->src.height; +	
dc_plane_state->src_rect.width   = stream->src.width; +	dc_plane_state->dst_rect.height  = stream->src.height; +	dc_plane_state->dst_rect.width   = stream->src.width; +	dc_plane_state->clip_rect.height = stream->src.height; +	dc_plane_state->clip_rect.width  = stream->src.width; +	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; +	dc_plane_state->plane_size.surface_size.height = stream->src.height; +	dc_plane_state->plane_size.surface_size.width  = stream->src.width; +	dc_plane_state->plane_size.chroma_size.height  = stream->src.height; +	dc_plane_state->plane_size.chroma_size.width   = stream->src.width; +	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; +	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; +	dc_plane_state->rotation = ROTATION_ANGLE_0; +	dc_plane_state->is_tiling_rotated = false; +	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; + +	dc_result = dc_validate_stream(dc, stream); +	if (dc_result == DC_OK) +		dc_result = dc_validate_plane(dc, dc_plane_state); + +	if (dc_result == DC_OK) +		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); + +	if (dc_result == DC_OK && !dc_add_plane_to_context( +						dc, +						stream, +						dc_plane_state, +						dc_state)) +		dc_result = DC_FAIL_ATTACH_SURFACES; + +	if (dc_result == DC_OK) +		dc_result = dc_validate_global_state(dc, dc_state, true); + +cleanup: +	if (dc_state) +		dc_release_state(dc_state); + +	if (dc_plane_state) +		dc_plane_state_release(dc_plane_state); + +	return dc_result; +} +  struct dc_stream_state *  create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,  				const struct drm_display_mode *drm_mode, @@ -6224,6 +6468,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)  			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); +		if (dc_result == DC_OK) +			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); +  		if (dc_result != DC_OK) {  			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",  				      drm_mode->hdisplay, @@ -6459,11 +6706,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  	int clock, bpp = 0;  	bool is_y420 = false; -	if (!aconnector->port || !aconnector->dc_sink) +	if (!aconnector->mst_output_port || !aconnector->dc_sink)  		return 0; -	mst_port = aconnector->port; -	mst_mgr = &aconnector->mst_port->mst_mgr; +	mst_port = aconnector->mst_output_port; +	mst_mgr = &aconnector->mst_root->mst_mgr;  	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)  		return 0; @@ -6473,7 +6720,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  		return PTR_ERR(mst_state);  	if (!mst_state->pbn_div) -		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); +		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);  	if (!state->duplicated) {  		int max_bpc = conn_state->max_requested_bpc; @@ -6519,7 +6766,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  		aconnector = to_amdgpu_dm_connector(connector); -		if (!aconnector->port) +		if (!aconnector->mst_output_port)  			continue;  		if (!new_con_state || !new_con_state->crtc) @@ -6559,7 +6806,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  			dm_conn_state->pbn = pbn;  			dm_conn_state->vcpi_slots = slot_num; -			ret = 
drm_dp_mst_atomic_enable_dsc(state, aconnector->port, +			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,  							   dm_conn_state->pbn, false);  			if (ret < 0)  				return ret; @@ -6567,7 +6814,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  			continue;  		} -		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); +		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);  		if (vcpi < 0)  			return vcpi; @@ -6810,7 +7057,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  	const struct drm_display_mode *m;  	struct drm_display_mode *new_mode;  	uint i; -	uint32_t new_modes_count = 0; +	u32 new_modes_count = 0;  	/* Standard FPS values  	 * @@ -6824,7 +7071,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  	 * 60 	        - Commonly used  	 * 48,72,96,120 - Multiples of 24  	 */ -	static const uint32_t common_rates[] = { +	static const u32 common_rates[] = {  		23976, 24000, 25000, 29970, 30000,  		48000, 50000, 60000, 72000, 96000, 120000  	}; @@ -6840,8 +7087,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  		return 0;  	for (i = 0; i < ARRAY_SIZE(common_rates); i++) { -		uint64_t target_vtotal, target_vtotal_diff; -		uint64_t num, den; +		u64 target_vtotal, target_vtotal_diff; +		u64 num, den;  		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])  			continue; @@ -6887,7 +7134,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect  	struct amdgpu_dm_connector *amdgpu_dm_connector =  		to_amdgpu_dm_connector(connector); -	if (!edid) +	if (!(amdgpu_freesync_vid_mode && edid))  		return;  	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -6940,6 +7187,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	aconnector->base.dpms = DRM_MODE_DPMS_OFF;  	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */  	aconnector->audio_inst = -1; +	aconnector->pack_sdp_v1_3 = false; +	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; +	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));  	mutex_init(&aconnector->hpd_lock);  	/* @@ -6981,7 +7231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  				adev->mode_info.underscan_vborder_property,  				0); -	if (!aconnector->mst_port) +	if (!aconnector->mst_root)  		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);  	/* This defaults to the max in the range, but we want 8bpc for non-edp. 
*/ @@ -6999,7 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	    connector_type == DRM_MODE_CONNECTOR_eDP) {  		drm_connector_attach_hdr_output_metadata_property(&aconnector->base); -		if (!aconnector->mst_port) +		if (!aconnector->mst_root)  			drm_connector_attach_vrr_capable_property(&aconnector->base);  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -7083,7 +7333,7 @@ create_i2c(struct ddc_service *ddc_service,   */  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  				    struct amdgpu_dm_connector *aconnector, -				    uint32_t link_index, +				    u32 link_index,  				    struct amdgpu_encoder *aencoder)  {  	int res = 0; @@ -7268,27 +7518,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,  }  #ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, -					    const struct drm_connector_state *old_state, -					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, +					    struct drm_crtc_state *old_crtc_state, +					    struct drm_connector_state *new_conn_state, +					    struct drm_connector_state *old_conn_state, +					    const struct drm_connector *connector, +					    struct hdcp_workqueue *hdcp_w)  {  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);  	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); -	/* Handle: Type0/1 change */ -	if (old_state->hdcp_content_type != state->hdcp_content_type && -	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", +		connector->index, connector->status, connector->dpms); +	pr_debug("[HDCP_DM] state protection old: %x new: %x\n", +		old_conn_state->content_protection, new_conn_state->content_protection); + +	if (old_crtc_state) +		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +		old_crtc_state->enable, +		old_crtc_state->active, +		old_crtc_state->mode_changed, +		old_crtc_state->active_changed, +		old_crtc_state->connectors_changed); + +	if (new_crtc_state) +		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +		new_crtc_state->enable, +		new_crtc_state->active, +		new_crtc_state->mode_changed, +		new_crtc_state->active_changed, +		new_crtc_state->connectors_changed); + +	/* hdcp content type change */ +	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && +	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);  		return true;  	} -	/* CP is being re enabled, ignore this -	 * -	 * Handles:	ENABLED -> DESIRED -	 */ -	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && -	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; +	/* CP is being re enabled, ignore this */ +	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && +	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { +		if (new_crtc_state && new_crtc_state->mode_changed) { +			new_conn_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_DESIRED; +			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); +			return true; +		} +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; +		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);  		return false;  	} @@ -7296,9 +7574,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	UNDESIRED -> ENABLED  	 */ -	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && -	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && +	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;  	/* Stream removed and re-enabled  	 * @@ -7308,10 +7586,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	DESIRED -> DESIRED (Special case)  	 */ -	if (!(old_state->crtc && old_state->crtc->enabled) && -		state->crtc && state->crtc->enabled && +	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && +		new_conn_state->crtc && new_conn_state->crtc->enabled &&  		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {  		dm_con_state->update_hdcp = false; +		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", +			__func__);  		return true;  	} @@ -7323,35 +7603,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	DESIRED -> DESIRED (Special case)  	 */ -	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && -	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { +	if (dm_con_state->update_hdcp && +	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && +	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {  		dm_con_state->update_hdcp = false; +		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", +			__func__);  		return true;  	} -	/* -	 * Handles:	UNDESIRED -> UNDESIRED -	 *		DESIRED -> DESIRED -	 *		ENABLED -> ENABLED -	 */ -	if (old_state->content_protection == state->content_protection) +	if (old_conn_state->content_protection == new_conn_state->content_protection) { +		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { +			if (new_crtc_state && new_crtc_state->mode_changed) { +				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", +					__func__); +				return true; +			} +			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", +				__func__); +			return false; +		} + +		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);  		return false; +	} -	/* -	 * Handles:	UNDESIRED -> DESIRED -	 *		DESIRED -> UNDESIRED -	 *		ENABLED -> UNDESIRED -	 */ -	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) +	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { +		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", +			__func__);  		return true; +	} -	/* -	 * Handles:	DESIRED -> ENABLED -	 */ +	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);  	return false;  } -  #endif +  static void remove_stream(struct 
amdgpu_device *adev,  			  struct amdgpu_crtc *acrtc,  			  struct dc_stream_state *stream) @@ -7393,6 +7680,8 @@ static void update_freesync_state_on_stream(  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);  	unsigned long flags;  	bool pack_sdp_v1_3 = false; +	struct amdgpu_dm_connector *aconn; +	enum vrr_packet_type packet_type = PACKET_TYPE_VRR;  	if (!new_stream)  		return; @@ -7428,11 +7717,27 @@ static void update_freesync_state_on_stream(  		}  	} +	aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; + +	if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { +		pack_sdp_v1_3 = aconn->pack_sdp_v1_3; + +		if (aconn->vsdb_info.amd_vsdb_version == 1) +			packet_type = PACKET_TYPE_FS_V1; +		else if (aconn->vsdb_info.amd_vsdb_version == 2) +			packet_type = PACKET_TYPE_FS_V2; +		else if (aconn->vsdb_info.amd_vsdb_version == 3) +			packet_type = PACKET_TYPE_FS_V3; + +		mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, +					&new_stream->adaptive_sync_infopacket); +	} +  	mod_freesync_build_vrr_infopacket(  		dm->freesync_module,  		new_stream,  		&vrr_params, -		PACKET_TYPE_VRR, +		packet_type,  		TRANSFER_FUNC_UNKNOWN,  		&vrr_infopacket,  		pack_sdp_v1_3); @@ -7446,6 +7751,7 @@ static void update_freesync_state_on_stream(  	new_crtc_state->vrr_infopacket = vrr_infopacket;  	new_stream->vrr_infopacket = vrr_infopacket; +	new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);  	if (new_crtc_state->freesync_vrr_info_changed)  		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", @@ -7567,8 +7873,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  				    struct drm_crtc *pcrtc,  				    bool wait_for_vblank)  { -	uint32_t i; -	uint64_t timestamp_ns; +	u32 i; +	u64 timestamp_ns = ktime_get_ns();  	struct drm_plane *plane;  	struct drm_plane_state *old_plane_state, *new_plane_state;  	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); @@ -7579,10 +7885,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));  	int planes_count = 0, vpos, hpos;  	unsigned long flags; -	uint32_t target_vblank, last_flip_vblank; +	u32 target_vblank, last_flip_vblank;  	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);  	bool cursor_update = false;  	bool pflip_present = false; +	bool dirty_rects_changed = false;  	struct {  		struct dc_surface_update surface_updates[MAX_SURFACES];  		struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -7670,10 +7977,32 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  		bundle->surface_updates[planes_count].plane_info =  			&bundle->plane_infos[planes_count]; -		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) +		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {  			fill_dc_dirty_rects(plane, old_plane_state,  					    new_plane_state, new_crtc_state, -					    &bundle->flip_addrs[planes_count]); +					    &bundle->flip_addrs[planes_count], +					    &dirty_rects_changed); + +			/* +			 * If the dirty regions changed, PSR-SU need to be disabled temporarily +			 * and enabled it again after dirty regions are stable to avoid video glitch. +			 * PSR-SU will be enabled in vblank_control_worker() if user pause the video +			 * during the PSR-SU was disabled. 
+			 */ +			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && +			    acrtc_attach->dm_irq_params.allow_psr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif +			    dirty_rects_changed) { +				mutex_lock(&dm->dc_lock); +				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = +				timestamp_ns; +				if (acrtc_state->stream->link->psr_settings.psr_allow_active) +					amdgpu_dm_psr_disable(acrtc_state->stream); +				mutex_unlock(&dm->dc_lock); +			} +		}  		/*  		 * Only allow immediate flips for fast updates that don't @@ -7889,7 +8218,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  			 */  			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&  			    acrtc_attach->dm_irq_params.allow_psr_entry && -			    !acrtc_state->stream->link->psr_settings.psr_allow_active) +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif +			    !acrtc_state->stream->link->psr_settings.psr_allow_active && +			    (timestamp_ns - +			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > +			    500000000)  				amdgpu_dm_psr_enable(acrtc_state->stream);  		} else {  			acrtc_attach->dm_irq_params.allow_psr_entry = false; @@ -8014,7 +8349,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  	struct amdgpu_display_manager *dm = &adev->dm;  	struct dm_atomic_state *dm_state;  	struct dc_state *dc_state = NULL, *dc_state_temp = NULL; -	uint32_t i, j; +	u32 i, j;  	struct drm_crtc *crtc;  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;  	unsigned long flags; @@ -8188,10 +8523,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);  		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); + +		if (!connector) +			continue; + +		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", +			connector->index, connector->status, connector->dpms); +		pr_debug("[HDCP_DM] state protection old: %x new: %x\n", +			old_con_state->content_protection, new_con_state->content_protection); + +		if (aconnector->dc_sink) { +			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && +				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { +				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", +				aconnector->dc_sink->edid_caps.display_name); +			} +		} + +		new_crtc_state = NULL; +		old_crtc_state = NULL; + +		if (acrtc) { +			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); +			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); +		} + +		if (old_crtc_state) +			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +			old_crtc_state->enable, +			old_crtc_state->active, +			old_crtc_state->mode_changed, +			old_crtc_state->active_changed, +			old_crtc_state->connectors_changed); + +		if (new_crtc_state) +			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +			new_crtc_state->enable, +			new_crtc_state->active, +			new_crtc_state->mode_changed, +			new_crtc_state->active_changed, +			new_crtc_state->connectors_changed); +	} + +	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { +		struct dm_connector_state *dm_new_con_state = 
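Aside (illustration only, not patch content): the 500000000 ns comparison above acts as a debounce, so PSR-SU is only re-armed once the dirty regions have been stable for roughly half a second after the last change disabled it. A stand-alone C sketch of that check, using plain monotonic nanosecond timestamps instead of ktime_get_ns() and invented struct/field names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_SU_REENABLE_DELAY_NS 500000000ULL /* 500 ms, mirroring the patch */

struct psr_debounce {
	uint64_t last_dirty_change_ns; /* updated whenever the dirty rects change */
};

/* Record that the dirty regions just changed (PSR would be disabled here). */
static void psr_note_dirty_change(struct psr_debounce *d, uint64_t now_ns)
{
	d->last_dirty_change_ns = now_ns;
}

/* PSR-SU may be re-enabled only after the rects stayed stable long enough. */
static bool psr_may_reenable(const struct psr_debounce *d, uint64_t now_ns)
{
	return (now_ns - d->last_dirty_change_ns) > PSR_SU_REENABLE_DELAY_NS;
}

int main(void)
{
	struct psr_debounce d = { 0 };

	psr_note_dirty_change(&d, 1000000000ULL);             /* change at t = 1 s    */
	printf("%d\n", psr_may_reenable(&d, 1200000000ULL));  /* 0: only 200 ms later */
	printf("%d\n", psr_may_reenable(&d, 1600000000ULL));  /* 1: 600 ms later      */
	return 0;
}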
to_dm_connector_state(new_con_state); +		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); +		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +  		new_crtc_state = NULL; +		old_crtc_state = NULL; -		if (acrtc) +		if (acrtc) {  			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); +			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); +		}  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -8203,11 +8589,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  			continue;  		} -		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) +		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, +											old_con_state, connector, adev->dm.hdcp_workqueue)) { +			/* when display is unplugged from mst hub, connctor will +			 * be destroyed within dm_dp_mst_connector_destroy. connector +			 * hdcp perperties, like type, undesired, desired, enabled, +			 * will be lost. So, save hdcp properties into hdcp_work within +			 * amdgpu_dm_atomic_commit_tail. if the same display is +			 * plugged back with same display index, its hdcp properties +			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes +			 */ + +			bool enable_encryption = false; + +			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) +				enable_encryption = true; + +			if (aconnector->dc_link && aconnector->dc_sink && +				aconnector->dc_link->type == dc_connection_mst_branch) { +				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; +				struct hdcp_workqueue *hdcp_w = +					&hdcp_work[aconnector->dc_link->link_index]; + +				hdcp_w->hdcp_content_type[connector->index] = +					new_con_state->hdcp_content_type; +				hdcp_w->content_protection[connector->index] = +					new_con_state->content_protection; +			} + +			if (new_crtc_state && new_crtc_state->mode_changed && +				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) +				enable_encryption = true; + +			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); +  			hdcp_update_display(  				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, -				new_con_state->hdcp_content_type, -				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); +				new_con_state->hdcp_content_type, enable_encryption); +		}  	}  #endif @@ -8305,9 +8724,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  #ifdef CONFIG_DEBUG_FS  		enum amdgpu_dm_pipe_crc_source cur_crc_src; -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -		struct crc_rd_work *crc_rd_wrk; -#endif  #endif  		/* Count number of newly disabled CRTCs for dropping PM refs later. 
*/  		if (old_crtc_state->active && !new_crtc_state->active) @@ -8320,9 +8736,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		update_stream_irq_parameters(dm, dm_new_crtc_state);  #ifdef CONFIG_DEBUG_FS -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -		crc_rd_wrk = dm->crc_rd_wrk; -#endif  		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);  		cur_crc_src = acrtc->dm_irq_params.crc_src;  		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); @@ -8350,11 +8763,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)  				if (amdgpu_dm_crc_window_is_activated(crtc)) {  					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); -					acrtc->dm_irq_params.crc_window.update_win = true; -					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2; -					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); -					crc_rd_wrk->crtc = crtc; -					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); +					acrtc->dm_irq_params.window_param.update_win = true; + +					/** +					 * It takes 2 frames for HW to stably generate CRC when +					 * resuming from suspend, so we set skip_frame_cnt 2. +					 */ +					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;  					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);  				}  #endif @@ -8645,7 +9060,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,  }  static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { -	uint64_t num, den, res; +	u64 num, den, res;  	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;  	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; @@ -8748,7 +9163,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		 * TODO: Refactor this function to allow this check to work  		 * in all conditions.  		 
*/ -		if (dm_new_crtc_state->stream && +		if (amdgpu_freesync_vid_mode && +		    dm_new_crtc_state->stream &&  		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))  			goto skip_modeset; @@ -8783,7 +9199,14 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		if (!dm_old_crtc_state->stream)  			goto skip_modeset; -		if (dm_new_crtc_state->stream && +		/* Unset freesync video if it was active before */ +		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { +			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; +			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; +		} + +		/* Now check if we should set freesync video mode */ +		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&  		    is_timing_unchanged_for_freesync(new_crtc_state,  						     old_crtc_state)) {  			new_crtc_state->mode_changed = false; @@ -8795,7 +9218,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  			set_freesync_fixed_config(dm_new_crtc_state);  			goto skip_modeset; -		} else if (aconnector && +		} else if (amdgpu_freesync_vid_mode && aconnector &&  			   is_freesync_video_mode(&new_crtc_state->mode,  						  aconnector)) {  			struct drm_display_mode *high_mode; @@ -9093,7 +9516,8 @@ static int dm_update_plane_state(struct dc *dc,  				 struct drm_plane_state *old_plane_state,  				 struct drm_plane_state *new_plane_state,  				 bool enable, -				 bool *lock_and_validation_needed) +				 bool *lock_and_validation_needed, +				 bool *is_top_most_overlay)  {  	struct dm_atomic_state *dm_state = NULL; @@ -9201,6 +9625,14 @@ static int dm_update_plane_state(struct dc *dc,  		if (!dc_new_plane_state)  			return -ENOMEM; +		/* Block top most plane from being a video plane */ +		if (plane->type == DRM_PLANE_TYPE_OVERLAY) { +			if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) +				return -EINVAL; +			else +				*is_top_most_overlay = false; +		} +  		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",  				 plane->base.id, new_plane_crtc->base.id); @@ -9344,7 +9776,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm  			continue;  		aconnector = to_amdgpu_dm_connector(connector); -		if (!aconnector->port || !aconnector->mst_port) +		if (!aconnector->mst_output_port || !aconnector->mst_root)  			aconnector = NULL;  		else  			break; @@ -9353,7 +9785,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm  	if (!aconnector)  		return 0; -	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);  }  #endif @@ -9397,8 +9829,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	enum dc_status status;  	int ret, i;  	bool lock_and_validation_needed = false; +	bool is_top_most_overlay = true;  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;  #if defined(CONFIG_DRM_AMD_DC_DCN) +	struct drm_dp_mst_topology_mgr *mgr; +	struct drm_dp_mst_topology_state *mst_state;  	struct dsc_mst_fairness_vars vars[MAX_PIPES];  #endif @@ -9426,8 +9861,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  			goto fail;  		} -		if (dm_old_con_state->abm_level != -		    dm_new_con_state->abm_level) +		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || +		    dm_old_con_state->scaling != dm_new_con_state->scaling)  			new_crtc_state->connectors_changed = true;  	} @@ -9521,7 
+9956,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in  	 * atomic state, so call drm helper to normalize zpos.  	 */ -	drm_atomic_normalize_zpos(dev, state); +	ret = drm_atomic_normalize_zpos(dev, state); +	if (ret) { +		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); +		goto fail; +	}  	/* Remove exiting planes if they are modified */  	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { @@ -9529,7 +9968,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  					    old_plane_state,  					    new_plane_state,  					    false, -					    &lock_and_validation_needed); +					    &lock_and_validation_needed, +					    &is_top_most_overlay);  		if (ret) {  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");  			goto fail; @@ -9568,7 +10008,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  					    old_plane_state,  					    new_plane_state,  					    true, -					    &lock_and_validation_needed); +					    &lock_and_validation_needed, +					    &is_top_most_overlay);  		if (ret) {  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");  			goto fail; @@ -9647,6 +10088,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  		lock_and_validation_needed = true;  	} +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* set the slot info for each mst_state based on the link encoding format */ +	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { +		struct amdgpu_dm_connector *aconnector; +		struct drm_connector *connector; +		struct drm_connector_list_iter iter; +		u8 link_coding_cap; + +		drm_connector_list_iter_begin(dev, &iter); +		drm_for_each_connector_iter(connector, &iter) { +			if (connector->index == mst_state->mgr->conn_base_id) { +				aconnector = to_amdgpu_dm_connector(connector); +				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); +				drm_dp_mst_update_slots(mst_state, link_coding_cap); + +				break; +			} +		} +		drm_connector_list_iter_end(&iter); +	} +#endif +  	/**  	 * Streams and planes are reset when there are changes that affect  	 * bandwidth. 
Anything that affects bandwidth needs to go through @@ -9781,7 +10244,7 @@ fail:  static bool is_dp_capable_without_timing_msa(struct dc *dc,  					     struct amdgpu_dm_connector *amdgpu_dm_connector)  { -	uint8_t dpcd_data; +	u8 dpcd_data;  	bool capable = false;  	if (amdgpu_dm_connector->dc_link && @@ -9800,7 +10263,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,  static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,  		unsigned int offset,  		unsigned int total_length, -		uint8_t *data, +		u8 *data,  		unsigned int length,  		struct amdgpu_hdmi_vsdb_info *vsdb)  { @@ -9855,7 +10318,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, -		uint8_t *edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	int i; @@ -9896,7 +10359,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, -		uint8_t *edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	int i; @@ -9912,21 +10375,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, -		uint8_t *edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); +	bool ret; +	mutex_lock(&adev->dm.dc_lock);  	if (adev->dm.dmub_srv) -		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); +		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);  	else -		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); +		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); +	mutex_unlock(&adev->dm.dc_lock); +	return ret;  }  static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,  		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)  { -	uint8_t *edid_ext = NULL; +	u8 *edid_ext = NULL;  	int i;  	bool valid_vsdb_found = false; @@ -9981,6 +10448,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,  	struct amdgpu_device *adev = drm_to_adev(dev);  	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};  	bool freesync_capable = false; +	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;  	if (!connector->state) {  		DRM_ERROR("%s - Connector has no state", __func__); @@ -10073,6 +10541,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,  		}  	} +	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + +	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { +		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); +		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { + +			amdgpu_dm_connector->pack_sdp_v1_3 = true; +			amdgpu_dm_connector->as_type = as_type; +			amdgpu_dm_connector->vsdb_info = vsdb_info; + +			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; +			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; +			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) +				freesync_capable = true; + +			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; +			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; +		} +	} +  update:  	if (dm_con_state)  		dm_con_state->freesync_capable = freesync_capable; @@ -10102,7 +10590,7 @@ 
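Aside (illustration only): in the FREESYNC_TYPE_PCON_IN_WHITELIST path above, FreeSync capability ultimately hinges on the refresh range parsed from the AMD VSDB; the connector only counts as capable when the span between max and min refresh exceeds 10 Hz. A stand-alone C illustration of that check, with a simplified struct and invented names:

#include <stdbool.h>
#include <stdio.h>

struct vsdb_range {
	unsigned int min_refresh_rate_hz;
	unsigned int max_refresh_rate_hz;
};

/* Same heuristic as the driver code: a usable VRR window must be wider than 10 Hz. */
static bool vsdb_is_freesync_capable(const struct vsdb_range *r)
{
	return (r->max_refresh_rate_hz - r->min_refresh_rate_hz) > 10;
}

int main(void)
{
	struct vsdb_range narrow = { .min_refresh_rate_hz = 58, .max_refresh_rate_hz = 60 };
	struct vsdb_range wide   = { .min_refresh_rate_hz = 48, .max_refresh_rate_hz = 144 };

	printf("%d %d\n", vsdb_is_freesync_capable(&narrow),
			  vsdb_is_freesync_capable(&wide)); /* prints "0 1" */
	return 0;
}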
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)  }  void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, -		       uint32_t value, const char *func_name) +		       u32 value, const char *func_name)  {  #ifdef DM_CHECK_ADDR_0  	if (address == 0) { @@ -10117,7 +10605,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,  uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,  			  const char *func_name)  { -	uint32_t value; +	u32 value;  #ifdef DM_CHECK_ADDR_0  	if (address == 0) {  		DC_ERR("invalid register read; address = 0\n"); @@ -10139,91 +10627,95 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,  	return value;  } -static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, -						struct dc_context *ctx, -						uint8_t status_type, -						uint32_t *operation_result) +int amdgpu_dm_process_dmub_aux_transfer_sync( +		struct dc_context *ctx, +		unsigned int link_index, +		struct aux_payload *payload, +		enum aux_return_code_type *operation_result)  {  	struct amdgpu_device *adev = ctx->driver_context; -	int return_status = -1;  	struct dmub_notification *p_notify = adev->dm.dmub_notify; +	int ret = -1; -	if (is_cmd_aux) { -		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { -			return_status = p_notify->aux_reply.length; -			*operation_result = p_notify->result; -		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) { -			*operation_result = AUX_RET_ERROR_TIMEOUT; -		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { -			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; -		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) { -			*operation_result = AUX_RET_ERROR_INVALID_REPLY; -		} else { -			*operation_result = AUX_RET_ERROR_UNKNOWN; +	mutex_lock(&adev->dm.dpia_aux_lock); +	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { +		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; +		goto out; + 	} + +	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { +		DRM_ERROR("wait_for_completion_timeout timeout!"); +		*operation_result = AUX_RET_ERROR_TIMEOUT; +		goto out; +	} + +	if (p_notify->result != AUX_RET_SUCCESS) { +		/* +		 * Transient states before tunneling is enabled could +		 * lead to this error. We can ignore this for now. 
+		 */ +		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { +			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", +					payload->address, payload->length, +					p_notify->result);  		} -	} else { -		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { -			return_status = 0; -			*operation_result = p_notify->sc_status; -		} else { -			*operation_result = SET_CONFIG_UNKNOWN_ERROR; +		*operation_result = AUX_RET_ERROR_INVALID_REPLY; +		goto out; +	} + + +	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; +	if (!payload->write && p_notify->aux_reply.length && +			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { + +		if (payload->length != p_notify->aux_reply.length) { +			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", +				p_notify->aux_reply.length, +					payload->address, payload->length); +			*operation_result = AUX_RET_ERROR_INVALID_REPLY; +			goto out;  		} + +		memcpy(payload->data, p_notify->aux_reply.data, +				p_notify->aux_reply.length);  	} -	return return_status; +	/* success */ +	ret = p_notify->aux_reply.length; +	*operation_result = p_notify->result; +out: +	reinit_completion(&adev->dm.dmub_aux_transfer_done); +	mutex_unlock(&adev->dm.dpia_aux_lock); +	return ret;  } -int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, -	unsigned int link_index, void *cmd_payload, void *operation_result) +int amdgpu_dm_process_dmub_set_config_sync( +		struct dc_context *ctx, +		unsigned int link_index, +		struct set_config_cmd_payload *payload, +		enum set_config_status *operation_result)  {  	struct amdgpu_device *adev = ctx->driver_context; -	int ret = 0; +	bool is_cmd_complete; +	int ret; -	if (is_cmd_aux) { -		dc_process_dmub_aux_transfer_async(ctx->dc, -			link_index, (struct aux_payload *)cmd_payload); -	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index, -					(struct set_config_cmd_payload *)cmd_payload, -					adev->dm.dmub_notify)) { -		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, -					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, -					(uint32_t *)operation_result); -	} +	mutex_lock(&adev->dm.dpia_aux_lock); +	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, +			link_index, payload, adev->dm.dmub_notify); -	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ); -	if (ret == 0) { +	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { +		ret = 0; +		*operation_result = adev->dm.dmub_notify->sc_status; +	} else {  		DRM_ERROR("wait_for_completion_timeout timeout!"); -		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, -				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT, -				(uint32_t *)operation_result); -	} - -	if (is_cmd_aux) { -		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { -			struct aux_payload *payload = (struct aux_payload *)cmd_payload; - -			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; -			if (!payload->write && adev->dm.dmub_notify->aux_reply.length && -			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { - -				if (payload->length != adev->dm.dmub_notify->aux_reply.length) { -					DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n", -							payload->address, payload->length, -							adev->dm.dmub_notify->aux_reply.length); -					return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx, -							DMUB_ASYNC_TO_SYNC_ACCESS_INVALID, -							(uint32_t *)operation_result); -				} - -				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, -				       
adev->dm.dmub_notify->aux_reply.length); -			} -		} +		ret = -1; +		*operation_result = SET_CONFIG_UNKNOWN_ERROR;  	} -	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, -			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, -			(uint32_t *)operation_result); +	if (!is_cmd_complete) +		reinit_completion(&adev->dm.dmub_aux_transfer_done); +	mutex_unlock(&adev->dm.dpia_aux_lock); +	return ret;  }  /* @@ -10235,8 +10727,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context   */  bool check_seamless_boot_capability(struct amdgpu_device *adev)  { -	switch (adev->asic_type) { -	case CHIP_VANGOGH: +	switch (adev->ip_versions[DCE_HWIP][0]) { +	case IP_VERSION(3, 0, 1):  		if (!adev->mman.keep_stolen_vga_memory)  			return true;  		break; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 635c398fcefe..ed5cbe9da40c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -31,6 +31,7 @@  #include <drm/drm_connector.h>  #include <drm/drm_crtc.h>  #include <drm/drm_plane.h> +#include "link_service_types.h"  /*   * This file contains the definition for amdgpu_display_manager @@ -58,8 +59,11 @@  #include "irq_types.h"  #include "signal_types.h"  #include "amdgpu_dm_crc.h" +#include "mod_info_packet.h"  struct aux_payload; +struct set_config_cmd_payload;  enum aux_return_code_type; +enum set_config_status;  /* Forward declarations */  struct amdgpu_device; @@ -360,13 +364,6 @@ struct amdgpu_display_manager {  	struct mutex audio_lock;  	/** -	 * @vblank_lock: -	 * -	 * Guards access to deferred vblank work state. -	 */ -	spinlock_t vblank_lock; - -	/**  	 * @audio_component:  	 *  	 * Used to notify ELD changes to sound driver. @@ -499,11 +496,12 @@ struct amdgpu_display_manager {  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)  	/** -	 * @crc_rd_wrk: +	 * @secure_display_ctxs:  	 * -	 * Work to be executed in a separate thread to communicate with PSP. +	 * Store the ROI information and the work_struct to command dmub and psp for +	 * all crtcs.  	 */ -	struct crc_rd_work *crc_rd_wrk; +	struct secure_display_context *secure_display_ctxs;  #endif  	/**  	 * @hpd_rx_offload_wq: @@ -549,6 +547,13 @@ struct amdgpu_display_manager {  	 * occurred on certain intel platform  	 */  	bool aux_hpd_discon_quirk; + +	/** +	 * @dpia_aux_lock: +	 * +	 * Guards access to DPIA AUX +	 */ +	struct mutex dpia_aux_lock;  };  enum dsc_clock_force_state { @@ -573,6 +578,36 @@ enum mst_progress_status {  	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),  }; +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. + */ +struct amdgpu_hdmi_vsdb_info { +	/** +	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be +	 * used to determine which Vendor Specific InfoFrame (VSIF) to send. +	 */ +	unsigned int amd_vsdb_version; + +	/** +	 * @freesync_supported: FreeSync Supported. +	 */ +	bool freesync_supported; + +	/** +	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. 
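Aside (illustration only): on the DPIA AUX read path above, the DMUB reply is only copied back when the sink ACKed the transaction and the reply length equals the requested length; anything else is reported as AUX_RET_ERROR_INVALID_REPLY. A stand-alone C sketch of just that validation step, with simplified types and an assumed ACK value rather than the real DC definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AUX_REPLY_ACK 0x00 /* stand-in for AUX_TRANSACTION_REPLY_AUX_ACK */

struct aux_reply {         /* simplified view of what the DMUB notification carries */
	uint8_t command;
	uint8_t length;
	uint8_t data[16];
};

/* Copy the reply only if it was ACKed and is exactly as long as requested. */
static bool aux_take_read_reply(const struct aux_reply *reply,
				uint8_t requested_len, uint8_t *out)
{
	if (reply->command != AUX_REPLY_ACK)
		return false;
	if (reply->length != requested_len)
		return false; /* driver logs this and reports an invalid-reply error */
	memcpy(out, reply->data, reply->length);
	return true;
}

int main(void)
{
	struct aux_reply r = { .command = AUX_REPLY_ACK, .length = 4,
			       .data = { 0x11, 0x22, 0x33, 0x44 } };
	uint8_t buf[16];

	printf("%d\n", aux_take_read_reply(&r, 4, buf)); /* 1 */
	printf("%d\n", aux_take_read_reply(&r, 8, buf)); /* 0: length mismatch */
	return 0;
}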
+	 */ +	unsigned int min_refresh_rate_hz; + +	/** +	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz +	 */ +	unsigned int max_refresh_rate_hz; +}; +  struct amdgpu_dm_connector {  	struct drm_connector base; @@ -601,8 +636,8 @@ struct amdgpu_dm_connector {  	/* DM only */  	struct drm_dp_mst_topology_mgr mst_mgr;  	struct amdgpu_dm_dp_aux dm_dp_aux; -	struct drm_dp_mst_port *port; -	struct amdgpu_dm_connector *mst_port; +	struct drm_dp_mst_port *mst_output_port; +	struct amdgpu_dm_connector *mst_root;  	struct drm_dp_aux *dsc_aux;  	/* TODO see if we can merge with ddc_bus or make a dm_connector */  	struct amdgpu_i2c_adapter *i2c; @@ -641,6 +676,15 @@ struct amdgpu_dm_connector {  	/* Record progress status of mst*/  	uint8_t mst_status; + +	/* Automated testing */ +	bool timing_changed; +	struct dc_crtc_timing *timing_requested; + +	/* Adaptive Sync */ +	bool pack_sdp_v1_3; +	enum adaptive_sync_type as_type; +	struct amdgpu_hdmi_vsdb_info vsdb_info;  };  static inline void amdgpu_dm_set_mst_status(uint8_t *status, @@ -711,37 +755,6 @@ struct dm_connector_state {  	uint64_t pbn;  }; -/** - * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info - * - * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this - * struct is useful to keep track of the display-specific information about - * FreeSync. - */ -struct amdgpu_hdmi_vsdb_info { -	/** -	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be -	 * used to determine which Vendor Specific InfoFrame (VSIF) to send. -	 */ -	unsigned int amd_vsdb_version; - -	/** -	 * @freesync_supported: FreeSync Supported. -	 */ -	bool freesync_supported; - -	/** -	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. -	 */ -	unsigned int min_refresh_rate_hz; - -	/** -	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz -	 */ -	unsigned int max_refresh_rate_hz; -}; - -  #define to_dm_connector_state(x)\  	container_of((x), struct dm_connector_state, base) @@ -792,9 +805,11 @@ void amdgpu_dm_update_connector_after_detect(  extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; -int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, -					struct dc_context *ctx, unsigned int link_index, -					void *payload, void *operation_result); +int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, +					struct aux_payload *payload, enum aux_return_code_type *operation_result); + +int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, +					struct set_config_cmd_payload *payload, enum set_config_status *operation_result);  bool check_seamless_boot_capability(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 8a441a22c46e..27711743c22c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -89,50 +89,85 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	acrtc->dm_irq_params.crc_window.x_start = 0; -	acrtc->dm_irq_params.crc_window.y_start = 0; -	acrtc->dm_irq_params.crc_window.x_end = 0; -	acrtc->dm_irq_params.crc_window.y_end = 0; -	acrtc->dm_irq_params.crc_window.activated = false; -	acrtc->dm_irq_params.crc_window.update_win = false; -	acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; +	
acrtc->dm_irq_params.window_param.x_start = 0; +	acrtc->dm_irq_params.window_param.y_start = 0; +	acrtc->dm_irq_params.window_param.x_end = 0; +	acrtc->dm_irq_params.window_param.y_end = 0; +	acrtc->dm_irq_params.window_param.activated = false; +	acrtc->dm_irq_params.window_param.update_win = false; +	acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;  	spin_unlock_irq(&drm_dev->event_lock);  }  static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)  { -	struct crc_rd_work *crc_rd_wrk; -	struct amdgpu_device *adev; +	struct secure_display_context *secure_display_ctx;  	struct psp_context *psp; -	struct securedisplay_cmd *securedisplay_cmd; +	struct ta_securedisplay_cmd *securedisplay_cmd;  	struct drm_crtc *crtc; -	uint8_t phy_id; +	struct dc_stream_state *stream; +	uint8_t phy_inst;  	int ret; -	crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); -	spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); -	crtc = crc_rd_wrk->crtc; +	secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); +	crtc = secure_display_ctx->crtc;  	if (!crtc) { -		spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);  		return;  	} -	adev = drm_to_adev(crtc->dev); -	psp = &adev->psp; -	phy_id = crc_rd_wrk->phy_inst; -	spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); +	psp = &drm_to_adev(crtc->dev)->psp; + +	if (!psp->securedisplay_context.context.initialized) { +		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n"); +		return; +	} + +	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; +	phy_inst = stream->link->link_enc_hw_inst; + +	/* need lock for multiple crtcs to use the command buffer */ +	mutex_lock(&psp->securedisplay_context.mutex);  	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,  						TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); -	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = -						phy_id; + +	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst; + +	/* PSP TA is expected to finish data transmission over I2C within current frame, +	 * even there are up to 4 crtcs request to send in this frame. 
+	 */  	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); +  	if (!ret) {  		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {  			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);  		}  	} + +	mutex_unlock(&psp->securedisplay_context.mutex); +} + +static void +amdgpu_dm_forward_crc_window(struct work_struct *work) +{ +	struct secure_display_context *secure_display_ctx; +	struct amdgpu_display_manager *dm; +	struct drm_crtc *crtc; +	struct dc_stream_state *stream; + +	secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work); +	crtc = secure_display_ctx->crtc; + +	if (!crtc) +		return; + +	dm = &drm_to_adev(crtc->dev)->dm; +	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; + +	mutex_lock(&dm->dc_lock); +	dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false); +	mutex_unlock(&dm->dc_lock);  }  bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) @@ -142,7 +177,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)  	bool ret = false;  	spin_lock_irq(&drm_dev->event_lock); -	ret = acrtc->dm_irq_params.crc_window.activated; +	ret = acrtc->dm_irq_params.window_param.activated;  	spin_unlock_irq(&drm_dev->event_lock);  	return ret; @@ -169,6 +204,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,  					struct dm_crtc_state *dm_crtc_state,  					enum amdgpu_dm_pipe_crc_source source)  { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +	int i; +#endif  	struct amdgpu_device *adev = drm_to_adev(crtc->dev);  	struct dc_stream_state *stream_state = dm_crtc_state->stream;  	bool enable = amdgpu_dm_is_valid_crc_source(source); @@ -180,19 +218,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,  	mutex_lock(&adev->dm.dc_lock); -	/* Enable CRTC CRC generation if necessary. */ +	/* Enable or disable CRTC CRC generation */  	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +		/* Disable secure_display if it was enabled */  		if (!enable) { -			if (adev->dm.crc_rd_wrk) { -				flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); -				spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); -				if (adev->dm.crc_rd_wrk->crtc == crtc) { -					dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc, -									dm_crtc_state->stream); -					adev->dm.crc_rd_wrk->crtc = NULL; +			for (i = 0; i < adev->mode_info.num_crtc; i++) { +				if (adev->dm.secure_display_ctxs[i].crtc == crtc) { +					/* stop ROI update on this crtc */ +					flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); +					flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); +					dc_stream_forward_crc_window(stream_state, NULL, true);  				} -				spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);  			}  		}  #endif @@ -307,7 +344,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)  			goto cleanup;  		} -		aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux; +		aux = (aconn->mst_output_port) ? 
&aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;  		if (!aux) {  			DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); @@ -325,6 +362,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)  	}  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +	/* Reset secure_display when we change crc source from debugfs */  	amdgpu_dm_set_crc_window_default(crtc);  #endif @@ -434,19 +472,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)  void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)  { -	struct dc_stream_state *stream_state;  	struct drm_device *drm_dev = NULL;  	enum amdgpu_dm_pipe_crc_source cur_crc_src;  	struct amdgpu_crtc *acrtc = NULL;  	struct amdgpu_device *adev = NULL; -	struct crc_rd_work *crc_rd_wrk = NULL; -	struct crc_params *crc_window = NULL, tmp_window; -	unsigned long flags1, flags2; -	struct crtc_position position; -	uint32_t v_blank; -	uint32_t v_back_porch; -	uint32_t crc_window_latch_up_line; -	struct dc_crtc_timing *timing_out; +	struct secure_display_context *secure_display_ctx = NULL; +	unsigned long flags1;  	if (crtc == NULL)  		return; @@ -456,95 +487,76 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)  	drm_dev = crtc->dev;  	spin_lock_irqsave(&drm_dev->event_lock, flags1); -	stream_state = acrtc->dm_irq_params.stream;  	cur_crc_src = acrtc->dm_irq_params.crc_src; -	timing_out = &stream_state->timing;  	/* Early return if CRC capture is not enabled. */ -	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) +	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || +		!dm_is_crc_source_crtc(cur_crc_src))  		goto cleanup; -	if (dm_is_crc_source_crtc(cur_crc_src)) { -		if (acrtc->dm_irq_params.crc_window.activated) { -			if (acrtc->dm_irq_params.crc_window.update_win) { -				if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) { -					acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; -					goto cleanup; -				} -				crc_window = &tmp_window; - -				tmp_window.windowa_x_start = -							acrtc->dm_irq_params.crc_window.x_start; -				tmp_window.windowa_y_start = -							acrtc->dm_irq_params.crc_window.y_start; -				tmp_window.windowa_x_end = -							acrtc->dm_irq_params.crc_window.x_end; -				tmp_window.windowa_y_end = -							acrtc->dm_irq_params.crc_window.y_end; -				tmp_window.windowb_x_start = -							acrtc->dm_irq_params.crc_window.x_start; -				tmp_window.windowb_y_start = -							acrtc->dm_irq_params.crc_window.y_start; -				tmp_window.windowb_x_end = -							acrtc->dm_irq_params.crc_window.x_end; -				tmp_window.windowb_y_end = -							acrtc->dm_irq_params.crc_window.y_end; - -				dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc, -									stream_state, crc_window); - -				acrtc->dm_irq_params.crc_window.update_win = false; - -				dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1, -					&position.vertical_count, -					&position.nominal_vcount); - -				v_blank = timing_out->v_total - timing_out->v_border_top - -					timing_out->v_addressable - timing_out->v_border_bottom; - -				v_back_porch = v_blank - timing_out->v_front_porch - -					timing_out->v_sync_width; - -				crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width; - -				/* take 3 lines margin*/ -				if ((position.vertical_count + 3) >= crc_window_latch_up_line) -					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; -				else -					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; -			} else { -				if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) { -				
	if (adev->dm.crc_rd_wrk) { -						crc_rd_wrk = adev->dm.crc_rd_wrk; -						spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); -						crc_rd_wrk->phy_inst = -							stream_state->link->link_enc_hw_inst; -						spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); -						schedule_work(&crc_rd_wrk->notify_ta_work); -					} -				} else { -					acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; -				} -			} -		} +	if (!acrtc->dm_irq_params.window_param.activated) +		goto cleanup; + +	if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { +		acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; +		goto cleanup; +	} + +	secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id]; +	if (WARN_ON(secure_display_ctx->crtc != crtc)) { +		/* We have set the crtc when creating secure_display_context, +		 * don't expect it to be changed here. +		 */ +		secure_display_ctx->crtc = crtc; +	} + +	if (acrtc->dm_irq_params.window_param.update_win) { +		/* prepare work for dmub to update ROI */ +		secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start; +		secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start; +		secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end - +								acrtc->dm_irq_params.window_param.x_start; +		secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end - +								acrtc->dm_irq_params.window_param.y_start; +		schedule_work(&secure_display_ctx->forward_roi_work); + +		acrtc->dm_irq_params.window_param.update_win = false; + +		/* Statically skip 1 frame, because we may need to wait below things +		 * before sending ROI to dmub: +		 * 1. We defer the work by using system workqueue. +		 * 2. We may need to wait for dc_lock before accessing dmub. +		 */ +		acrtc->dm_irq_params.window_param.skip_frame_cnt = 1; + +	} else { +		/* prepare work for psp to read ROI/CRC and send to I2C */ +		schedule_work(&secure_display_ctx->notify_ta_work);  	}  cleanup:  	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);  } -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) +struct secure_display_context * +amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)  { -	struct crc_rd_work *crc_rd_wrk = NULL; +	struct secure_display_context *secure_display_ctxs = NULL; +	int i; -	crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); +	secure_display_ctxs = kcalloc(adev->mode_info.num_crtc, +				      sizeof(struct secure_display_context), +				      GFP_KERNEL); -	if (!crc_rd_wrk) +	if (!secure_display_ctxs)  		return NULL; -	spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); -	INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); +	for (i = 0; i < adev->mode_info.num_crtc; i++) { +		INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); +		INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); +		secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; +	} -	return crc_rd_wrk; +	return secure_display_ctxs;  }  #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index f07850db60a6..935adca6f048 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -40,12 +40,12 @@ enum amdgpu_dm_pipe_crc_source {  };  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -struct crc_window_parm { +struct crc_window_param {  	uint16_t x_start;  	uint16_t y_start;  	uint16_t x_end;  	
uint16_t y_end; -	/* CRC windwo is activated or not*/ +	/* CRC window is activated or not*/  	bool activated;  	/* Update crc window during vertical blank or not */  	bool update_win; @@ -53,12 +53,17 @@ struct crc_window_parm {  	int skip_frame_cnt;  }; -struct crc_rd_work { +struct secure_display_context { +	/* work to notify PSP TA*/  	struct work_struct notify_ta_work; -	/* To protect crc_rd_work carried fields*/ -	spinlock_t crc_rd_work_lock; + +	/* work to forward ROI to dmcu/dmub */ +	struct work_struct forward_roi_work; +  	struct drm_crtc *crtc; -	uint8_t phy_inst; + +	/* Region of Interest (ROI) */ +	struct rect rect;  };  #endif @@ -90,11 +95,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY  bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);  void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); +struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts( +						struct amdgpu_device *adev);  #else  #define amdgpu_dm_crc_window_is_activated(x)  #define amdgpu_dm_crtc_handle_crc_window_irq(x) -#define amdgpu_dm_crtc_secure_display_create_work() +#define amdgpu_dm_crtc_secure_display_create_contexts()  #endif  #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 64dd02970292..dc4f37240beb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -77,6 +77,9 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)  	struct amdgpu_device *adev = drm_to_adev(crtc->dev);  	int rc; +	if (acrtc->otg_inst == -1) +		return 0; +  	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;  	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; @@ -105,8 +108,7 @@ static void vblank_control_worker(struct work_struct *work)  	else if (dm->active_vblank_irq_count)  		dm->active_vblank_irq_count--; -	dc_allow_idle_optimizations( -		dm->dc, dm->active_vblank_irq_count == 0 ? 
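Aside (illustration only): the secure-display rework converts the debugfs CRC window coordinates (x_start/y_start/x_end/y_end in window_param) into the rectangle that is forwarded to DMUB as the region of interest. A stand-alone C sketch of that conversion, with a simplified local rect type in place of DC's struct rect:

#include <stdint.h>
#include <stdio.h>

struct window_param {	/* as configured via the crc_win_* debugfs files */
	uint16_t x_start, y_start;
	uint16_t x_end, y_end;
};

struct roi_rect {	/* simplified stand-in for DC's struct rect */
	int x, y, width, height;
};

/* Width/height are derived from the end coordinates, as in the IRQ handler. */
static struct roi_rect window_to_rect(const struct window_param *w)
{
	struct roi_rect r = {
		.x      = w->x_start,
		.y      = w->y_start,
		.width  = w->x_end - w->x_start,
		.height = w->y_end - w->y_start,
	};
	return r;
}

int main(void)
{
	struct window_param w = { .x_start = 100, .y_start = 50, .x_end = 420, .y_end = 290 };
	struct roi_rect r = window_to_rect(&w);

	printf("ROI: %dx%d at (%d,%d)\n", r.width, r.height, r.x, r.y); /* 320x240 at (100,50) */
	return 0;
}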
true : false); +	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);  	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); @@ -127,6 +129,9 @@ static void vblank_control_worker(struct work_struct *work)  				amdgpu_dm_psr_disable(vblank_work->stream);  		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&  			   !vblank_work->stream->link->psr_settings.psr_allow_active && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +			   !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && +#endif  			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {  			amdgpu_dm_psr_enable(vblank_work->stream);  		} @@ -149,6 +154,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)  	struct vblank_control_work *work;  	int rc = 0; +	if (acrtc->otg_inst == -1) +		goto skip; +  	if (enable) {  		/* vblank irq on -> Only need vupdate irq in vrr mode */  		if (amdgpu_dm_vrr_active(acrtc_state)) @@ -166,6 +174,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)  	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))  		return -EBUSY; +skip:  	if (amdgpu_in_reset(adev))  		return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index ee242d9d8b06..09a3efa517da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -34,9 +34,13 @@  #include "dmub/dmub_srv.h"  #include "resource.h"  #include "dsc.h" -#include "dc_link_dp.h"  #include "link_hwss.h"  #include "dc/dc_dmub_srv.h" +#include "link/protocols/link_dp_capability.h" + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +#include "amdgpu_dm_psr.h" +#endif  struct dmub_debugfs_trace_header {  	uint32_t entry_count; @@ -299,6 +303,8 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,  	case LINK_RATE_HIGH2:  	case LINK_RATE_HIGH3:  	case LINK_RATE_UHBR10: +	case LINK_RATE_UHBR13_5: +	case LINK_RATE_UHBR20:  		break;  	default:  		valid_input = false; @@ -413,67 +419,38 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,  	return result;  } -static int dp_lttpr_status_show(struct seq_file *m, void *d) +static int dp_lttpr_status_show(struct seq_file *m, void *unused)  { -	char *data; -	struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private; -	struct dc_link *link = connector->dc_link; -	uint32_t read_size = 1; -	uint8_t repeater_count = 0; +	struct drm_connector *connector = m->private; +	struct amdgpu_dm_connector *aconnector = +		to_amdgpu_dm_connector(connector); +	struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps; -	data = kzalloc(read_size, GFP_KERNEL); -	if (!data) -		return 0; +	if (connector->status != connector_status_connected) +		return -ENODEV; -	dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size); +	seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n", +		   dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt), +		   caps.phy_repeater_cnt); -	switch ((uint8_t)*data) { -	case 0x80: -		repeater_count = 1; -		break; -	case 0x40: -		repeater_count = 2; -		break; -	case 0x20: -		repeater_count = 3; -		break; -	case 0x10: -		repeater_count = 4; -		break; -	case 0x8: -		repeater_count = 5; -		break; -	case 0x4: -		repeater_count = 6; -		break; -	case 0x2: -		repeater_count = 7; +	seq_puts(m, "phy repeater mode: "); + +	switch (caps.mode) { +	case DP_PHY_REPEATER_MODE_TRANSPARENT: +		
seq_puts(m, "transparent");  		break; -	case 0x1: -		repeater_count = 8; +	case DP_PHY_REPEATER_MODE_NON_TRANSPARENT: +		seq_puts(m, "non-transparent");  		break; -	case 0x0: -		repeater_count = 0; +	case 0x00: +		seq_puts(m, "non lttpr");  		break;  	default: -		repeater_count = (uint8_t)*data; +		seq_printf(m, "read error (raw: 0x%x)", caps.mode);  		break;  	} -	seq_printf(m, "phy repeater count: %d\n", repeater_count); - -	dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size); - -	if ((uint8_t)*data == 0x55) -		seq_printf(m, "phy repeater mode: transparent\n"); -	else if ((uint8_t)*data == 0xAA) -		seq_printf(m, "phy repeater mode: non-transparent\n"); -	else if ((uint8_t)*data == 0x00) -		seq_printf(m, "phy repeater mode: non lttpr\n"); -	else -		seq_printf(m, "phy repeater mode: read error\n"); - -	kfree(data); +	seq_puts(m, "\n");  	return 0;  } @@ -1186,7 +1163,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)  			break;  		}  		dpcd_caps = aconnector->dc_link->dpcd_caps; -		if (aconnector->port) { +		if (aconnector->mst_output_port) {  			/* aconnector sets dsc_aux during get_modes call  			 * if MST connector has it means it can either  			 * enable DSC on the sink device or on MST branch @@ -1273,14 +1250,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,  	mutex_lock(&aconnector->hpd_lock);  	/* Don't support for mst end device*/ -	if (aconnector->mst_port) { +	if (aconnector->mst_root) {  		mutex_unlock(&aconnector->hpd_lock);  		return -EINVAL;  	}  	if (param[0] == 1) { -		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) && +		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) &&  			new_connection_type != dc_connection_none)  			goto unlock; @@ -1317,7 +1294,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,  		/* If the aconnector is the root node in mst topology */  		if (aconnector->mst_mgr.mst_state == true) -			reset_cur_dp_mst_topology(link); +			dc_link_reset_cur_dp_mst_topology(link);  		drm_modeset_lock_all(dev);  		dm_restore_drm_connector_state(dev, connector); @@ -1369,16 +1346,11 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1475,12 +1447,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx || !pipe_ctx->stream) +	if (!pipe_ctx->stream)  		goto done;  	// Get CRTC state @@ -1560,16 +1532,11 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		
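Aside (illustration only): the open-coded switch removed above documents the one-hot DPCD encoding of PHY_REPEATER_CNT (0x80 means one LTTPR, halving down to 0x01 for eight, 0x00 for none); the new debugfs code delegates to dp_parse_lttpr_repeater_count(). A stand-alone C reimplementation of essentially the same decode, for illustration only:

#include <stdint.h>
#include <stdio.h>

/*
 * DPCD PHY_REPEATER_CNT uses a one-hot encoding:
 * 0x80 = 1 repeater, 0x40 = 2, ... 0x01 = 8, 0x00 = none.
 */
static uint8_t lttpr_repeater_count(uint8_t cnt)
{
	uint8_t mask = 0x80;
	uint8_t count;

	for (count = 1; count <= 8; count++, mask >>= 1) {
		if (cnt == mask)
			return count;
	}
	return 0; /* 0x00 or an unexpected value: treat as no repeaters */
}

int main(void)
{
	printf("%u %u %u\n",
	       lttpr_repeater_count(0x80),  /* 1 */
	       lttpr_repeater_count(0x02),  /* 7 */
	       lttpr_repeater_count(0x00)); /* 0 */
	return 0;
}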
dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1664,12 +1631,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx || !pipe_ctx->stream) +	if (!pipe_ctx->stream)  		goto done;  	// Safely get CRTC state @@ -1749,16 +1716,11 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -1853,12 +1815,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx || !pipe_ctx->stream) +	if (!pipe_ctx->stream)  		goto done;  	// Get CRTC state @@ -1934,16 +1896,11 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2035,12 +1992,12 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx || !pipe_ctx->stream) +	if (!pipe_ctx->stream)  		goto done;  	// Get CRTC state @@ -2114,16 +2071,11 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2175,16 +2127,11 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2251,16 +2198,11 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = 
&aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2327,16 +2269,11 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,  	for (i = 0; i < MAX_PIPES; i++) {  		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && +		if (pipe_ctx->stream &&  		    pipe_ctx->stream->link == aconnector->dc_link)  			break;  	} -	if (!pipe_ctx) { -		kfree(rd_buf); -		return -ENXIO; -	} -  	dsc = pipe_ctx->stream_res.dsc;  	if (dsc)  		dsc->funcs->dsc_read_state(dsc, &dsc_state); @@ -2572,13 +2509,13 @@ static int dp_is_mst_connector_show(struct seq_file *m, void *unused)  	if (aconnector->mst_mgr.mst_state) {  		role = "root"; -	} else if (aconnector->mst_port && -		aconnector->mst_port->mst_mgr.mst_state) { +	} else if (aconnector->mst_root && +		aconnector->mst_root->mst_mgr.mst_state) {  		role = "end"; -		mgr = &aconnector->mst_port->mst_mgr; -		port = aconnector->port; +		mgr = &aconnector->mst_root->mst_mgr; +		port = aconnector->mst_output_port;  		drm_modeset_lock(&mgr->base.lock, NULL);  		if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && @@ -2633,6 +2570,25 @@ static int dp_mst_progress_status_show(struct seq_file *m, void *unused)  	return 0;  } +/* + * Reports whether the connected display is a USB4 DPIA tunneled display + * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link + */ +static int is_dpia_link_show(struct seq_file *m, void *data) +{ +	struct drm_connector *connector = m->private; +	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +	struct dc_link *link = aconnector->dc_link; + +	if (connector->status != connector_status_connected) +		return -ENODEV; + +	seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? "yes" : +				(link->ep_type == DISPLAY_ENDPOINT_PHY) ? 
"no" : "unknown"); + +	return 0; +} +  DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);  DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);  DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); @@ -2644,6 +2600,7 @@ DEFINE_SHOW_ATTRIBUTE(internal_display);  DEFINE_SHOW_ATTRIBUTE(psr_capability);  DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);  DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); +DEFINE_SHOW_ATTRIBUTE(is_dpia_link);  static const struct file_operations dp_dsc_clock_en_debugfs_fops = {  	.owner = THIS_MODULE, @@ -2788,7 +2745,8 @@ static const struct {  		{"max_bpc", &dp_max_bpc_debugfs_fops},  		{"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},  		{"is_mst_connector", &dp_is_mst_connector_fops}, -		{"mst_progress_status", &dp_mst_progress_status_fops} +		{"mst_progress_status", &dp_mst_progress_status_fops}, +		{"is_dpia_link", &is_dpia_link_fops}  };  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -3079,8 +3037,8 @@ static int crc_win_x_start_set(void *data, u64 val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val; -	acrtc->dm_irq_params.crc_window.update_win = false; +	acrtc->dm_irq_params.window_param.x_start = (uint16_t) val; +	acrtc->dm_irq_params.window_param.update_win = false;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3096,7 +3054,7 @@ static int crc_win_x_start_get(void *data, u64 *val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	*val = acrtc->dm_irq_params.crc_window.x_start; +	*val = acrtc->dm_irq_params.window_param.x_start;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3116,8 +3074,8 @@ static int crc_win_y_start_set(void *data, u64 val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val; -	acrtc->dm_irq_params.crc_window.update_win = false; +	acrtc->dm_irq_params.window_param.y_start = (uint16_t) val; +	acrtc->dm_irq_params.window_param.update_win = false;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3133,7 +3091,7 @@ static int crc_win_y_start_get(void *data, u64 *val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	*val = acrtc->dm_irq_params.crc_window.y_start; +	*val = acrtc->dm_irq_params.window_param.y_start;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3152,8 +3110,8 @@ static int crc_win_x_end_set(void *data, u64 val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val; -	acrtc->dm_irq_params.crc_window.update_win = false; +	acrtc->dm_irq_params.window_param.x_end = (uint16_t) val; +	acrtc->dm_irq_params.window_param.update_win = false;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3169,7 +3127,7 @@ static int crc_win_x_end_get(void *data, u64 *val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	*val = acrtc->dm_irq_params.crc_window.x_end; +	*val = acrtc->dm_irq_params.window_param.x_end;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3188,8 +3146,8 @@ static int crc_win_y_end_set(void *data, u64 val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val; -	acrtc->dm_irq_params.crc_window.update_win = false; +	acrtc->dm_irq_params.window_param.y_end = (uint16_t) val; +	
acrtc->dm_irq_params.window_param.update_win = false;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3205,7 +3163,7 @@ static int crc_win_y_end_get(void *data, u64 *val)  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  	spin_lock_irq(&drm_dev->event_lock); -	*val = acrtc->dm_irq_params.crc_window.y_end; +	*val = acrtc->dm_irq_params.window_param.y_end;  	spin_unlock_irq(&drm_dev->event_lock);  	return 0; @@ -3218,41 +3176,26 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,   */  static int crc_win_update_set(void *data, u64 val)  { -	struct drm_crtc *new_crtc = data; -	struct drm_crtc *old_crtc = NULL; -	struct amdgpu_crtc *new_acrtc, *old_acrtc; -	struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); -	struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; - -	if (!crc_rd_wrk) -		return 0; +	struct drm_crtc *crtc = data; +	struct amdgpu_crtc *acrtc; +	struct amdgpu_device *adev = drm_to_adev(crtc->dev);  	if (val) { +		acrtc = to_amdgpu_crtc(crtc); +		mutex_lock(&adev->dm.dc_lock); +		/* PSR may write to OTG CRC window control register, +		 * so close it before starting secure_display. +		 */ +		amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); +  		spin_lock_irq(&adev_to_drm(adev)->event_lock); -		spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); -		if (crc_rd_wrk->crtc) { -			old_crtc = crc_rd_wrk->crtc; -			old_acrtc = to_amdgpu_crtc(old_crtc); -		} -		new_acrtc = to_amdgpu_crtc(new_crtc); -		if (old_crtc && old_crtc != new_crtc) { -			old_acrtc->dm_irq_params.crc_window.activated = false; -			old_acrtc->dm_irq_params.crc_window.update_win = false; -			old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; +		acrtc->dm_irq_params.window_param.activated = true; +		acrtc->dm_irq_params.window_param.update_win = true; +		acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; -			new_acrtc->dm_irq_params.crc_window.activated = true; -			new_acrtc->dm_irq_params.crc_window.update_win = true; -			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; -			crc_rd_wrk->crtc = new_crtc; -		} else { -			new_acrtc->dm_irq_params.crc_window.activated = true; -			new_acrtc->dm_irq_params.crc_window.update_win = true; -			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; -			crc_rd_wrk->crtc = new_crtc; -		} -		spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);  		spin_unlock_irq(&adev_to_drm(adev)->event_lock); +		mutex_unlock(&adev->dm.dc_lock);  	}  	return 0; @@ -3419,12 +3362,12 @@ static int trigger_hpd_mst_set(void *data, u64 val)  			if (!aconnector->dc_link)  				continue; -			if (!aconnector->mst_port) +			if (!aconnector->mst_root)  				continue;  			link = aconnector->dc_link; -			dp_receiver_power_ctrl(link, false); -			drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false); +			dc_link_dp_receiver_power_ctrl(link, false); +			drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false);  			link->mst_stream_alloc_table.stream_count = 0;  			memset(link->mst_stream_alloc_table.stream_allocations, 0,  					sizeof(link->mst_stream_alloc_table.stream_allocations)); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 6202e31c7e3a..8e572f07ec47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,  	struct mod_hdcp_display *display = &hdcp_work[link_index].display;  	struct mod_hdcp_link *link = 
&hdcp_work[link_index].link;  	struct mod_hdcp_display_query query; +	unsigned int conn_index = aconnector->base.index;  	mutex_lock(&hdcp_w->mutex); -	hdcp_w->aconnector = aconnector; +	hdcp_w->aconnector[conn_index] = aconnector;  	query.display = NULL;  	mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); @@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,  					      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));  		} else {  			display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; -			hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +			hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;  			cancel_delayed_work(&hdcp_w->property_validate_dwork);  		} @@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,  {  	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];  	struct drm_connector_state *conn_state = aconnector->base.state; +	unsigned int conn_index = aconnector->base.index;  	mutex_lock(&hdcp_w->mutex); -	hdcp_w->aconnector = aconnector; +	hdcp_w->aconnector[conn_index] = aconnector;  	/* the removal of display will invoke auth reset -> hdcp destroy and  	 * we'd expect the Content Protection (CP) property changed back to @@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,  void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)  {  	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; +	unsigned int conn_index;  	mutex_lock(&hdcp_w->mutex);  	mod_hdcp_reset_connection(&hdcp_w->hdcp,  &hdcp_w->output);  	cancel_delayed_work(&hdcp_w->property_validate_dwork); -	hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + +	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { +		hdcp_w->encryption_status[conn_index] = +			MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +	}  	process_output(hdcp_w); @@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work)  } +  static void event_property_update(struct work_struct *work)  { -  	struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); -	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; -	struct drm_device *dev = hdcp_work->aconnector->base.dev; +	struct amdgpu_dm_connector *aconnector = NULL; +	struct drm_device *dev;  	long ret; +	unsigned int conn_index; +	struct drm_connector *connector; +	struct drm_connector_state *conn_state; -	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); -	mutex_lock(&hdcp_work->mutex); +	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { +		aconnector = hdcp_work->aconnector[conn_index]; +		if (!aconnector) +			continue; -	if (aconnector->base.state && aconnector->base.state->commit) { -		ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); +		connector = &aconnector->base; -		if (ret == 0) { -			DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED"); -			hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; -		} -	} +		/* check if display connected */ +		if (connector->status != connector_status_connected) +			continue; -	if (aconnector->base.state) { -		if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { -			if (aconnector->base.state->hdcp_content_type == +		conn_state = aconnector->base.state; + +		if (!conn_state) +			continue; + +		dev = connector->dev; + +		if (!dev) +			continue; + +		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); +		mutex_lock(&hdcp_work->mutex); + +		if (conn_state->commit) { +			ret = wait_for_completion_interruptible_timeout( +				&conn_state->commit->hw_done, 10 * HZ); +			if (ret == 0) { +				DRM_ERROR( +					"HDCP state unknown! Setting it to DESIRED"); +				hdcp_work->encryption_status[conn_index] = +					MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +			} +		} +		if (hdcp_work->encryption_status[conn_index] != +			MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { +			if (conn_state->hdcp_content_type ==  				DRM_MODE_HDCP_CONTENT_TYPE0 && -			hdcp_work->encryption_status <= -				MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) -				drm_hdcp_update_content_protection(&aconnector->base, +				hdcp_work->encryption_status[conn_index] <= +				MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + +				DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); +				drm_hdcp_update_content_protection( +					connector,  					DRM_MODE_CONTENT_PROTECTION_ENABLED); -			else if (aconnector->base.state->hdcp_content_type == +			} else if (conn_state->hdcp_content_type ==  					DRM_MODE_HDCP_CONTENT_TYPE1 && -				hdcp_work->encryption_status == -					MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) -				drm_hdcp_update_content_protection(&aconnector->base, +					hdcp_work->encryption_status[conn_index] == +					MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { +				drm_hdcp_update_content_protection( +					connector,  					DRM_MODE_CONTENT_PROTECTION_ENABLED); +			}  		} else { -			drm_hdcp_update_content_protection(&aconnector->base, -				DRM_MODE_CONTENT_PROTECTION_DESIRED); +			DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); +			drm_hdcp_update_content_protection( +				connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); +  		} +		mutex_unlock(&hdcp_work->mutex); +		drm_modeset_unlock(&dev->mode_config.connection_mutex);  	} - -	mutex_unlock(&hdcp_work->mutex); -	drm_modeset_unlock(&dev->mode_config.connection_mutex);  }  static void event_property_validate(struct work_struct *work) @@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work)  	struct hdcp_workqueue *hdcp_work =  		container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);  	struct mod_hdcp_display_query query; -	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - -	if (!aconnector) -		return; +	struct amdgpu_dm_connector *aconnector; +	unsigned int conn_index;  	mutex_lock(&hdcp_work->mutex); -	query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; -	mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); +	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; +	     conn_index++) { +		aconnector = hdcp_work->aconnector[conn_index]; + +		if (!aconnector) +			continue; + +		/* check if display connected */ +		if (aconnector->base.status != connector_status_connected) +			continue; -	if (query.encryption_status != hdcp_work->encryption_status) { -		hdcp_work->encryption_status = 
query.encryption_status; -		schedule_work(&hdcp_work->property_update_work); +		if (!aconnector->base.state) +			continue; + +		query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +		mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, +				       &query); + +		DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", +			aconnector->base.index, +			aconnector->base.state->content_protection, +			query.encryption_status, +			hdcp_work->encryption_status[conn_index]); + +		if (query.encryption_status != +		    hdcp_work->encryption_status[conn_index]) { +			DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", +				hdcp_work->encryption_status[conn_index], query.encryption_status); + +			hdcp_work->encryption_status[conn_index] = +				query.encryption_status; + +			DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + +			schedule_work(&hdcp_work->property_update_work); +		}  	}  	mutex_unlock(&hdcp_work->mutex); @@ -495,7 +561,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)  	link->dp.mst_enabled = config->mst_enabled;  	link->dp.usb4_enabled = config->usb4_enabled;  	display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; -	link->adjust.auth_delay = 3; +	link->adjust.auth_delay = 0;  	link->adjust.hdcp1.disable = 0;  	conn_state = aconnector->base.state; @@ -686,6 +752,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct  		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;  		hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;  		hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + +		memset(hdcp_work[i].aconnector, 0, +		       sizeof(struct amdgpu_dm_connector *) * +			       AMDGPU_DM_MAX_DISPLAY_INDEX); +		memset(hdcp_work[i].encryption_status, 0, +		       sizeof(enum mod_hdcp_encryption_status) * +			       AMDGPU_DM_MAX_DISPLAY_INDEX);  	}  	cp_psp->funcs.update_stream_config = update_config; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 09294ff122fe..69b445b011c8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -43,7 +43,7 @@ struct hdcp_workqueue {  	struct delayed_work callback_dwork;  	struct delayed_work watchdog_timer_dwork;  	struct delayed_work property_validate_dwork; -	struct amdgpu_dm_connector *aconnector; +	struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];  	struct mutex mutex;  	struct mod_hdcp hdcp; @@ -51,7 +51,20 @@ struct hdcp_workqueue {  	struct mod_hdcp_display display;  	struct mod_hdcp_link link; -	enum mod_hdcp_encryption_status encryption_status; +	enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX]; +	/* when display is unplugged from mst hub, connctor will be +	 * destroyed within dm_dp_mst_connector_destroy. connector +	 * hdcp perperties, like type, undesired, desired, enabled, +	 * will be lost. So, save hdcp properties into hdcp_work within +	 * amdgpu_dm_atomic_commit_tail. 
if the same display is +	 * plugged back with same display index, its hdcp properties +	 * will be retrieved from hdcp_work within dm_dp_mst_get_modes +	 */ +	/* un-desired, desired, enabled */ +	unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; +	/* hdcp1.x, hdcp2.x */ +	unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; +  	uint8_t max_link;  	uint8_t *srm; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index f0b01c8dc4a6..6fdc2027c2b4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -38,43 +38,12 @@  #include "amdgpu_dm.h"  #include "amdgpu_dm_irq.h"  #include "amdgpu_dm_mst_types.h" +#include "dpcd_defs.h" +#include "dc/inc/core_types.h"  #include "dm_helpers.h"  #include "ddc_service_types.h" -struct monitor_patch_info { -	unsigned int manufacturer_id; -	unsigned int product_id; -	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); -	unsigned int patch_param; -}; -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); - -static const struct monitor_patch_info monitor_patch_table[] = { -{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, -{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, -}; - -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) -{ -	if (edid_caps) -		edid_caps->panel_patch.max_dsc_target_bpp_limit = param; -} - -static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) -{ -	int i, ret = 0; - -	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) -		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) -			&&  (edid_caps->product_id == monitor_patch_table[i].product_id)) { -			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); -			ret++; -		} - -	return ret; -} -  /* dm_helpers_parse_edid_caps   *   * Parse edid caps @@ -149,29 +118,54 @@ enum dc_edid_status dm_helpers_parse_edid_caps(  	kfree(sads);  	kfree(sadb); -	amdgpu_dm_patch_edid_caps(edid_caps); -  	return result;  }  static void -fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state, -				   struct amdgpu_dm_connector *aconnector, +fill_dc_mst_payload_table_from_drm(struct dc_link *link, +				   bool enable, +				   struct drm_dp_mst_atomic_payload *target_payload,  struct dc_dp_mst_stream_allocation_table *table)  {  	struct dc_dp_mst_stream_allocation_table new_table = { 0 };  	struct dc_dp_mst_stream_allocation *sa;  	struct drm_dp_mst_atomic_payload *payload; +	struct link_mst_stream_allocation_table copy_of_link_table = +										link->mst_stream_alloc_table; -	/* Fill payload info*/ -	list_for_each_entry(payload, &mst_state->payloads, next) { -		if (payload->delete) -			continue; +	int i; +	int current_hw_table_stream_cnt = copy_of_link_table.stream_count; +	struct link_mst_stream_allocation *dc_alloc; -		sa = &new_table.stream_allocations[new_table.stream_count]; -		sa->slot_count = payload->time_slots; -		sa->vcp_id = payload->vcpi; -		new_table.stream_count++; +	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/ +	if (enable) { +		dc_alloc = +		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt]; +		dc_alloc->vcp_id = target_payload->vcpi; +		dc_alloc->slot_count = target_payload->time_slots; +	} else { +		for (i = 0; i < copy_of_link_table.stream_count; i++) { +			dc_alloc = +			
&copy_of_link_table.stream_allocations[i]; + +			if (dc_alloc->vcp_id == target_payload->vcpi) { +				dc_alloc->vcp_id = 0; +				dc_alloc->slot_count = 0; +				break; +			} +		} +		ASSERT(i != copy_of_link_table.stream_count); +	} + +	/* Fill payload info*/ +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +		dc_alloc = +			&copy_of_link_table.stream_allocations[i]; +		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) { +			sa = &new_table.stream_allocations[new_table.stream_count]; +			sa->slot_count = dc_alloc->slot_count; +			sa->vcp_id = dc_alloc->vcp_id; +			new_table.stream_count++; +		}  	}  	/* Overwrite the old table */ @@ -203,24 +197,24 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(  	 * that blocks before commit guaranteeing that the state  	 * is not gonna be swapped while still in use in commit tail */ -	if (!aconnector || !aconnector->mst_port) +	if (!aconnector || !aconnector->mst_root)  		return false; -	mst_mgr = &aconnector->mst_port->mst_mgr; +	mst_mgr = &aconnector->mst_root->mst_mgr;  	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);  	/* It's OK for this to fail */ -	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); +	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);  	if (enable)  		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);  	else -		drm_dp_remove_payload(mst_mgr, mst_state, payload); +		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);  	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or  	 * AUX message. The sequence is slot 1-63 allocated sequence for each  	 * stream. AMD ASIC stream slot allocation should follow the same  	 * sequence. copy DRM MST allocation to dc */ -	fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table); +	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);  	return true;  } @@ -255,10 +249,10 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(  	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; -	if (!aconnector || !aconnector->mst_port) +	if (!aconnector || !aconnector->mst_root)  		return ACT_FAILED; -	mst_mgr = &aconnector->mst_port->mst_mgr; +	mst_mgr = &aconnector->mst_root->mst_mgr;  	if (!mst_mgr->mst_state)  		return ACT_FAILED; @@ -282,22 +276,27 @@ bool dm_helpers_dp_mst_send_payload_allocation(  	struct drm_dp_mst_atomic_payload *payload;  	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;  	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; +	int ret = 0;  	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; -	if (!aconnector || !aconnector->mst_port) +	if (!aconnector || !aconnector->mst_root)  		return false; -	mst_mgr = &aconnector->mst_port->mst_mgr; +	mst_mgr = &aconnector->mst_root->mst_mgr;  	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); -	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); +	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); +  	if (!enable) {  		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;  		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;  	} -	if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) { +	if (enable) +		ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload); + +	if (ret) {  		amdgpu_dm_set_mst_status(&aconnector->mst_status,  			set_flag, false);  	} else { @@ -404,6 +403,7 @@ bool dm_helpers_dp_mst_start_top_mgr(  		bool boot)  {  	
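fill_dc_mst_payload_table_from_drm now works from a snapshot of link->mst_stream_alloc_table: it either appends the target payload (enable) or zeroes the matching VCPI entry (disable), then compacts every non-empty slot into the table handed back to DC. The following is a stand-alone model of that copy-modify-compact step, with simplified structs and a small slot count standing in for MAX_CONTROLLER_NUM; it is purely illustrative and not the kernel code.

#include <stdio.h>
#include <string.h>

#define MAX_SLOTS 6			/* stand-in for MAX_CONTROLLER_NUM */

struct alloc { int vcp_id; int slot_count; };
struct table { int stream_count; struct alloc stream_allocations[MAX_SLOTS]; };

/* Model of the copy-modify-compact update: 'hw' is the current HW table,
 * 'out' receives the compacted result, 'vcpi'/'slots' describe the payload. */
static void update_table(const struct table *hw, struct table *out,
			 int enable, int vcpi, int slots)
{
	struct table copy = *hw;
	int i;

	if (enable) {
		/* append the new payload after the currently programmed streams */
		copy.stream_allocations[copy.stream_count].vcp_id = vcpi;
		copy.stream_allocations[copy.stream_count].slot_count = slots;
	} else {
		/* zero out the entry whose VCPI matches the removed payload */
		for (i = 0; i < copy.stream_count; i++) {
			if (copy.stream_allocations[i].vcp_id == vcpi) {
				copy.stream_allocations[i].vcp_id = 0;
				copy.stream_allocations[i].slot_count = 0;
				break;
			}
		}
	}

	/* compact every non-empty slot into the table handed to DC */
	memset(out, 0, sizeof(*out));
	for (i = 0; i < MAX_SLOTS; i++) {
		const struct alloc *a = &copy.stream_allocations[i];

		if (a->vcp_id > 0 && a->slot_count > 0)
			out->stream_allocations[out->stream_count++] = *a;
	}
}

int main(void)
{
	struct table hw = { 2, { { 1, 8 }, { 2, 12 } } }, out;

	update_table(&hw, &out, 1, 3, 4);	/* add VCPI 3 */
	printf("streams after add: %d\n", out.stream_count);
	update_table(&out, &hw, 0, 2, 0);	/* remove VCPI 2 */
	printf("streams after remove: %d\n", hw.stream_count);
	return 0;
}

The separate compaction pass is what lets the helper tolerate holes left by removed payloads, which is presumably why the patch keeps a local copy rather than editing the link table in place (see the TODO in the hunk above).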
struct amdgpu_dm_connector *aconnector = link->priv; +	int ret;  	if (!aconnector) {  		DRM_ERROR("Failed to find connector for link!"); @@ -419,7 +419,16 @@ bool dm_helpers_dp_mst_start_top_mgr(  	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",  			aconnector, aconnector->base.base.id); -	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); +	ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); +	if (ret < 0) { +		DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); +		return false; +	} + +	DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], +		aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); + +	return true;  }  bool dm_helpers_dp_mst_stop_top_mgr( @@ -718,7 +727,7 @@ bool dm_helpers_dp_write_dsc_enable(  				aconnector->dsc_aux, stream, enable_dsc);  #endif -		port = aconnector->port; +		port = aconnector->mst_output_port;  		if (enable) {  			if (port->passthrough_aux) { @@ -852,9 +861,8 @@ int dm_helper_dmub_aux_transfer_sync(  		struct aux_payload *payload,  		enum aux_return_code_type *operation_result)  { -	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, -			link->link_index, (void *)payload, -			(void *)operation_result); +	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, +			operation_result);  }  int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, @@ -862,9 +870,8 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,  		struct set_config_cmd_payload *payload,  		enum set_config_status *operation_result)  { -	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, -			link->link_index, (void *)payload, -			(void *)operation_result); +	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload, +			operation_result);  }  void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) @@ -997,6 +1004,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)  					 sizeof(new_downspread));  } +bool dm_helpers_dp_handle_test_pattern_request( +		struct dc_context *ctx, +		const struct dc_link *link, +		union link_test_pattern dpcd_test_pattern, +		union test_misc dpcd_test_params) +{ +	enum dp_test_pattern test_pattern; +	enum dp_test_pattern_color_space test_pattern_color_space = +			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; +	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; +	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; +	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; +	struct pipe_ctx *pipe_ctx = NULL; +	struct amdgpu_dm_connector *aconnector = link->priv; +	int i; + +	for (i = 0; i < MAX_PIPES; i++) { +		if (pipes[i].stream == NULL) +			continue; + +		if (pipes[i].stream->link == link && !pipes[i].top_pipe && +			!pipes[i].prev_odm_pipe) { +			pipe_ctx = &pipes[i]; +			break; +		} +	} + +	if (pipe_ctx == NULL) +		return false; + +	switch (dpcd_test_pattern.bits.PATTERN) { +	case LINK_TEST_PATTERN_COLOR_RAMP: +		test_pattern = DP_TEST_PATTERN_COLOR_RAMP; +	break; +	case LINK_TEST_PATTERN_VERTICAL_BARS: +		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; +	break; /* black and white */ +	case LINK_TEST_PATTERN_COLOR_SQUARES: +		test_pattern = (dpcd_test_params.bits.DYN_RANGE == +				TEST_DYN_RANGE_VESA ? 
+				DP_TEST_PATTERN_COLOR_SQUARES : +				DP_TEST_PATTERN_COLOR_SQUARES_CEA); +	break; +	default: +		test_pattern = DP_TEST_PATTERN_VIDEO_MODE; +	break; +	} + +	if (dpcd_test_params.bits.CLR_FORMAT == 0) +		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; +	else +		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? +				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : +				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + +	switch (dpcd_test_params.bits.BPC) { +	case 0: // 6 bits +		requestColorDepth = COLOR_DEPTH_666; +		break; +	case 1: // 8 bits +		requestColorDepth = COLOR_DEPTH_888; +		break; +	case 2: // 10 bits +		requestColorDepth = COLOR_DEPTH_101010; +		break; +	case 3: // 12 bits +		requestColorDepth = COLOR_DEPTH_121212; +		break; +	default: +		break; +	} + +	switch (dpcd_test_params.bits.CLR_FORMAT) { +	case 0: +		requestPixelEncoding = PIXEL_ENCODING_RGB; +		break; +	case 1: +		requestPixelEncoding = PIXEL_ENCODING_YCBCR422; +		break; +	case 2: +		requestPixelEncoding = PIXEL_ENCODING_YCBCR444; +		break; +	default: +		requestPixelEncoding = PIXEL_ENCODING_RGB; +		break; +	} + +	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED +		&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth) +		|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED +		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { +		DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d  %d\n", +				__func__, +				pipe_ctx->stream->timing.display_color_depth, +				pipe_ctx->stream->timing.pixel_encoding, +				requestColorDepth, +				requestPixelEncoding); +		pipe_ctx->stream->timing.display_color_depth = requestColorDepth; +		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; + +		dc_link_update_dsc_config(pipe_ctx); + +		aconnector->timing_changed = true; +		/* store current timing */ +		if (aconnector->timing_requested) +			*aconnector->timing_requested = pipe_ctx->stream->timing; +		else +			DC_LOG_ERROR("%s: timing storage failed\n", __func__); + +	} + +	dc_link_dp_set_test_pattern( +		(struct dc_link *) link, +		test_pattern, +		test_pattern_color_space, +		NULL, +		NULL, +		0); + +	return false; +} +  void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)  {         // TODO @@ -1006,3 +1135,44 @@ void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)  {  	/* TODO: add periodic detection implementation */  } + +void dm_helpers_dp_mst_update_branch_bandwidth( +		struct dc_context *ctx, +		struct dc_link *link) +{ +	// TODO +} + +static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) +{ +	bool ret_val = false; + +	switch (branch_dev_id) { +	case DP_BRANCH_DEVICE_ID_0060AD: +		ret_val = true; +		break; +	default: +		break; +	} + +	return ret_val; +} + +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) +{ +	struct dpcd_caps *dpcd_caps = &link->dpcd_caps; +	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; + +	switch (dpcd_caps->dongle_type) { +	case DISPLAY_DONGLE_DP_HDMI_CONVERTER: +		if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && +			dpcd_caps->allow_invalid_MSA_timing_param == true && +			dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) +			as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; +		break; +	default: +		break; +	} + +	return as_type; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 79b5f9999fec..5c9303241aeb 100644 
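dm_helpers_dp_handle_test_pattern_request above translates the DPCD automated-test request into DC types: PATTERN selects the test pattern, CLR_FORMAT/YCBCR_COEFS pick the color space, and BPC/CLR_FORMAT are mapped onto display_color_depth and pixel_encoding before the stream timing is patched and dc_link_dp_set_test_pattern() is issued. Here is a stand-alone sketch of the two field mappings, using plain ints and strings instead of the DC enums; the field values (BPC 0-3, CLR_FORMAT 0-2) come from the switches above, everything else is illustrative.

#include <stdio.h>

/* BPC field of TEST_MISC -> requested bits per component (0 = leave unchanged). */
static int test_misc_bpc_to_depth(unsigned int bpc)
{
	switch (bpc) {
	case 0: return 6;
	case 1: return 8;
	case 2: return 10;
	case 3: return 12;
	default: return 0;
	}
}

/* CLR_FORMAT field of TEST_MISC -> requested pixel encoding. */
static const char *test_misc_clr_format(unsigned int clr_format)
{
	switch (clr_format) {
	case 0: return "RGB";
	case 1: return "YCbCr 4:2:2";
	case 2: return "YCbCr 4:4:4";
	default: return "RGB";		/* the handler falls back to RGB */
	}
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("BPC=%u -> %d bpc, CLR_FORMAT=%u -> %s\n",
		       v, test_misc_bpc_to_depth(v), v, test_misc_clr_format(v));
	return 0;
}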
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -39,7 +39,7 @@ struct dm_irq_params {  #ifdef CONFIG_DEBUG_FS  	enum amdgpu_dm_pipe_crc_source crc_src;  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -	struct crc_window_parm crc_window; +	struct crc_window_param window_param;  #endif  #endif  }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 6483ba266893..e25e1b2bf194 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -32,15 +32,16 @@  #include "amdgpu_dm.h"  #include "amdgpu_dm_mst_types.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif +  #include "dc.h"  #include "dm_helpers.h" -#include "dc_link_ddc.h" -#include "dc_link_dp.h"  #include "ddc_service_types.h"  #include "dpcd_defs.h" -#include "i2caux_interface.h"  #include "dmub_cmd.h"  #if defined(CONFIG_DEBUG_FS)  #include "amdgpu_dm_debugfs.h" @@ -49,7 +50,7 @@  #include "dc/dcn20/dcn20_resource.h"  bool is_timing_changed(struct dc_stream_state *cur_stream,  		       struct dc_stream_state *new_stream); - +#define PEAK_FACTOR_X1000 1006  static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,  				  struct drm_dp_aux_msg *msg) @@ -132,7 +133,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)  	kfree(aconnector->edid);  	drm_connector_cleanup(connector); -	drm_dp_mst_put_port_malloc(aconnector->port); +	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);  	kfree(aconnector);  } @@ -144,7 +145,7 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)  	int r;  	r = drm_dp_mst_connector_late_register(connector, -					       amdgpu_dm_connector->port); +					       amdgpu_dm_connector->mst_output_port);  	if (r < 0)  		return r; @@ -160,8 +161,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)  {  	struct amdgpu_dm_connector *aconnector =  		to_amdgpu_dm_connector(connector); -	struct drm_dp_mst_port *port = aconnector->port; -	struct amdgpu_dm_connector *root = aconnector->mst_port; +	struct drm_dp_mst_port *port = aconnector->mst_output_port; +	struct amdgpu_dm_connector *root = aconnector->mst_root;  	struct dc_link *dc_link = aconnector->dc_link;  	struct dc_sink *dc_sink = aconnector->dc_sink; @@ -176,6 +177,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)  		if (dc_link->sink_count)  			dc_link_remove_remote_sink(dc_link, dc_sink); +		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", +			dc_sink, dc_link->sink_count); +  		dc_sink_release(dc_sink);  		aconnector->dc_sink = NULL;  		aconnector->edid = NULL; @@ -211,7 +215,7 @@ bool needs_dsc_aux_workaround(struct dc_link *link)  static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)  {  	struct dc_sink *dc_sink = aconnector->dc_sink; -	struct drm_dp_mst_port *port = aconnector->port; +	struct drm_dp_mst_port *port = aconnector->mst_output_port;  	u8 dsc_caps[16] = { 0 };  	u8 dsc_branch_dec_caps_raw[3] = { 0 };	// DSC branch decoder caps 0xA0 ~ 0xA2  	u8 *dsc_branch_dec_caps = NULL; @@ -229,7 +233,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto  	 */  	if (!aconnector->dsc_aux && !port->parent->port_parent &&  	    needs_dsc_aux_workaround(aconnector->dc_link)) -		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; +		
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;  	if (!aconnector->dsc_aux)  		return false; @@ -279,7 +283,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)  	if (!aconnector->edid) {  		struct edid *edid; -		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); +		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);  		if (!edid) {  			amdgpu_dm_set_mst_status(&aconnector->mst_status, @@ -307,6 +311,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)  					return 0;  				} +				DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", +					dc_sink, aconnector->dc_link->sink_count); +  				dc_sink->priv = aconnector;  				aconnector->dc_sink = dc_sink;  			} @@ -340,10 +347,35 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)  			return 0;  		} +		DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", +			dc_sink, aconnector->dc_link->sink_count); +  		dc_sink->priv = aconnector;  		/* dc_link_add_remote_sink returns a new reference */  		aconnector->dc_sink = dc_sink; +		/* when display is unplugged from mst hub, connctor will be +		 * destroyed within dm_dp_mst_connector_destroy. connector +		 * hdcp perperties, like type, undesired, desired, enabled, +		 * will be lost. So, save hdcp properties into hdcp_work within +		 * amdgpu_dm_atomic_commit_tail. if the same display is +		 * plugged back with same display index, its hdcp properties +		 * will be retrieved from hdcp_work within dm_dp_mst_get_modes +		 */ +#ifdef CONFIG_DRM_AMD_DC_HDCP +		if (aconnector->dc_sink && connector->state) { +			struct drm_device *dev = connector->dev; +			struct amdgpu_device *adev = drm_to_adev(dev); +			struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; +			struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; + +			connector->state->hdcp_content_type = +			hdcp_w->hdcp_content_type[connector->index]; +			connector->state->content_protection = +			hdcp_w->content_protection[connector->index]; +		} +#endif +  		if (aconnector->dc_sink) {  			amdgpu_dm_update_freesync_caps(  					connector, aconnector->edid); @@ -386,15 +418,15 @@ dm_dp_mst_detect(struct drm_connector *connector,  		 struct drm_modeset_acquire_ctx *ctx, bool force)  {  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); -	struct amdgpu_dm_connector *master = aconnector->mst_port; -	struct drm_dp_mst_port *port = aconnector->port; +	struct amdgpu_dm_connector *master = aconnector->mst_root; +	struct drm_dp_mst_port *port = aconnector->mst_output_port;  	int connection_status;  	if (drm_connector_is_unregistered(connector))  		return connector_status_disconnected;  	connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, -							aconnector->port); +							aconnector->mst_output_port);  	if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {  		uint8_t dpcd_rev; @@ -435,6 +467,9 @@ dm_dp_mst_detect(struct drm_connector *connector,  		if (aconnector->dc_link->sink_count)  			dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); +		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", +			aconnector->dc_link, aconnector->dc_link->sink_count); +  		dc_sink_release(aconnector->dc_sink);  		aconnector->dc_sink = NULL;  		aconnector->edid = NULL; @@ -451,8 +486,8 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector,  				  struct drm_atomic_state *state)  {  	struct 
amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); -	struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr; -	struct drm_dp_mst_port *mst_port = aconnector->port; +	struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; +	struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;  	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);  } @@ -468,7 +503,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)  {  	drm_encoder_cleanup(encoder); -	kfree(encoder);  }  static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { @@ -515,8 +549,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,  		return NULL;  	connector = &aconnector->base; -	aconnector->port = port; -	aconnector->mst_port = master; +	aconnector->mst_output_port = port; +	aconnector->mst_root = master;  	amdgpu_dm_set_mst_status(&aconnector->mst_status,  			MST_PROBE, true); @@ -642,15 +676,18 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p  		int count,  		int k)  { +	struct drm_connector *drm_connector;  	int i;  	for (i = 0; i < count; i++) { +		drm_connector = &params[i].aconnector->base; +  		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));  		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(  					params[i].sink->ctx->dc->res_pool->dscs[0],  					&params[i].sink->dsc_caps.dsc_dec_caps,  					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, -					params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, +					drm_connector->display_info.max_dsc_bpp,  					0,  					params[i].timing,  					&params[i].timing->dsc_cfg)) { @@ -692,12 +729,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)  	struct dc_dsc_config dsc_config;  	u64 kbps; +	struct drm_connector *drm_connector = &param.aconnector->base; +	uint32_t max_dsc_target_bpp_limit_override = +		drm_connector->display_info.max_dsc_bpp; +  	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);  	dc_dsc_compute_config(  			param.sink->ctx->dc->res_pool->dscs[0],  			&param.sink->dsc_caps.dsc_dec_caps,  			param.sink->ctx->dc->debug.dsc_min_slice_height_override, -			param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, +			max_dsc_target_bpp_limit_override,  			(int) kbps, param.timing, &dsc_config);  	return dsc_config.bits_per_pixel; @@ -897,11 +938,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,  	if (IS_ERR(mst_state))  		return PTR_ERR(mst_state); -	mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); -#if defined(CONFIG_DRM_AMD_DC_DCN) -	drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link)); -#endif -  	/* Set up params */  	for (i = 0; i < dc_state->stream_count; i++) {  		struct dc_dsc_policy dsc_policy = {0}; @@ -915,7 +951,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,  		if (!aconnector)  			continue; -		if (!aconnector->port) +		if (!aconnector->mst_output_port)  			continue;  		stream->timing.flags.DSC = 0; @@ -923,7 +959,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,  		params[count].timing = &stream->timing;  		params[count].sink = stream->sink;  		params[count].aconnector = aconnector; -		params[count].port = aconnector->port; +		params[count].port = aconnector->mst_output_port;  		params[count].clock_force_enable = 
aconnector->dsc_settings.dsc_force_enable;  		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)  			debugfs_overwrite = true; @@ -1132,7 +1168,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,  		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; -		if (!aconnector || !aconnector->dc_sink || !aconnector->port) +		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)  			continue;  		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1147,7 +1183,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,  		if (!is_dsc_need_re_compute(state, dc_state, stream->link))  			continue; -		mst_mgr = aconnector->port->mgr; +		mst_mgr = aconnector->mst_output_port->mgr;  		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,  						       &link_vars_start_index);  		if (ret != 0) @@ -1193,7 +1229,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,  		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; -		if (!aconnector || !aconnector->dc_sink || !aconnector->port) +		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)  			continue;  		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1205,7 +1241,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,  		if (!is_dsc_need_re_compute(state, dc_state, stream->link))  			continue; -		mst_mgr = aconnector->port->mgr; +		mst_mgr = aconnector->mst_output_port->mgr;  		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,  						       &link_vars_start_index);  		if (ret != 0) @@ -1420,8 +1456,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(  	 * with DSC enabled.  	 
*/  	if (is_dsc_common_config_possible(stream, &bw_range) && -	    aconnector->port->passthrough_aux) { -		mst_mgr = aconnector->port->mgr; +	    aconnector->mst_output_port->passthrough_aux) { +		mst_mgr = aconnector->mst_output_port->mgr;  		mutex_lock(&mst_mgr->lock);  		cur_link_settings = stream->link->verified_link_cap; @@ -1429,7 +1465,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(  		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,  							       &cur_link_settings  							       ); -		down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn); +		down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);  		/* pick the bottleneck */  		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, @@ -1453,7 +1489,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(  		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;  		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); -		if (pbn > aconnector->port->full_pbn) +		if (pbn > aconnector->mst_output_port->full_pbn)  			return DC_FAIL_BANDWIDTH_VALIDATE;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index e6854f7270a6..28fb1f02591a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -67,7 +67,16 @@ static const uint32_t overlay_formats[] = {  	DRM_FORMAT_RGBA8888,  	DRM_FORMAT_XBGR8888,  	DRM_FORMAT_ABGR8888, -	DRM_FORMAT_RGB565 +	DRM_FORMAT_RGB565, +	DRM_FORMAT_NV21, +	DRM_FORMAT_NV12, +	DRM_FORMAT_P010 +}; + +static const uint32_t video_formats[] = { +	DRM_FORMAT_NV21, +	DRM_FORMAT_NV12, +	DRM_FORMAT_P010  };  static const u32 cursor_formats[] = { @@ -1600,6 +1609,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,  		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,  						   supported_rotations); +	if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && +	    plane->type != DRM_PLANE_TYPE_CURSOR) +		drm_plane_enable_fb_damage_clips(plane); +  	drm_plane_helper_add(plane, &dm_plane_helper_funcs);  #ifdef CONFIG_DRM_AMD_DC_HDR @@ -1612,3 +1625,14 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,  	return 0;  } +bool is_video_format(uint32_t format) +{ +	int i; + +	for (i = 0; i < ARRAY_SIZE(video_formats); i++) +		if (format == video_formats[i]) +			return true; + +	return false; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 286981a2dd40..a4bee8528a51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -62,4 +62,5 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,  				    bool *per_pixel_alpha, bool *pre_multiplied_alpha,  				    bool *global_alpha, int *global_alpha_value); +bool is_video_format(uint32_t format);  #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 26291db0a3cf..d647f68fd563 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)  		psr_config.allow_multi_disp_optimizations =  			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); +		if 
(!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) +			return false; +  		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);  	} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index d3bc9dc21771..0f580ea37576 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -37,6 +37,7 @@  #include <drm/drm_framebuffer.h>  #include <drm/drm_encoder.h>  #include <drm/drm_atomic.h> +#include "dcn10/dcn10_optc.h"  #include "dc/inc/core_types.h" @@ -662,6 +663,69 @@ TRACE_EVENT(dcn_fpu,  	    )  ); +TRACE_EVENT(dcn_optc_lock_unlock_state, +	    TP_PROTO(const struct optc *optc_state, int instance, bool lock, const char *function, const int line), +	    TP_ARGS(optc_state, instance, lock, function, line), + +	    TP_STRUCT__entry( +			     __field(const char *, function) +			     __field(int, instance) +			     __field(bool, lock) +			     __field(int, line) +			     __field(int, opp_count) +			     __field(int, max_h_total) +			     __field(int, max_v_total) +			     __field(int, min_h_blank) +			     __field(int, min_h_sync_width) +			     __field(int, min_v_sync_width) +			     __field(int, min_v_blank) +			     __field(int, min_v_blank_interlace) +			     __field(int, vstartup_start) +			     __field(int, vupdate_offset) +			     __field(int, vupdate_width) +			     __field(int, vready_offset) +	    ), +	    TP_fast_assign( +			   __entry->function = function; +			   __entry->instance = instance; +			   __entry->lock = lock; +			   __entry->line = line; +			   __entry->opp_count = optc_state->opp_count; +			   __entry->max_h_total = optc_state->max_h_total; +			   __entry->max_v_total = optc_state->max_v_total; +			   __entry->min_h_blank = optc_state->min_h_blank; +			   __entry->min_h_sync_width = optc_state->min_h_sync_width; +			   __entry->min_v_sync_width = optc_state->min_v_sync_width; +			   __entry->min_v_blank = optc_state->min_v_blank; +			   __entry->min_v_blank_interlace = optc_state->min_v_blank_interlace; +			   __entry->vstartup_start = optc_state->vstartup_start; +			   __entry->vupdate_offset = optc_state->vupdate_offset; +			   __entry->vupdate_width = optc_state->vupdate_width; +			   __entry->vready_offset = optc_state->vupdate_offset; +	    ), +	    TP_printk("%s: %s()+%d: optc_instance=%d opp_count=%d max_h_total=%d max_v_total=%d " +		      "min_h_blank=%d min_h_sync_width=%d min_v_sync_width=%d min_v_blank=%d " +		      "min_v_blank_interlace=%d vstartup_start=%d vupdate_offset=%d vupdate_width=%d " +		      "vready_offset=%d", +		      __entry->lock ? 
"Lock" : "Unlock", +		      __entry->function, +		      __entry->line, +		      __entry->instance, +		      __entry->opp_count, +		      __entry->max_h_total, +		      __entry->max_v_total, +		      __entry->min_h_blank, +		      __entry->min_h_sync_width, +		      __entry->min_v_sync_width, +		      __entry->min_v_blank, +		      __entry->min_v_blank_interlace, +		      __entry->vstartup_start, +		      __entry->vupdate_offset, +		      __entry->vupdate_width, +		      __entry->vready_offset +	    ) +); +  #endif /* _AMDGPU_DM_TRACE_H_ */  #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index ab0c6d191038..1743ca0a3641 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -31,6 +31,8 @@  #elif defined(CONFIG_PPC64)  #include <asm/switch_to.h>  #include <asm/cputable.h> +#elif defined(CONFIG_ARM64) +#include <asm/neon.h>  #endif  /** @@ -99,6 +101,8 @@ void dc_fpu_begin(const char *function_name, const int line)  			preempt_disable();  			enable_kernel_fp();  		} +#elif defined(CONFIG_ARM64) +		kernel_neon_begin();  #endif  	} @@ -136,6 +140,8 @@ void dc_fpu_end(const char *function_name, const int line)  			disable_kernel_fp();  			preempt_enable();  		} +#elif defined(CONFIG_ARM64) +		kernel_neon_end();  #endif  	} diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index b9effadfc4bb..94f156d57220 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -64,9 +64,8 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI  include $(AMD_DC) -DISPLAY_CORE = dc.o  dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ -dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o +DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ +dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o  DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 9b8ea6e9a2b9..27af9d3c2b73 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -33,7 +33,6 @@  #include "include/gpio_service_interface.h"  #include "include/grph_object_ctrl_defs.h"  #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h"  #include "include/logger_interface.h"  #include "command_table.h" @@ -138,7 +137,9 @@ static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)  	uint32_t object_table_offset = bp->object_info_tbl_offset + offset; -	table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset); +	table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, +				object_table_offset, +				struct_size(table, asObjects, 1)));  	if (!table)  		return 0; @@ -166,8 +167,9 @@ static struct graphics_object_id bios_parser_get_connector_id(  	uint32_t connector_table_offset = bp->object_info_tbl_offset  		+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset); -	ATOM_OBJECT_TABLE *tbl = -		GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset); +	ATOM_OBJECT_TABLE *tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, +				connector_table_offset, +				struct_size(tbl, asObjects, 1)));  	if (!tbl) {  		dm_error("Can't get connector table from atom bios.\n"); @@ -662,8 
+664,9 @@ static enum bp_result get_ss_info_v3_1(  	if (!DATA_TABLES(ASIC_InternalSS_Info))  		return BP_RESULT_UNSUPPORTED; -	ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3, -		DATA_TABLES(ASIC_InternalSS_Info)); +	ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base, +				DATA_TABLES(ASIC_InternalSS_Info), +				struct_size(ss_table_header_include, asSpreadSpectrum, 1)));  	table_size =  		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)  				- sizeof(ATOM_COMMON_TABLE_HEADER)) @@ -1029,8 +1032,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(  	if (!DATA_TABLES(ASIC_InternalSS_Info))  		return result; -	header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2, -		DATA_TABLES(ASIC_InternalSS_Info)); +	header = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image( +				&bp->base, +				DATA_TABLES(ASIC_InternalSS_Info), +				struct_size(header, asSpreadSpectrum, 1)));  	memset(info, 0, sizeof(struct spread_spectrum_info)); @@ -1709,8 +1714,10 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(  	if (!DATA_TABLES(ASIC_InternalSS_Info))  		return 0; -	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2, -			DATA_TABLES(ASIC_InternalSS_Info)); +	header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image( +				&bp->base, +				DATA_TABLES(ASIC_InternalSS_Info), +				struct_size(header_include, asSpreadSpectrum, 1)));  	size = (le16_to_cpu(header_include->sHeader.usStructureSize)  			- sizeof(ATOM_COMMON_TABLE_HEADER)) @@ -1746,8 +1753,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(  	if (!DATA_TABLES(ASIC_InternalSS_Info))  		return number; -	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3, -			DATA_TABLES(ASIC_InternalSS_Info)); +	header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base, +				DATA_TABLES(ASIC_InternalSS_Info), +				struct_size(header_include, asSpreadSpectrum, 1)));  	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -  			sizeof(ATOM_COMMON_TABLE_HEADER)) /  					sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); @@ -1789,11 +1797,13 @@ static enum bp_result bios_parser_get_gpio_pin_info(  	if (!DATA_TABLES(GPIO_Pin_LUT))  		return BP_RESULT_BADBIOSTABLE; -	header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT)); +	header = ((ATOM_GPIO_PIN_LUT *) bios_get_image(&bp->base, +				DATA_TABLES(GPIO_Pin_LUT), +				struct_size(header, asGPIO_Pin, 1)));  	if (!header)  		return BP_RESULT_BADBIOSTABLE; -	if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT) +	if (sizeof(ATOM_COMMON_TABLE_HEADER) + struct_size(header, asGPIO_Pin, 1)  			> le16_to_cpu(header->sHeader.usStructureSize))  		return BP_RESULT_BADBIOSTABLE; @@ -1978,7 +1988,8 @@ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,  	offset += bp->object_info_tbl_offset; -	tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset); +	tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, offset, +				struct_size(tbl, asObjects, 1)));  	if (!tbl)  		return NULL; @@ -2600,8 +2611,7 @@ static enum bp_result update_slot_layout_info(  	for (;;) { -		record_header = (ATOM_COMMON_RECORD_HEADER *) -			GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset); +		record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);  		if (record_header == NULL) {  			result = BP_RESULT_BADBIOSTABLE;  			break; @@ -2615,7 +2625,7 @@ static enum bp_result update_slot_layout_info(  		if (record_header->ucRecordType ==  			ATOM_BRACKET_LAYOUT_RECORD_TYPE && -			
sizeof(ATOM_BRACKET_LAYOUT_RECORD) +			struct_size(record, asConnInfo, 1)  			<= record_header->ucRecordSize) {  			record = (ATOM_BRACKET_LAYOUT_RECORD *)  				(record_header); @@ -2709,8 +2719,9 @@ static enum bp_result get_bracket_layout_record(  	genericTableOffset = bp->object_info_tbl_offset +  		bp->object_info_tbl.v1_3->usMiscObjectTableOffset; -	object_table = (ATOM_OBJECT_TABLE *) -		GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset); +	object_table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, +				genericTableOffset, +				struct_size(object_table, asObjects, 1)));  	if (!object_table)  		return BP_RESULT_FAILURE; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index e0c8d6f09bb4..e381de2429fa 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -32,7 +32,6 @@  #include "dc_bios_types.h"  #include "include/grph_object_ctrl_defs.h"  #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h"  #include "include/logger_interface.h"  #include "command_table2.h" @@ -462,6 +461,7 @@ static enum bp_result get_gpio_i2c_info(  	uint32_t count = 0;  	unsigned int table_index = 0;  	bool find_valid = false; +	struct atom_gpio_pin_assignment *pin;  	if (!info)  		return BP_RESULT_BADINPUT; @@ -489,20 +489,17 @@ static enum bp_result get_gpio_i2c_info(  			- sizeof(struct atom_common_table_header))  				/ sizeof(struct atom_gpio_pin_assignment); +	pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; +  	for (table_index = 0; table_index < count; table_index++) { -		if (((record->i2c_id & I2C_HW_CAP) == ( -		header->gpio_pin[table_index].gpio_id & -						I2C_HW_CAP)) && -		((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  == -		(header->gpio_pin[table_index].gpio_id & -					I2C_HW_ENGINE_ID_MASK)) && -		((record->i2c_id & I2C_HW_LANE_MUX) == -		(header->gpio_pin[table_index].gpio_id & -						I2C_HW_LANE_MUX))) { +		if (((record->i2c_id & I2C_HW_CAP) 				== (pin->gpio_id & I2C_HW_CAP)) && +		    ((record->i2c_id & I2C_HW_ENGINE_ID_MASK)	== (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && +		    ((record->i2c_id & I2C_HW_LANE_MUX) 		== (pin->gpio_id & I2C_HW_LANE_MUX))) {  			/* still valid */  			find_valid = true;  			break;  		} +		pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment));  	}  	/* If we don't find the entry that we are looking for then @@ -1700,14 +1697,15 @@ static enum bp_result bios_parser_enable_disp_power_gating(  static enum bp_result bios_parser_enable_lvtma_control(  	struct dc_bios *dcb,  	uint8_t uc_pwr_on, -	uint8_t panel_instance) +	uint8_t panel_instance, +	uint8_t bypass_panel_control_wait)  {  	struct bios_parser *bp = BP_FROM_DCB(dcb);  	if (!bp->cmd_tbl.enable_lvtma_control)  		return BP_RESULT_FAILURE; -	return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance); +	return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);  }  static bool bios_parser_is_accelerated_mode( @@ -2931,7 +2929,6 @@ static enum bp_result construct_integrated_info(  	struct atom_common_table_header *header;  	struct atom_data_revision revision; -	struct clock_voltage_caps temp = {0, 0};  	uint32_t i;  	uint32_t j; @@ -3034,14 +3031,8 @@ static enum bp_result construct_integrated_info(  	for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {  		for (j = i; j > 0; --j) {  			if (info->disp_clk_voltage[j].max_supported_clk < -				
info->disp_clk_voltage[j-1].max_supported_clk -				) { -				/* swap j and j - 1*/ -				temp = info->disp_clk_voltage[j-1]; -				info->disp_clk_voltage[j-1] = -					info->disp_clk_voltage[j]; -				info->disp_clk_voltage[j] = temp; -			} +			    info->disp_clk_voltage[j-1].max_supported_clk) +				swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]);  		}  	} diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index f52f7ff7ead4..1ef9e4053bb7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)  static enum bp_result enable_lvtma_control(  	struct bios_parser *bp,  	uint8_t uc_pwr_on, -	uint8_t panel_instance); +	uint8_t panel_instance, +	uint8_t bypass_panel_control_wait);  static void init_enable_lvtma_control(struct bios_parser *bp)  { @@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp)  static void enable_lvtma_control_dmcub(  	struct dc_dmub_srv *dmcub,  	uint8_t uc_pwr_on, -	uint8_t panel_instance) +	uint8_t panel_instance, +	uint8_t bypass_panel_control_wait)  {  	union dmub_rb_cmd cmd; @@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub(  			uc_pwr_on;  	cmd.lvtma_control.data.panel_inst =  			panel_instance; +	cmd.lvtma_control.data.bypass_panel_control_wait = +			bypass_panel_control_wait;  	dc_dmub_srv_cmd_queue(dmcub, &cmd);  	dc_dmub_srv_cmd_execute(dmcub);  	dc_dmub_srv_wait_idle(dmcub); @@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub(  static enum bp_result enable_lvtma_control(  	struct bios_parser *bp,  	uint8_t uc_pwr_on, -	uint8_t panel_instance) +	uint8_t panel_instance, +	uint8_t bypass_panel_control_wait)  {  	enum bp_result result = BP_RESULT_FAILURE; @@ -1029,7 +1034,8 @@ static enum bp_result enable_lvtma_control(  	    bp->base.ctx->dc->debug.dmub_command_table) {  		enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,  				uc_pwr_on, -				panel_instance); +				panel_instance, +				bypass_panel_control_wait);  		return BP_RESULT_OK;  	}  	return result; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index be060b4b87db..b6d09bf6cf72 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -96,7 +96,8 @@ struct cmd_tbl {  			struct bios_parser *bp, uint8_t id);  	enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,  			uint8_t uc_pwr_on, -			uint8_t panel_instance); +			uint8_t panel_instance, +			uint8_t bypass_panel_control_wait);  };  void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index f276abb63bcd..69691daf4dbb 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -29,6 +29,7 @@  #include "dc_types.h"  #include "dccg.h"  #include "clk_mgr_internal.h" +#include "link.h"  #include "dce100/dce_clk_mgr.h"  #include "dce110/dce110_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index 3e5df27aa96f..1ce19d875358 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -26,6 +26,8 @@  #ifndef DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_  #define DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_ +enum dcn_pwr_state; +  int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);  int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);  int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); @@ -33,7 +35,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque  int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);  void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);  int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); -void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count); +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state);  void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);  void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);  int rn_vbios_smu_is_periodic_retraining_disabled(struct clk_mgr_internal *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3ce0ee0d012f..694a9d3d92ae 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -577,8 +577,7 @@ void dcn3_clk_mgr_construct(  void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)  { -	if (clk_mgr->base.bw_params) -		kfree(clk_mgr->base.bw_params); +	kfree(clk_mgr->base.bw_params);  	if (clk_mgr->wm_range_table)  		dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h index 8ea8ee57b39f..61bb1d86182e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT  // This is a stripped-down version of the smu11_driver_if.h file for the relevant DAL interfaces.  #define SMU11_DRIVER_IF_VERSION 0x40 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index c1eaf571407a..f9e2e0c3095e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -47,6 +47,7 @@  #include "dcn30/dcn30_clk_mgr.h"  #include "dc_dmub_srv.h" +#include "link.h"  #include "logger_types.h"  #undef DC_LOGGER @@ -609,8 +610,10 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk  	}  	bw_params->vram_type = bios_info->memory_type; -	bw_params->num_channels = bios_info->ma_channel_number; +	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4; +	//bw_params->dram_channel_width_bytes = dc->ctx->asic_id.vram_width; +	bw_params->num_channels = bios_info->ma_channel_number ? 
bios_info->ma_channel_number : 4;  	for (i = 0; i < WM_SET_COUNT; i++) {  		bw_params->wm_table.entries[i].wm_inst = i; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 090b2c02aee1..0827c7df2855 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -333,8 +333,8 @@ void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst  			(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))  		support = DCN_ZSTATE_SUPPORT_DISALLOW; - -	if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY) +	if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY || +	    support == DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY)  		param = 1;  	else  		param = 0; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 20a06c04e4a1..89df7244b272 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -48,7 +48,7 @@  #include "dcn31/dcn31_clk_mgr.h"  #include "dc_dmub_srv.h" -#include "dc_link_dp.h" +#include "link.h"  #include "dcn314_smu.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 2db595672a46..0765334f0825 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,  		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&  		    param == TABLE_WATERMARKS)  			DC_LOG_WARNING("Watermarks table not configured properly by SMU"); +		else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || +			 msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) +			DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");  		else  			ASSERT(0);  		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); @@ -346,8 +349,6 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs  	if (!clk_mgr->smu_present)  		return; -	// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits -	// Arg[16] = Disallow Z9 -> new bit  	switch (support) {  	case DCN_ZSTATE_SUPPORT_ALLOW: @@ -366,6 +367,16 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs  		param = (1 << 10);  		break; +	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: +		msg_id = VBIOSSMC_MSG_AllowZstatesEntry; +		param = (1 << 10) | (1 << 8); +		break; + +	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: +		msg_id = VBIOSSMC_MSG_AllowZstatesEntry; +		param = (1 << 8); +		break; +  	default: //DCN_ZSTATE_SUPPORT_UNKNOWN  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = 0; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 893991a0eb97..a737782b2840 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -46,7 +46,7 @@  #define DC_LOGGER \  	clk_mgr->base.base.ctx->logger -#include "dc_link_dp.h" +#include "link.h"  #define TO_CLK_MGR_DCN315(clk_mgr)\  	container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -87,6 +87,16 @@ static int dcn315_get_active_display_cnt_wa(  	return display_count;  } +static bool should_disable_otg(struct pipe_ctx *pipe) +{ +	bool ret = true; + +	if 
(pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled && +			pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)) +		ret = false; +	return ret; +} +  static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)  {  	struct dc *dc = clk_mgr_base->ctx->dc; @@ -98,12 +108,16 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state  		if (pipe->top_pipe || pipe->prev_odm_pipe)  			continue;  		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || -				     dc_is_virtual_signal(pipe->stream->signal))) { -			if (disable) { -				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); -				reset_sync_context_for_pipe(dc, context, i); -			} else -				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); +					dc_is_virtual_signal(pipe->stream->signal))) { + +			/* This w/a should not trigger when we have a dig active */ +			if (should_disable_otg(pipe)) { +				if (disable) { +					pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); +					reset_sync_context_for_pipe(dc, context, i); +				} else +					pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); +			}  		}  	}  } @@ -458,19 +472,6 @@ static void dcn315_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,  	dcn315_smu_transfer_dpm_table_smu_2_dram(clk_mgr);  } -static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) -{ -	uint32_t max = 0; -	int i; - -	for (i = 0; i < num_clocks; ++i) { -		if (clocks[i] > max) -			max = clocks[i]; -	} - -	return max; -} -  static void dcn315_clk_mgr_helper_populate_bw_params(  		struct clk_mgr_internal *clk_mgr,  		struct integrated_info *bios_info, @@ -478,29 +479,21 @@ static void dcn315_clk_mgr_helper_populate_bw_params(  {  	int i;  	struct clk_bw_params *bw_params = clk_mgr->base.bw_params; -	uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0; +	uint32_t max_pstate = clock_table->NumDfPstatesEnabled - 1;  	struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; -	/* Find highest fclk pstate */ -	for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) { -		if (clock_table->DfPstateTable[i].FClk > max_fclk) { -			max_fclk = clock_table->DfPstateTable[i].FClk; -			max_pstate = i; -		} -	} -  	/* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */  	for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {  		int j; -		uint32_t min_fclk = clock_table->DfPstateTable[0].FClk; -		for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) { -			if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i] -					&& clock_table->DfPstateTable[j].FClk < min_fclk) { -				min_fclk = clock_table->DfPstateTable[j].FClk; -				min_pstate = j; -			} +		/* DF table is sorted with clocks decreasing */ +		for (j = clock_table->NumDfPstatesEnabled - 2; j >= 0; j--) { +			if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) +				max_pstate = j;  		} +		/* Max DCFCLK should match up with max pstate */ +		if (i == clock_table->NumDcfClkLevelsEnabled - 1) +			max_pstate = 0;  		/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */  		for (j = bw_params->clk_table.num_entries - 1; j > 0; j--) @@ -511,9 +504,9 @@ static void dcn315_clk_mgr_helper_populate_bw_params(  		bw_params->clk_table.entries[i].dtbclk_mhz = 
bw_params->clk_table.entries[j].dtbclk_mhz;  		/* Now update clocks we do read */ -		bw_params->clk_table.entries[i].fclk_mhz = min_fclk; -		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk; -		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage; +		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[max_pstate].FClk; +		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk; +		bw_params->clk_table.entries[i].voltage = clock_table->SocVoltage[i];  		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];  		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];  		bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i]; @@ -521,25 +514,16 @@ static void dcn315_clk_mgr_helper_populate_bw_params(  		bw_params->clk_table.entries[i].wck_ratio = 1;  	} -	/* Make sure to include at least one entry and highest pstate */ -	if (max_pstate != min_pstate || i == 0) { -		bw_params->clk_table.entries[i].fclk_mhz = max_fclk; -		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk; -		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage; -		bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS); +	/* Make sure to include at least one entry */ +	if (i == 0) { +		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[0].FClk; +		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[0].MemClk; +		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[0].Voltage; +		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[0];  		bw_params->clk_table.entries[i].wck_ratio = 1;  		i++;  	} -	bw_params->clk_table.num_entries = i--; - -	/* Make sure all highest clocks are included*/ -	bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); -	bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS); -	bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS); -	ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS)); -	bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; -	bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; -	bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; +	bw_params->clk_table.num_entries = i;  	/* Set any 0 clocks to max default setting. 
Not an issue for  	 * power since we aren't doing switching in such case anyway @@ -565,6 +549,11 @@ static void dcn315_clk_mgr_helper_populate_bw_params(  		if (!bw_params->clk_table.entries[i].dtbclk_mhz)  			bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;  	} + +	/* Make sure all highest default clocks are included*/ +	ASSERT(bw_params->clk_table.entries[i-1].phyclk_mhz == def_max.phyclk_mhz); +	ASSERT(bw_params->clk_table.entries[i-1].phyclk_d18_mhz == def_max.phyclk_d18_mhz); +	ASSERT(bw_params->clk_table.entries[i-1].dtbclk_mhz == def_max.dtbclk_mhz);  	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);  	bw_params->vram_type = bios_info->memory_type;  	bw_params->num_channels = bios_info->ma_channel_number; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 187f5b27fdc8..93db4dbee713 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -39,7 +39,7 @@  #include "dcn316_smu.h"  #include "dm_helpers.h"  #include "dc_dmub_srv.h" -#include "dc_link_dp.h" +#include "link.h"  // DCN316 this is CLK1 instance  #define MAX_INSTANCE                                        7 @@ -553,6 +553,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(  	bw_params->vram_type = bios_info->memory_type;  	bw_params->num_channels = bios_info->ma_channel_number; +	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;  	for (i = 0; i < WM_SET_COUNT; i++) {  		bw_params->wm_table.entries[i].wm_inst = i; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 6f77d8e538ab..61768bf726f8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -33,7 +33,7 @@  #include "reg_helper.h"  #include "core_types.h"  #include "dm_helpers.h" -#include "dc_link_dp.h" +#include "link.h"  #include "atomfirmware.h"  #include "smu13_driver_if.h" @@ -233,41 +233,6 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)  	DC_FP_END();  } -static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, -			struct dc_state *context, -			int ref_dtbclk_khz) -{ -	struct dccg *dccg = clk_mgr->dccg; -	uint32_t tg_mask = 0; -	int i; - -	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { -		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; -		struct dtbclk_dto_params dto_params = {0}; - -		/* use mask to program DTO once per tg */ -		if (pipe_ctx->stream_res.tg && -				!(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { -			tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); - -			dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; -			dto_params.ref_dtbclk_khz = ref_dtbclk_khz; - -			if (is_dp_128b_132b_signal(pipe_ctx)) { -				dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk; - -				if (pipe_ctx->stream_res.audio != NULL) -					dto_params.req_audio_dtbclk_khz = 24000; -			} -			if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) -				dto_params.is_hdmi = true; - -			dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); -			//dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); -		} -	} -} -  /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),   * update DPPCLK to be the exact frequency that will be set after the DPPCLK   * divider is updated. 
This will prevent rounding issues that could cause DPP @@ -290,6 +255,94 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s  	}  } +static void dcn32_update_clocks_update_dentist( +		struct clk_mgr_internal *clk_mgr, +		struct dc_state *context, +		uint32_t old_dispclk_khz) +{ +	uint32_t new_disp_divider = 0; +	uint32_t old_disp_divider = 0; +	uint32_t new_dispclk_wdivider = 0; +	uint32_t old_dispclk_wdivider = 0; +	uint32_t i; + +	if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0) +		return; + +	new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR +			* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; +	old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR +			* clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz; + +	new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider); +	old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider); + +	/* When changing divider to or from 127, some extra programming is required to prevent corruption */ +	if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) { +		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { +			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; +			uint32_t fifo_level; +			struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; +			struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; +			int32_t N; +			int32_t j; + +			if (!pipe_ctx->stream) +				continue; +			/* Virtual encoders don't have this function */ +			if (!stream_enc->funcs->get_fifo_cal_average_level) +				continue; +			fifo_level = stream_enc->funcs->get_fifo_cal_average_level( +					stream_enc); +			N = fifo_level / 4; +			dccg->funcs->set_fifo_errdet_ovr_en( +					dccg, +					true); +			for (j = 0; j < N - 4; j++) +				dccg->funcs->otg_drop_pixel( +						dccg, +						pipe_ctx->stream_res.tg->inst); +			dccg->funcs->set_fifo_errdet_ovr_en( +					dccg, +					false); +		} +	} else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) { +		/* request clock with 126 divider first */ +		uint32_t temp_disp_divider = dentist_get_divider_from_did(126); +		uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider; + +		if (clk_mgr->smu_present) +			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); + +		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { +			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; +			struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; +			struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; +			uint32_t fifo_level; +			int32_t N; +			int32_t j; + +			if (!pipe_ctx->stream) +				continue; +			/* Virtual encoders don't have this function */ +			if (!stream_enc->funcs->get_fifo_cal_average_level) +				continue; +			fifo_level = stream_enc->funcs->get_fifo_cal_average_level( +					stream_enc); +			N = fifo_level / 4; +			dccg->funcs->set_fifo_errdet_ovr_en(dccg, true); +			for (j = 0; j < 12 - N; j++) +				dccg->funcs->otg_add_pixel(dccg, +						pipe_ctx->stream_res.tg->inst); +			dccg->funcs->set_fifo_errdet_ovr_en(dccg, false); +		} +	} + +	/* do requested DISPCLK updates*/ +	if (clk_mgr->smu_present) +		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); +} +  static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  			struct dc_state *context,  			bool safe_to_lower) @@ -308,6 +361,7 @@ 
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  	bool p_state_change_support;  	bool fclk_p_state_change_support;  	int total_plane_count; +	int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz;  	if (dc->work_arounds.skip_clock_update)  		return; @@ -431,14 +485,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {  		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; -		if (clk_mgr->smu_present) -			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz)); -  		update_dispclk = true;  	}  	if (!new_clocks->dtbclk_en) { -		new_clocks->ref_dtbclk_khz = 0; +		new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;  	}  	/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */ @@ -447,21 +498,19 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  		/* DCCG requires KHz precision for DTBCLK */  		clk_mgr_base->clks.ref_dtbclk_khz =  				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); - -		dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);  	}  	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {  		if (dpp_clock_lowered) {  			/* if clock is being lowered, increase DTO before lowering refclk */  			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); -			dcn20_update_clocks_update_dentist(clk_mgr, context); +			dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);  			if (clk_mgr->smu_present)  				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));  		} else {  			/* if clock is being raised, increase refclk before lowering DTO */  			if (update_dppclk || update_dispclk) -				dcn20_update_clocks_update_dentist(clk_mgr, context); +				dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);  			/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures  			 * that we do not lower dto when it is not safe to lower. We do not need to  			 * compare the current and new dppclk before calling this function. @@ -820,8 +869,7 @@ void dcn32_clk_mgr_construct(  void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)  { -	if (clk_mgr->base.bw_params) -		kfree(clk_mgr->base.bw_params); +	kfree(clk_mgr->base.bw_params);  	if (clk_mgr->wm_range_table)  		dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h index d30fbbdd1792..d3d5a8caccf8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT  // This is a stripped-down version of the smu13_driver_if.h file for the relevant DAL interfaces.  
#define SMU13_DRIVER_IF_VERSION  0x18 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 997ab031f816..1c218c526650 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -33,6 +33,7 @@  #include "resource.h" +#include "gpio_service_interface.h"  #include "clk_mgr.h"  #include "clock_source.h"  #include "dc_bios_types.h" @@ -53,11 +54,10 @@  #include "link_enc_cfg.h"  #include "dc_link.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "dm_helpers.h"  #include "mem_input.h" -#include "dc_link_dp.h"  #include "dc_dmub_srv.h"  #include "dsc.h" @@ -68,8 +68,6 @@  #include "dmub/dmub_srv.h" -#include "i2caux_interface.h" -  #include "dce/dmub_psr.h"  #include "dce/dmub_hw_lock_mgr.h" @@ -135,9 +133,7 @@ static const char DC_BUILD_ID[] = "production-build";   * one or two (in the pipe-split case).   */ -/******************************************************************************* - * Private functions - ******************************************************************************/ +/* Private functions */  static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)  { @@ -384,16 +380,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)  }  /** - *  dc_stream_adjust_vmin_vmax: + *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR + *  @dc:     dc reference + *  @stream: Initial dc stream state + *  @adjust: Updated parameters for vertical_total_min and vertical_total_max   *   *  Looks up the pipe context of dc_stream_state and updates the   *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh   *  Rate, which is a power-saving feature that targets reducing panel   *  refresh rate while the screen is static   * - *  @dc:     dc reference - *  @stream: Initial dc stream state - *  @adjust: Updated parameters for vertical_total_min and vertical_total_max + *  Return: %true if the pipe context is found and adjusted; + *          %false if the pipe context is not found.   
*/  bool dc_stream_adjust_vmin_vmax(struct dc *dc,  		struct dc_stream_state *stream, @@ -401,9 +399,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,  {  	int i; -	if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0) -		return true; -  	stream->adjust.v_total_max = adjust->v_total_max;  	stream->adjust.v_total_mid = adjust->v_total_mid;  	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; @@ -424,18 +419,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,  }  /** - ***************************************************************************** - *  Function: dc_stream_get_last_vrr_vtotal + * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of + * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)   * - *  @brief - *     Looks up the pipe context of dc_stream_state and gets the - *     last VTOTAL used by DRR (Dynamic Refresh Rate) + * @dc: [in] dc reference + * @stream: [in] Initial dc stream state + * @refresh_rate: [in] new refresh_rate   * - *  @param [in] dc: dc reference - *  @param [in] stream: Initial dc stream state - *  @param [in] adjust: Updated parameters for vertical_total_min and - *  vertical_total_max - ***************************************************************************** + * Return: %true if the pipe context is found and there is an associated + *         timing_generator for the DC; + *         %false if the pipe context is not found or there is no + *         timing_generator for the DC.   */  bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,  		struct dc_stream_state *stream, @@ -491,86 +485,80 @@ bool dc_stream_get_crtc_position(struct dc *dc,  }  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, -			     struct crc_params *crc_window) +static inline void +dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, +		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)  { -	int i; -	struct dmcu *dmcu = dc->res_pool->dmcu; -	struct pipe_ctx *pipe; -	struct crc_region tmp_win, *crc_win; -	struct otg_phy_mux mapping_tmp, *mux_mapping; - -	/*crc window can't be null*/ -	if (!crc_window) -		return false; - -	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { -		crc_win = &tmp_win; -		mux_mapping = &mapping_tmp; -		/*set crc window*/ -		tmp_win.x_start = crc_window->windowa_x_start; -		tmp_win.y_start = crc_window->windowa_y_start; -		tmp_win.x_end = crc_window->windowa_x_end; -		tmp_win.y_end = crc_window->windowa_y_end; - -		for (i = 0; i < MAX_PIPES; i++) { -			pipe = &dc->current_state->res_ctx.pipe_ctx[i]; -			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) -				break; -		} - -		/* Stream not found */ -		if (i == MAX_PIPES) -			return false; - +	union dmub_rb_cmd cmd = {0}; -		/*set mux routing info*/ -		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; -		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; +	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; +	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; -		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping); +	if (is_stop) { +		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; +		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;  	} else { -		DC_LOG_DC("dmcu is not initialized"); -		return false; +		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; +		
cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; +		cmd.secure_display.roi_info.x_start = rect->x; +		cmd.secure_display.roi_info.y_start = rect->y; +		cmd.secure_display.roi_info.x_end = rect->x + rect->width; +		cmd.secure_display.roi_info.y_end = rect->y + rect->height;  	} -	return true; +	dc_dmub_srv_cmd_queue(dmub_srv, &cmd); +	dc_dmub_srv_cmd_execute(dmub_srv);  } -bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream) +static inline void +dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, +		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)  { -	int i; -	struct dmcu *dmcu = dc->res_pool->dmcu; -	struct pipe_ctx *pipe; -	struct otg_phy_mux mapping_tmp, *mux_mapping; +	if (is_stop) +		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); +	else +		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); +} -	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { -		mux_mapping = &mapping_tmp; +bool +dc_stream_forward_crc_window(struct dc_stream_state *stream, +		struct rect *rect, bool is_stop) +{ +	struct dmcu *dmcu; +	struct dc_dmub_srv *dmub_srv; +	struct otg_phy_mux mux_mapping; +	struct pipe_ctx *pipe; +	int i; +	struct dc *dc = stream->ctx->dc; -		for (i = 0; i < MAX_PIPES; i++) { -			pipe = &dc->current_state->res_ctx.pipe_ctx[i]; -			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) -				break; -		} +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) +			break; +	} -		/* Stream not found */ -		if (i == MAX_PIPES) -			return false; +	/* Stream not found */ +	if (i == MAX_PIPES) +		return false; +	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst; +	mux_mapping.otg_output_num = pipe->stream_res.tg->inst; -		/*set mux routing info*/ -		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; -		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; +	dmcu = dc->res_pool->dmcu; +	dmub_srv = dc->ctx->dmub_srv; -		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); -	} else { -		DC_LOG_DC("dmcu is not initialized"); +	/* forward to dmub */ +	if (dmub_srv) +		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); +	/* forward to dmcu */ +	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) +		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); +	else  		return false; -	}  	return true;  } -#endif +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */  /**   * dc_stream_configure_crc() - Configure CRC capture for the given stream. @@ -582,7 +570,10 @@ bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *s   *              once.   *   * By default, only CRC0 is configured, and the entire frame is used to - * calculate the crc. + * calculate the CRC. + * + * Return: %false if the stream is not found or CRC capture is not supported; + *         %true if the stream has been configured.   */  bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,  			     struct crc_params *crc_window, bool enable, bool continuous) @@ -651,7 +642,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,   * dc_stream_configure_crc needs to be called beforehand to enable CRCs.   *   * Return: - * false if stream is not found, or if CRCs are not enabled. + * %false if stream is not found, or if CRCs are not enabled.   
*/  bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,  		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) @@ -878,6 +869,7 @@ static bool dc_construct_ctx(struct dc *dc,  	dc_ctx->perf_trace = dc_perf_trace_create();  	if (!dc_ctx->perf_trace) { +		kfree(dc_ctx);  		ASSERT_CRITICAL(false);  		return false;  	} @@ -1070,6 +1062,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  	int i, j;  	struct dc_state *dangling_context = dc_create_state(dc);  	struct dc_state *current_ctx; +	struct pipe_ctx *pipe; +	struct timing_generator *tg;  	if (dangling_context == NULL)  		return; @@ -1112,6 +1106,18 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  		}  		if (should_disable && old_stream) { +			pipe = &dc->current_state->res_ctx.pipe_ctx[i]; +			tg = pipe->stream_res.tg; +			/* When disabling plane for a phantom pipe, we must turn on the +			 * phantom OTG so the disable programming gets the double buffer +			 * update. Otherwise the pipe will be left in a partially disabled +			 * state that can result in underflow or hang when enabling it +			 * again for different use. +			 */ +			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { +				if (tg->funcs->enable_crtc) +					tg->funcs->enable_crtc(tg); +			}  			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);  			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); @@ -1127,6 +1133,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);  				dc->hwss.post_unlock_program_front_end(dc, dangling_context);  			} +			/* We need to put the phantom OTG back into it's default (disabled) state or we +			 * can get corruption when transition from one SubVP config to a different one. +			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable +			 * will still get it's double buffer update. 
+			 */ +			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { +				if (tg->funcs->disable_phantom_crtc) +					tg->funcs->disable_phantom_crtc(tg); +			}  		}  	} @@ -1184,7 +1199,7 @@ static void disable_vbios_mode_if_required(  						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;  					if (pix_clk_100hz != requested_pix_clk_100hz) { -						core_link_disable_stream(pipe); +						link_set_dpms_off(pipe);  						pipe->stream->dpms_off = false;  					}  				} @@ -1219,9 +1234,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)  	PERF_TRACE();  } -/******************************************************************************* - * Public functions - ******************************************************************************/ +/* Public functions */  struct dc *dc_create(const struct dc_init_data *init_params)  { @@ -1294,7 +1307,7 @@ static void detect_edp_presence(struct dc *dc)  		if (dc->config.edp_not_connected) {  			edp_link->edp_sink_present = false;  		} else { -			dc_link_detect_sink(edp_link, &type); +			dc_link_detect_connection_type(edp_link, &type);  			edp_link->edp_sink_present = (type != dc_connection_none);  		}  	} @@ -1488,17 +1501,19 @@ static void program_timing_sync(  	}  } -static bool context_changed( -		struct dc *dc, -		struct dc_state *context) +static bool streams_changed(struct dc *dc, +			    struct dc_stream_state *streams[], +			    uint8_t stream_count)  {  	uint8_t i; -	if (context->stream_count != dc->current_state->stream_count) +	if (stream_count != dc->current_state->stream_count)  		return true;  	for (i = 0; i < dc->current_state->stream_count; i++) { -		if (dc->current_state->streams[i] != context->streams[i]) +		if (dc->current_state->streams[i] != streams[i]) +			return true; +		if (!streams[i]->link->link_state_valid)  			return true;  	} @@ -1549,6 +1564,9 @@ bool dc_validate_boot_timing(const struct dc *dc,  	if (tg_inst >= dc->res_pool->timing_generator_count)  		return false; +	if (tg_inst != link->link_enc->preferred_engine) +		return false; +  	tg = dc->res_pool->timing_generators[tg_inst];  	if (!tg->funcs->get_hw_timing) @@ -1640,7 +1658,7 @@ bool dc_validate_boot_timing(const struct dc *dc,  		return false;  	} -	if (is_edp_ilr_optimization_required(link, crtc_timing)) { +	if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {  		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");  		return false;  	} @@ -1722,9 +1740,16 @@ void dc_z10_save_init(struct dc *dc)  		dc->hwss.z10_save_init(dc);  } -/* - * Applies given context to HW and copy it into current context. +/** + * dc_commit_state_no_check - Apply context to the hardware + * + * @dc: DC object with the current status to be updated + * @context: New state that will become the current status at the end of this function + * + * Applies given context to the hardware and copy it into current context.   * It's up to the user to release the src context afterwards. 
+ * + * Return: an enum dc_status result code for the operation   */  static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)  { @@ -1760,6 +1785,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  		context->stream_count == 0)  		dc->hwss.prepare_bandwidth(dc, context); +	/* When SubVP is active, all HW programming must be done while +	 * SubVP lock is acquired +	 */ +	if (dc->hwss.subvp_pipe_control_lock) +		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); +  	if (dc->debug.enable_double_buffered_dsc_pg_support)  		dc->hwss.update_dsc_pg(dc, context, false); @@ -1787,9 +1818,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);  	} -	if (dc->hwss.subvp_pipe_control_lock) -		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); -  	result = dc->hwss.apply_ctx_to_hw(dc, context);  	if (result != DC_OK) { @@ -1888,13 +1916,110 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  	return result;  } +/** + * dc_commit_streams - Commit current stream state + * + * @dc: DC object with the commit state to be configured in the hardware + * @streams: Array with a list of stream state + * @stream_count: Total of streams + * + * Function responsible for commit streams change to the hardware. + * + * Return: + * Return DC_OK if everything work as expected, otherwise, return a dc_status + * code. + */ +enum dc_status dc_commit_streams(struct dc *dc, +				 struct dc_stream_state *streams[], +				 uint8_t stream_count) +{ +	int i, j; +	struct dc_state *context; +	enum dc_status res = DC_OK; +	struct dc_validation_set set[MAX_STREAMS] = {0}; + +	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) +		return res; + +	if (!streams_changed(dc, streams, stream_count)) +		return res; + +	DC_LOG_DC("%s: %d streams\n", __func__, stream_count); + +	for (i = 0; i < stream_count; i++) { +		struct dc_stream_state *stream = streams[i]; +		struct dc_stream_status *status = dc_stream_get_status(stream); + +		dc_stream_log(dc, stream); + +		set[i].stream = stream; + +		if (status) { +			set[i].plane_count = status->plane_count; +			for (j = 0; j < status->plane_count; j++) +				set[i].plane_states[j] = status->plane_states[j]; +		} +	} + +	context = dc_create_state(dc); +	if (!context) +		goto context_alloc_fail; + +	dc_resource_state_copy_construct_current(dc, context); + +	res = dc_validate_with_context(dc, set, stream_count, context, false); +	if (res != DC_OK) { +		BREAK_TO_DEBUGGER(); +		goto fail; +	} + +	res = dc_commit_state_no_check(dc, context); + +	for (i = 0; i < stream_count; i++) { +		for (j = 0; j < context->stream_count; j++) { +			if (streams[i]->stream_id == context->streams[j]->stream_id) +				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; + +			if (dc_is_embedded_signal(streams[i]->signal)) { +				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); + +				if (dc->hwss.is_abm_supported) +					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); +				else +					status->is_abm_supported = true; +			} +		} +	} + +fail: +	dc_release_state(context); + +context_alloc_fail: + +	DC_LOG_DC("%s Finished.\n", __func__); + +	return res; +} + +/* TODO: When the transition to the new commit sequence is done, remove this + * function in favor of dc_commit_streams. 
*/  bool dc_commit_state(struct dc *dc, struct dc_state *context)  {  	enum dc_status result = DC_ERROR_UNEXPECTED;  	int i; -	if (!context_changed(dc, context)) +	/* TODO: Since change commit sequence can have a huge impact, +	 * we decided to only enable it for DCN3x. However, as soon as +	 * we get more confident about this change we'll need to enable +	 * the new sequence for all ASICs. */ +	if (dc->ctx->dce_version >= DCN_VERSION_3_2) { +		result = dc_commit_streams(dc, context->streams, context->stream_count); +		return result == DC_OK; +	} + +	if (!streams_changed(dc, context->streams, context->stream_count)) {  		return DC_OK; +	}  	DC_LOG_DC("%s: %d streams\n",  				__func__, context->stream_count); @@ -2834,6 +2959,9 @@ static void copy_stream_update_to_stream(struct dc *dc,  	if (update->vsp_infopacket)  		stream->vsp_infopacket = *update->vsp_infopacket; +	if (update->adaptive_sync_infopacket) +		stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; +  	if (update->dither_option)  		stream->dither_option = *update->dither_option; @@ -2950,7 +3078,7 @@ static bool update_planes_and_stream_state(struct dc *dc,  		 * Ensures that we have enough pipes for newly added MPO planes  		 */  		if (dc->res_pool->funcs->remove_phantom_pipes) -			dc->res_pool->funcs->remove_phantom_pipes(dc, context); +			dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);  		/*remove old surfaces from context */  		if (!dc_rem_all_planes_for_stream(dc, stream, context)) { @@ -2987,6 +3115,19 @@ static bool update_planes_and_stream_state(struct dc *dc,  	if (update_type == UPDATE_TYPE_FULL) {  		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { +			/* For phantom pipes we remove and create a new set of phantom pipes +			 * for each full update (because we don't know if we'll need phantom +			 * pipes until after the first round of validation). However, if validation +			 * fails we need to keep the existing phantom pipes (because we don't update +			 * the dc->current_state). +			 * +			 * The phantom stream/plane refcount is decremented for validation because +			 * we assume it'll be removed (the free comes when the dc_state is freed), +			 * but if validation fails we have to increment back the refcount so it's +			 * consistent. 
+			 */ +			if (dc->res_pool->funcs->retain_phantom_pipes) +				dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);  			BREAK_TO_DEBUGGER();  			goto fail;  		} @@ -3026,12 +3167,13 @@ static void commit_planes_do_stream_update(struct dc *dc,  					stream_update->vsc_infopacket ||  					stream_update->vsp_infopacket ||  					stream_update->hfvsif_infopacket || +					stream_update->adaptive_sync_infopacket ||  					stream_update->vtem_infopacket) {  				resource_build_info_frame(pipe_ctx);  				dc->hwss.update_info_frame(pipe_ctx);  				if (dc_is_dp_signal(pipe_ctx->stream->signal)) -					dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); +					link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);  			}  			if (stream_update->hdr_static_metadata && @@ -3067,14 +3209,14 @@ static void commit_planes_do_stream_update(struct dc *dc,  				continue;  			if (stream_update->dsc_config) -				dp_update_dsc_config(pipe_ctx); +				link_update_dsc_config(pipe_ctx);  			if (stream_update->mst_bw_update) {  				if (stream_update->mst_bw_update->is_increase) -					dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); -				else -					dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); -			} +					link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + 				else +					link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + 			}  			if (stream_update->pending_test_pattern) {  				dc_link_dp_set_test_pattern(stream->link, @@ -3087,7 +3229,7 @@ static void commit_planes_do_stream_update(struct dc *dc,  			if (stream_update->dpms_off) {  				if (*stream_update->dpms_off) { -					core_link_disable_stream(pipe_ctx); +					link_set_dpms_off(pipe_ctx);  					/* for dpms, keep acquired resources*/  					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)  						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); @@ -3097,7 +3239,7 @@ static void commit_planes_do_stream_update(struct dc *dc,  				} else {  					if (get_seamless_boot_stream_count(context) == 0)  						dc->hwss.prepare_bandwidth(dc, dc->current_state); -					core_link_enable_stream(dc->current_state, pipe_ctx); +					link_set_dpms_on(dc->current_state, pipe_ctx);  				}  			} @@ -3198,6 +3340,7 @@ static void commit_planes_for_stream(struct dc *dc,  	struct pipe_ctx *top_pipe_to_program = NULL;  	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);  	bool subvp_prev_use = false; +	bool subvp_curr_use = false;  	// Once we apply the new subvp context to hardware it won't be in the  	// dc->current_state anymore, so we have to cache it before we apply @@ -3207,6 +3350,21 @@ static void commit_planes_for_stream(struct dc *dc,  	dc_z10_restore(dc); +	if (update_type == UPDATE_TYPE_FULL) { +		/* wait for all double-buffer activity to clear on all pipes */ +		int pipe_idx; + +		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { +			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; + +			if (!pipe_ctx->stream) +				continue; + +			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) +				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); +		} +	} +  	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {  		/* Optimize seamless boot flag keeps clocks and watermarks high until  		 * first 
flip. After first flip, optimization is required to lower @@ -3254,6 +3412,15 @@ static void commit_planes_for_stream(struct dc *dc,  			break;  	} +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			subvp_curr_use = true; +			break; +		} +	} +  	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {  		struct pipe_ctx *mpcc_pipe;  		struct pipe_ctx *odm_pipe; @@ -3297,22 +3464,6 @@ static void commit_planes_for_stream(struct dc *dc,  		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);  	} -	if (update_type != UPDATE_TYPE_FAST) { -		for (i = 0; i < dc->res_pool->pipe_count; i++) { -			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - -			if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || -					subvp_prev_use) { -				// If old context or new context has phantom pipes, apply -				// the phantom timings now. We can't change the phantom -				// pipe configuration safely without driver acquiring -				// the DMCUB lock first. -				dc->hwss.apply_ctx_to_hw(dc, context); -				break; -			} -		} -	} -  	dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);  	if (update_type != UPDATE_TYPE_FAST) { @@ -3370,6 +3521,24 @@ static void commit_planes_for_stream(struct dc *dc,  		return;  	} +	if (update_type != UPDATE_TYPE_FAST) { +		for (j = 0; j < dc->res_pool->pipe_count; j++) { +			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + +			if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && +				pipe_ctx->stream && pipe_ctx->plane_state) { +				/* Only update visual confirm for SUBVP here. +				 * The bar appears on all pipes, so we need to update the bar on all displays, +				 * so the information doesn't get stale. +				 */ +				struct mpcc_blnd_cfg blnd_cfg = { 0 }; + +				dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, +						pipe_ctx->plane_res.hubp->inst); +			} +		} +	} +  	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {  		for (i = 0; i < surface_count; i++) {  			struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -3487,7 +3656,6 @@ static void commit_planes_for_stream(struct dc *dc,  					dc->hwss.update_plane_addr(dc, pipe_ctx);  			}  		} -  	}  	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { @@ -3524,6 +3692,24 @@ static void commit_planes_for_stream(struct dc *dc,  					top_pipe_to_program->stream_res.tg);  		} +	if (subvp_curr_use) { +		/* If enabling subvp or transitioning from subvp->subvp, enable the +		 * phantom streams before we program front end for the phantom pipes. +		 */ +		if (update_type != UPDATE_TYPE_FAST) { +			if (dc->hwss.enable_phantom_streams) +				dc->hwss.enable_phantom_streams(dc, context); +		} +	} + +	if (subvp_prev_use && !subvp_curr_use) { +		/* If disabling subvp, disable phantom streams after front end +		 * programming has completed (we turn on phantom OTG in order +		 * to complete the plane disable for phantom pipes). +		 */ +		dc->hwss.apply_ctx_to_hw(dc, context); +	} +  	if (update_type != UPDATE_TYPE_FAST)  		dc->hwss.post_unlock_program_front_end(dc, context);  	if (update_type != UPDATE_TYPE_FAST) @@ -3563,10 +3749,24 @@ static void commit_planes_for_stream(struct dc *dc,  	}  } -/* Determines if the incoming context requires a applying transition state with unnecessary - * pipe splitting and ODM disabled, due to hardware limitations. 
In a case where - * the OPP associated with an MPCC might change due to plane additions, this function +/** + * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change + * + * @dc: Used to get the current state status + * @stream: Target stream, which we want to remove the attached planes + * @surface_count: Number of surface update + * @is_plane_addition: [in] Fill out with true if it is a plane addition case + * + * DCN32x and newer support a feature named Dynamic ODM which can conflict with + * the MPO if used simultaneously in some specific configurations (e.g., + * 4k@144). This function checks if the incoming context requires applying a + * transition state with unnecessary pipe splitting and ODM disabled to + * circumvent our hardware limitations to prevent this edge case. If the OPP + * associated with an MPCC might change due to plane additions, this function   * returns true. + * + * Return: + * Return true if OPP and MPCC might change, otherwise, return false.   */  static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,  		struct dc_stream_state *stream, @@ -3576,6 +3776,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,  	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);  	bool force_minimal_pipe_splitting = false; +	bool subvp_active = false;  	uint32_t i;  	*is_plane_addition = false; @@ -3608,39 +3809,55 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,  		}  	} -	/* For SubVP pipe split case when adding MPO video -	 * we need to add a minimal transition. In this case -	 * there will be 2 streams (1 main stream, 1 phantom -	 * stream). -	 */ -	if (cur_stream_status && -			dc->current_state->stream_count == 2 && -			stream->mall_stream_config.type == SUBVP_MAIN) { -		bool is_pipe_split = false; +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; -		for (i = 0; i < dc->res_pool->pipe_count; i++) { -			if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream && -					(dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe || -					dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) { -				is_pipe_split = true; -				break; -			} +		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { +			subvp_active = true; +			break;  		} +	} +	/* For SubVP when adding or removing planes we need to add a minimal transition +	 * (even when disabling all planes). Whenever disabling a phantom pipe, we +	 * must use the minimal transition path to disable the pipe correctly. +	 * +	 * We want to use the minimal transition whenever subvp is active, not only if +	 * a plane is being added / removed from a subvp stream (MPO plane can be added +	 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through +	 * a min transition to disable subvp. 
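The check above generalizes the old rule (exactly two streams with a SubVP pipe split) to: force a minimal transition whenever any pipe in the current state carries a SubVP stream and the plane count for the target stream is changing. A minimal, self-contained sketch of that decision, using simplified stand-in types rather than the real dc_state/pipe_ctx structures:

#include <stdbool.h>

/* Simplified stand-ins for illustration; the driver walks dc_state/pipe_ctx. */
enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct pipe_info {
	bool has_stream;
	enum mall_stream_type mall_type;
};

static bool needs_minimal_transition(const struct pipe_info *pipes, int pipe_count,
				     int cur_plane_count, int new_plane_count,
				     bool *is_plane_addition)
{
	bool subvp_active = false;
	int i;

	*is_plane_addition = false;

	/* Is SubVP active anywhere in the current state? */
	for (i = 0; i < pipe_count; i++) {
		if (pipes[i].has_stream && pipes[i].mall_type != SUBVP_NONE) {
			subvp_active = true;
			break;
		}
	}

	/* Only a changing plane count while SubVP is active needs the transition. */
	if (!subvp_active || cur_plane_count == new_plane_count)
		return false;

	if (new_plane_count > cur_plane_count)
		*is_plane_addition = true;

	return true;
}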
+	 */ +	if (cur_stream_status && subvp_active) {  		/* determine if minimal transition is required due to SubVP*/ -		if (surface_count > 0 && is_pipe_split) { -			if (cur_stream_status->plane_count > surface_count) { -				force_minimal_pipe_splitting = true; -			} else if (cur_stream_status->plane_count < surface_count) { -				force_minimal_pipe_splitting = true; -				*is_plane_addition = true; -			} +		if (cur_stream_status->plane_count > surface_count) { +			force_minimal_pipe_splitting = true; +		} else if (cur_stream_status->plane_count < surface_count) { +			force_minimal_pipe_splitting = true; +			*is_plane_addition = true;  		}  	}  	return force_minimal_pipe_splitting;  } +/** + * commit_minimal_transition_state - Create a transition pipe split state + * + * @dc: Used to get the current state status + * @transition_base_context: New transition state + * + * In some specific configurations, such as pipe split on multi-display with + * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe + * programming when moving to new planes. To mitigate those types of problems, + * this function adds a transition state that minimizes pipe usage before + * programming the new configuration. When adding a new plane, the current + * state requires the least pipes, so it is applied without splitting. When + * removing a plane, the new state requires the least pipes, so it is applied + * without splitting. + * + * Return: + * Return false if something is wrong in the transition state. + */  static bool commit_minimal_transition_state(struct dc *dc,  		struct dc_state *transition_base_context)  { @@ -3650,9 +3867,48 @@ static bool commit_minimal_transition_state(struct dc *dc,  	bool temp_subvp_policy;  	enum dc_status ret = DC_ERROR_UNEXPECTED;  	unsigned int i, j; +	unsigned int pipe_in_use = 0; +	bool subvp_in_use = false;  	if (!transition_context)  		return false; +	/* Setup: +	 * Store the current ODM and MPC config in some temp variables to be +	 * restored after we commit the transition state. +	 */ + +	/* check current pipes in use*/ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; + +		if (pipe->plane_state) +			pipe_in_use++; +	} + +	/* If SubVP is enabled and we are adding or removing planes from any main subvp +	 * pipe, we must use the minimal transition. +	 */ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + +		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			subvp_in_use = true; +			break; +		} +	} + +	/* When the OS add a new surface if we have been used all of pipes with odm combine +	 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. +	 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need +	 * call it again. Otherwise return true to skip. +	 * +	 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially +	 * enter/exit MPO when DCN still have enough resources. 
+	 */ +	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) { +		dc_release_state(transition_context); +		return true; +	}  	if (!dc->config.is_vmin_only_asic) {  		tmp_mpc_policy = dc->debug.pipe_split_policy; @@ -3667,7 +3923,7 @@ static bool commit_minimal_transition_state(struct dc *dc,  	dc_resource_state_copy_construct(transition_base_context, transition_context); -	//commit minimal state +	/* commit minimal state */  	if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {  		for (i = 0; i < transition_context->stream_count; i++) {  			struct dc_stream_status *stream_status = &transition_context->stream_status[i]; @@ -3685,10 +3941,12 @@ static bool commit_minimal_transition_state(struct dc *dc,  		ret = dc_commit_state_no_check(dc, transition_context);  	} -	/*always release as dc_commit_state_no_check retains in good case*/ +	/* always release as dc_commit_state_no_check retains in good case */  	dc_release_state(transition_context); -	/*restore previous pipe split and odm policy*/ +	/* TearDown: +	 * Restore original configuration for ODM and MPO. +	 */  	if (!dc->config.is_vmin_only_asic)  		dc->debug.pipe_split_policy = tmp_mpc_policy; @@ -3696,12 +3954,12 @@ static bool commit_minimal_transition_state(struct dc *dc,  	dc->debug.force_disable_subvp = temp_subvp_policy;  	if (ret != DC_OK) { -		/*this should never happen*/ +		/* this should never happen */  		BREAK_TO_DEBUGGER();  		return false;  	} -	/*force full surface update*/ +	/* force full surface update */  	for (i = 0; i < dc->current_state->stream_count; i++) {  		for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {  			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; @@ -3719,6 +3977,7 @@ bool dc_update_planes_and_stream(struct dc *dc,  	struct dc_state *context;  	enum surface_update_type update_type;  	int i; +	struct mall_temp_config mall_temp_config;  	/* In cases where MPO and split or ODM are used transitions can  	 * cause underflow. Apply stream configuration with minimal pipe @@ -3750,11 +4009,29 @@ bool dc_update_planes_and_stream(struct dc *dc,  	/* on plane removal, minimal state is the new one */  	if (force_minimal_pipe_splitting && !is_plane_addition) { +		/* Since all phantom pipes are removed in full validation, +		 * we have to save and restore the subvp/mall config when +		 * we do a minimal transition since the flags marking the +		 * pipe as subvp/phantom will be cleared (dc copy constructor +		 * creates a shallow copy). +		 */ +		if (dc->res_pool->funcs->save_mall_state) +			dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);  		if (!commit_minimal_transition_state(dc, context)) {  			dc_release_state(context);  			return false;  		} - +		if (dc->res_pool->funcs->restore_mall_state) +			dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); + +		/* If we do a minimal transition with plane removal and the context +		 * has subvp we also have to retain back the phantom stream / planes +		 * since the refcount is decremented as part of the min transition +		 * (we commit a state with no subvp, so the phantom streams / planes +		 * had to be removed). 
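Because dc_resource_state_copy_construct() makes a shallow copy and full validation strips the phantom pipes, the SubVP/MALL bookkeeping has to be stashed before the minimal transition and put back afterwards. For reference, the complete plane-removal sequence in dc_update_planes_and_stream() reads as follows once the diff markers are stripped (its tail continues just below; the res_pool hooks are optional and may be NULL on ASICs without SubVP):

/* Plane-removal path of dc_update_planes_and_stream(), condensed. */
if (force_minimal_pipe_splitting && !is_plane_addition) {
	if (dc->res_pool->funcs->save_mall_state)		/* stash subvp/MALL flags */
		dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);

	if (!commit_minimal_transition_state(dc, context)) {	/* commit with minimal pipe usage */
		dc_release_state(context);
		return false;
	}

	if (dc->res_pool->funcs->restore_mall_state)		/* put the flags back */
		dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);

	if (dc->res_pool->funcs->retain_phantom_pipes)		/* re-take phantom stream/plane refs */
		dc->res_pool->funcs->retain_phantom_pipes(dc, context);

	update_type = UPDATE_TYPE_FULL;				/* the transition forces a full update */
}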
+		 */ +		if (dc->res_pool->funcs->retain_phantom_pipes) +			dc->res_pool->funcs->retain_phantom_pipes(dc, context);  		update_type = UPDATE_TYPE_FULL;  	} @@ -3806,6 +4083,18 @@ void dc_commit_updates_for_stream(struct dc *dc,  	struct dc_context *dc_ctx = dc->ctx;  	int i, j; +	/* TODO: Since change commit sequence can have a huge impact, +	 * we decided to only enable it for DCN3x. However, as soon as +	 * we get more confident about this change we'll need to enable +	 * the new sequence for all ASICs. +	 */ +	if (dc->ctx->dce_version >= DCN_VERSION_3_2) { +		dc_update_planes_and_stream(dc, srf_updates, +					    surface_count, stream, +					    stream_update); +		return; +	} +  	stream_status = dc_stream_get_status(stream);  	context = dc->current_state; @@ -4016,7 +4305,7 @@ void dc_resume(struct dc *dc)  	uint32_t i;  	for (i = 0; i < dc->link_count; i++) -		core_link_resume(dc->links[i]); +		link_resume(dc->links[i]);  }  bool dc_is_dmcu_initialized(struct dc *dc) @@ -4387,21 +4676,17 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)  		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;  } -/* - ***************************************************************************** - * Function: dc_is_dmub_outbox_supported - +/** + * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification   * - * @brief - *      Checks whether DMUB FW supports outbox notifications, if supported - *		DM should register outbox interrupt prior to actually enabling interrupts - *		via dc_enable_dmub_outbox + * @dc: [in] dc structure   * - *  @param - *		[in] dc: dc structure + * Checks whether DMUB FW supports outbox notifications, if supported DM + * should register outbox interrupt prior to actually enabling interrupts + * via dc_enable_dmub_outbox   * - *  @return - *		True if DMUB FW supports outbox notifications, False otherwise - ***************************************************************************** + * Return: + * True if DMUB FW supports outbox notifications, False otherwise   */  bool dc_is_dmub_outbox_supported(struct dc *dc)  { @@ -4419,21 +4704,17 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)  	return dc->debug.enable_dmub_aux_for_legacy_ddc;  } -/* - ***************************************************************************** - *  Function: dc_enable_dmub_notifications +/** + * dc_enable_dmub_notifications - Check if dmub fw supports outbox   * - *  @brief - *		Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox - *		notifications. All DMs shall switch to dc_is_dmub_outbox_supported. - *		This API shall be removed after switching. + * @dc: [in] dc structure   * - *  @param - *		[in] dc: dc structure + * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox + * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This + * API shall be removed after switching.   
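The converted kernel-doc keeps the original contract: the DM should check for outbox support and register its outbox interrupt handler before enabling notifications via dc_enable_dmub_outbox(). A caller-side sketch of that ordering; the interrupt-registration helper is hypothetical and only stands in for the DM's own wiring:

struct dc;	/* opaque here; fully defined in dc.h */

bool dc_is_dmub_outbox_supported(struct dc *dc);
void dc_enable_dmub_outbox(struct dc *dc);

/* Hypothetical DM-side helper, named for illustration only. */
int example_register_outbox_irq(struct dc *dc);

static void example_dm_enable_outbox(struct dc *dc)
{
	if (!dc_is_dmub_outbox_supported(dc))
		return;				/* DMUB FW has no outbox support on this ASIC */

	if (example_register_outbox_irq(dc))
		return;				/* register the handler first... */

	dc_enable_dmub_outbox(dc);		/* ...then let DMUB start sending notifications */
}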
* - *  @return - *		True if DMUB FW supports outbox notifications, False otherwise - ***************************************************************************** + * Return: + * True if DMUB FW supports outbox notifications, False otherwise   */  bool dc_enable_dmub_notifications(struct dc *dc)  { @@ -4441,18 +4722,11 @@ bool dc_enable_dmub_notifications(struct dc *dc)  }  /** - ***************************************************************************** - *  Function: dc_enable_dmub_outbox - * - *  @brief - *		Enables DMUB unsolicited notifications to x86 via outbox + * dc_enable_dmub_outbox - Enables DMUB unsolicited notification   * - *  @param - *		[in] dc: dc structure + * @dc: [in] dc structure   * - *  @return - *		None - ***************************************************************************** + * Enables DMUB unsolicited notifications to x86 via outbox.   */  void dc_enable_dmub_outbox(struct dc *dc)  { @@ -4553,21 +4827,17 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,  }  /** - ***************************************************************************** - *  Function: dc_process_dmub_set_config_async + * dc_process_dmub_set_config_async - Submits set_config command   * - *  @brief - *		Submits set_config command to dmub via inbox message + * @dc: [in] dc structure + * @link_index: [in] link_index: link index + * @payload: [in] aux payload + * @notify: [out] set_config immediate reply   * - *  @param - *		[in] dc: dc structure - *		[in] link_index: link index - *		[in] payload: aux payload - *		[out] notify: set_config immediate reply + * Submits set_config command to dmub via inbox message.   * - *  @return - *		True if successful, False if failure - ***************************************************************************** + * Return: + * True if successful, False if failure   */  bool dc_process_dmub_set_config_async(struct dc *dc,  				uint32_t link_index, @@ -4603,21 +4873,17 @@ bool dc_process_dmub_set_config_async(struct dc *dc,  }  /** - ***************************************************************************** - *  Function: dc_process_dmub_set_mst_slots + * dc_process_dmub_set_mst_slots - Submits MST solt allocation   * - *  @brief - *		Submits mst slot allocation command to dmub via inbox message + * @dc: [in] dc structure + * @link_index: [in] link index + * @mst_alloc_slots: [in] mst slots to be allotted + * @mst_slots_in_use: [out] mst slots in use returned in failure case   * - *  @param - *		[in] dc: dc structure - *		[in] link_index: link index - *		[in] mst_alloc_slots: mst slots to be allotted - *		[out] mst_slots_in_use: mst slots in use returned in failure case + * Submits mst slot allocation command to dmub via inbox message   * - *	@return - *		DC_OK if successful, DC_ERROR if failure - ***************************************************************************** + * Return: + * DC_OK if successful, DC_ERROR if failure   */  enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,  				uint32_t link_index, @@ -4657,19 +4923,12 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,  }  /** - ***************************************************************************** - *  Function: dc_process_dmub_dpia_hpd_int_enable + * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption   * - *  @brief - *		Submits dpia hpd int enable command to dmub via inbox message + * @dc: [in] dc structure + * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable   * - *  @param - *		[in] dc: dc structure - *	
	[in] hpd_int_enable: 1 for hpd int enable, 0 to disable - * - *	@return - *		None - ***************************************************************************** + * Submits dpia hpd int enable command to dmub via inbox message   */  void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,  				uint32_t hpd_int_enable) @@ -4698,16 +4957,13 @@ void dc_disable_accelerated_mode(struct dc *dc)  /** - ***************************************************************************** - *  dc_notify_vsync_int_state() - notifies vsync enable/disable state + *  dc_notify_vsync_int_state - notifies vsync enable/disable state   *  @dc: dc structure - *	@stream: stream where vsync int state changed - *	@enable: whether vsync is enabled or disabled - * - *  Called when vsync is enabled/disabled - *	Will notify DMUB to start/stop ABM interrupts after steady state is reached + *  @stream: stream where vsync int state changed + *  @enable: whether vsync is enabled or disabled   * - ***************************************************************************** + *  Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM + *  interrupts after steady state is reached.   */  void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)  { @@ -4749,14 +5005,18 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo  	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)  		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);  } -/* - * dc_extended_blank_supported: Decide whether extended blank is supported + +/** + * dc_extended_blank_supported - Decide whether extended blank is supported + * + * @dc: [in] Current DC state   * - * Extended blank is a freesync optimization feature to be enabled in the future. - * During the extra vblank period gained from freesync, we have the ability to enter z9/z10. + * Extended blank is a freesync optimization feature to be enabled in the + * future.  During the extra vblank period gained from freesync, we have the + * ability to enter z9/z10.   
* - * @param [in] dc: Current DC state - * @return: Indicate whether extended blank is supported (true or false) + * Return: + * Indicate whether extended blank is supported (%true or %false)   */  bool dc_extended_blank_supported(struct dc *dc)  { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7c2e3b8dc26a..652270a0b498 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {  		{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,  				0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },  	{ COLOR_SPACE_YCBCR2020_TYPE, -		{ 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, -				0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, +		{ 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, +				0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },  	{ COLOR_SPACE_YCBCR709_BLACK_TYPE,  		{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,  				0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, @@ -366,6 +366,7 @@ void get_hdr_visual_confirm_color(  		struct tg_color *color)  {  	uint32_t color_value = MAX_TG_COLOR_VALUE; +	bool is_sdr = false;  	/* Determine the overscan color based on the top-most (desktop) plane's context */  	struct pipe_ctx *top_pipe_ctx  = pipe_ctx; @@ -382,7 +383,8 @@ void get_hdr_visual_confirm_color(  			/* FreeSync 2 ARGB2101010 - set border color to pink */  			color->color_r_cr = color_value;  			color->color_b_cb = color_value; -		} +		} else +			is_sdr = true;  		break;  	case PIXEL_FORMAT_FP16:  		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { @@ -391,14 +393,19 @@ void get_hdr_visual_confirm_color(  		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {  			/* FreeSync 2 HDR - set border color to green */  			color->color_g_y = color_value; -		} +		} else +			is_sdr = true;  		break;  	default: +		is_sdr = true; +		break; +	} + +	if (is_sdr) {  		/* SDR - set border color to Gray */  		color->color_r_cr = color_value/2;  		color->color_b_cb = color_value/2;  		color->color_g_y = color_value/2; -		break;  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index d7b1ace6328a..c26e7258a91c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -23,4937 +23,5 @@   *   */ -#include <linux/slab.h> - -#include "dm_services.h" -#include "atomfirmware.h" -#include "dm_helpers.h" -#include "dc.h" -#include "grph_object_id.h" -#include "gpio_service_interface.h" -#include "core_status.h" -#include "dc_link_dp.h" -#include "dc_link_dpia.h" -#include "dc_link_ddc.h" -#include "link_hwss.h" -#include "opp.h" - -#include "link_encoder.h" -#include "hw_sequencer.h" -#include "resource.h" -#include "abm.h" -#include "fixed31_32.h" -#include "dpcd_defs.h" -#include "dmcu.h" -#include "hw/clk_mgr.h" -#include "dce/dmub_psr.h" -#include "dmub/dmub_srv.h" -#include "inc/hw/panel_cntl.h" -#include "inc/link_enc_cfg.h" -#include "inc/link_dpcd.h" -#include "link/link_dp_trace.h" - -#include "dc/dcn30/dcn30_vpg.h" - -#define DC_LOGGER_INIT(logger) - -#define LINK_INFO(...) \ -	DC_LOG_HW_HOTPLUG(  \ -		__VA_ARGS__) - -#define RETIMER_REDRIVER_INFO(...) 
\ -	DC_LOG_RETIMER_REDRIVER(  \ -		__VA_ARGS__) - -/******************************************************************************* - * Private functions - ******************************************************************************/ -static void dc_link_destruct(struct dc_link *link) -{ -	int i; - -	if (link->hpd_gpio) { -		dal_gpio_destroy_irq(&link->hpd_gpio); -		link->hpd_gpio = NULL; -	} - -	if (link->ddc) -		dal_ddc_service_destroy(&link->ddc); - -	if (link->panel_cntl) -		link->panel_cntl->funcs->destroy(&link->panel_cntl); - -	if (link->link_enc) { -		/* Update link encoder resource tracking variables. These are used for -		 * the dynamic assignment of link encoders to streams. Virtual links -		 * are not assigned encoder resources on creation. -		 */ -		if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { -			link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; -			link->dc->res_pool->dig_link_enc_count--; -		} -		link->link_enc->funcs->destroy(&link->link_enc); -	} - -	if (link->local_sink) -		dc_sink_release(link->local_sink); - -	for (i = 0; i < link->sink_count; ++i) -		dc_sink_release(link->remote_sinks[i]); -} - -struct gpio *get_hpd_gpio(struct dc_bios *dcb, -			  struct graphics_object_id link_id, -			  struct gpio_service *gpio_service) -{ -	enum bp_result bp_result; -	struct graphics_object_hpd_info hpd_info; -	struct gpio_pin_info pin_info; - -	if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) -		return NULL; - -	bp_result = dcb->funcs->get_gpio_pin_info(dcb, -		hpd_info.hpd_int_gpio_uid, &pin_info); - -	if (bp_result != BP_RESULT_OK) { -		ASSERT(bp_result == BP_RESULT_NORECORD); -		return NULL; -	} - -	return dal_gpio_service_create_irq(gpio_service, -					   pin_info.offset, -					   pin_info.mask); -} - -/* - *  Function: program_hpd_filter - * - *  @brief - *     Programs HPD filter on associated HPD line - * - *  @param [in] delay_on_connect_in_ms: Connect filter timeout - *  @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout - * - *  @return - *     true on success, false otherwise - */ -static bool program_hpd_filter(const struct dc_link *link) -{ -	bool result = false; -	struct gpio *hpd; -	int delay_on_connect_in_ms = 0; -	int delay_on_disconnect_in_ms = 0; - -	if (link->is_hpd_filter_disabled) -		return false; -	/* Verify feature is supported */ -	switch (link->connector_signal) { -	case SIGNAL_TYPE_DVI_SINGLE_LINK: -	case SIGNAL_TYPE_DVI_DUAL_LINK: -	case SIGNAL_TYPE_HDMI_TYPE_A: -		/* Program hpd filter */ -		delay_on_connect_in_ms = 500; -		delay_on_disconnect_in_ms = 100; -		break; -	case SIGNAL_TYPE_DISPLAY_PORT: -	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		/* Program hpd filter to allow DP signal to settle */ -		/* 500:	not able to detect MST <-> SST switch as HPD is low for -		 * only 100ms on DELL U2413 -		 * 0: some passive dongle still show aux mode instead of i2c -		 * 20-50: not enough to hide bouncing HPD with passive dongle. -		 * also see intermittent i2c read issues. 
-		 */ -		delay_on_connect_in_ms = 80; -		delay_on_disconnect_in_ms = 0; -		break; -	case SIGNAL_TYPE_LVDS: -	case SIGNAL_TYPE_EDP: -	default: -		/* Don't program hpd filter */ -		return false; -	} - -	/* Obtain HPD handle */ -	hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, -			   link->ctx->gpio_service); - -	if (!hpd) -		return result; - -	/* Setup HPD filtering */ -	if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { -		struct gpio_hpd_config config; - -		config.delay_on_connect = delay_on_connect_in_ms; -		config.delay_on_disconnect = delay_on_disconnect_in_ms; - -		dal_irq_setup_hpd_filter(hpd, &config); - -		dal_gpio_close(hpd); - -		result = true; -	} else { -		ASSERT_CRITICAL(false); -	} - -	/* Release HPD handle */ -	dal_gpio_destroy_irq(&hpd); - -	return result; -} - -bool dc_link_wait_for_t12(struct dc_link *link) -{ -	if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { -		link->dc->hwss.edp_wait_for_T12(link); - -		return true; -	} - -	return false; -} - -/** - * dc_link_detect_sink() - Determine if there is a sink connected - * - * @link: pointer to the dc link - * @type: Returned connection type - * Does not detect downstream devices, such as MST sinks - * or display connected through active dongles - */ -bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) -{ -	uint32_t is_hpd_high = 0; -	struct gpio *hpd_pin; - -	if (link->connector_signal == SIGNAL_TYPE_LVDS) { -		*type = dc_connection_single; -		return true; -	} - -	if (link->connector_signal == SIGNAL_TYPE_EDP) { -		/*in case it is not on*/ -		if (!link->dc->config.edp_no_power_sequencing) -			link->dc->hwss.edp_power_control(link, true); -		link->dc->hwss.edp_wait_for_hpd_ready(link, true); -	} - -	/* Link may not have physical HPD pin. */ -	if (link->ep_type != DISPLAY_ENDPOINT_PHY) { -		if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link)) -			*type = dc_connection_none; -		else -			*type = dc_connection_single; - -		return true; -	} - -	/* todo: may need to lock gpio access */ -	hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, -			       link->ctx->gpio_service); -	if (!hpd_pin) -		goto hpd_gpio_failure; - -	dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); -	dal_gpio_get_value(hpd_pin, &is_hpd_high); -	dal_gpio_close(hpd_pin); -	dal_gpio_destroy_irq(&hpd_pin); - -	if (is_hpd_high) { -		*type = dc_connection_single; -		/* TODO: need to do the actual detection */ -	} else { -		*type = dc_connection_none; -	} - -	return true; - -hpd_gpio_failure: -	return false; -} - -static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) -{ -	enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; - -	switch (sink_signal) { -	case SIGNAL_TYPE_DVI_SINGLE_LINK: -	case SIGNAL_TYPE_DVI_DUAL_LINK: -	case SIGNAL_TYPE_HDMI_TYPE_A: -	case SIGNAL_TYPE_LVDS: -	case SIGNAL_TYPE_RGB: -		transaction_type = DDC_TRANSACTION_TYPE_I2C; -		break; - -	case SIGNAL_TYPE_DISPLAY_PORT: -	case SIGNAL_TYPE_EDP: -		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; -		break; - -	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		/* MST does not use I2COverAux, but there is the -		 * SPECIAL use case for "immediate dwnstrm device -		 * access" (EPR#370830). 
-		 */ -		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; -		break; - -	default: -		break; -	} - -	return transaction_type; -} - -static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, -					      struct graphics_object_id downstream) -{ -	if (downstream.type == OBJECT_TYPE_CONNECTOR) { -		switch (downstream.id) { -		case CONNECTOR_ID_SINGLE_LINK_DVII: -			switch (encoder.id) { -			case ENCODER_ID_INTERNAL_DAC1: -			case ENCODER_ID_INTERNAL_KLDSCP_DAC1: -			case ENCODER_ID_INTERNAL_DAC2: -			case ENCODER_ID_INTERNAL_KLDSCP_DAC2: -				return SIGNAL_TYPE_RGB; -			default: -				return SIGNAL_TYPE_DVI_SINGLE_LINK; -			} -		break; -		case CONNECTOR_ID_DUAL_LINK_DVII: -		{ -			switch (encoder.id) { -			case ENCODER_ID_INTERNAL_DAC1: -			case ENCODER_ID_INTERNAL_KLDSCP_DAC1: -			case ENCODER_ID_INTERNAL_DAC2: -			case ENCODER_ID_INTERNAL_KLDSCP_DAC2: -				return SIGNAL_TYPE_RGB; -			default: -				return SIGNAL_TYPE_DVI_DUAL_LINK; -			} -		} -		break; -		case CONNECTOR_ID_SINGLE_LINK_DVID: -			return SIGNAL_TYPE_DVI_SINGLE_LINK; -		case CONNECTOR_ID_DUAL_LINK_DVID: -			return SIGNAL_TYPE_DVI_DUAL_LINK; -		case CONNECTOR_ID_VGA: -			return SIGNAL_TYPE_RGB; -		case CONNECTOR_ID_HDMI_TYPE_A: -			return SIGNAL_TYPE_HDMI_TYPE_A; -		case CONNECTOR_ID_LVDS: -			return SIGNAL_TYPE_LVDS; -		case CONNECTOR_ID_DISPLAY_PORT: -		case CONNECTOR_ID_USBC: -			return SIGNAL_TYPE_DISPLAY_PORT; -		case CONNECTOR_ID_EDP: -			return SIGNAL_TYPE_EDP; -		default: -			return SIGNAL_TYPE_NONE; -		} -	} else if (downstream.type == OBJECT_TYPE_ENCODER) { -		switch (downstream.id) { -		case ENCODER_ID_EXTERNAL_NUTMEG: -		case ENCODER_ID_EXTERNAL_TRAVIS: -			return SIGNAL_TYPE_DISPLAY_PORT; -		default: -			return SIGNAL_TYPE_NONE; -		} -	} - -	return SIGNAL_TYPE_NONE; -} - -/* - * dc_link_is_dp_sink_present() - Check if there is a native DP - * or passive DP-HDMI dongle connected - */ -bool dc_link_is_dp_sink_present(struct dc_link *link) -{ -	enum gpio_result gpio_result; -	uint32_t clock_pin = 0; -	uint8_t retry = 0; -	struct ddc *ddc; - -	enum connector_id connector_id = -		dal_graphics_object_id_get_connector_id(link->link_id); - -	bool present = -		((connector_id == CONNECTOR_ID_DISPLAY_PORT) || -		(connector_id == CONNECTOR_ID_EDP) || -		(connector_id == CONNECTOR_ID_USBC)); - -	ddc = dal_ddc_service_get_ddc_pin(link->ddc); - -	if (!ddc) { -		BREAK_TO_DEBUGGER(); -		return present; -	} - -	/* Open GPIO and set it to I2C mode */ -	/* Note: this GpioMode_Input will be converted -	 * to GpioConfigType_I2cAuxDualMode in GPIO component, -	 * which indicates we need additional delay -	 */ - -	if (dal_ddc_open(ddc, GPIO_MODE_INPUT, -			 GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) { -		dal_ddc_close(ddc); - -		return present; -	} - -	/* -	 * Read GPIO: DP sink is present if both clock and data pins are zero -	 * -	 * [W/A] plug-unplug DP cable, sometimes customer board has -	 * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI -	 * then monitor can't br light up. 
Add retry 3 times -	 * But in real passive dongle, it need additional 3ms to detect -	 */ -	do { -		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); -		ASSERT(gpio_result == GPIO_RESULT_OK); -		if (clock_pin) -			udelay(1000); -		else -			break; -	} while (retry++ < 3); - -	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; - -	dal_ddc_close(ddc); - -	return present; -} - -/* - * @brief - * Detect output sink type - */ -static enum signal_type link_detect_sink(struct dc_link *link, -					 enum dc_detect_reason reason) -{ -	enum signal_type result; -	struct graphics_object_id enc_id; - -	if (link->is_dig_mapping_flexible) -		enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; -	else -		enc_id = link->link_enc->id; -	result = get_basic_signal_type(enc_id, link->link_id); - -	/* Use basic signal type for link without physical connector. */ -	if (link->ep_type != DISPLAY_ENDPOINT_PHY) -		return result; - -	/* Internal digital encoder will detect only dongles -	 * that require digital signal -	 */ - -	/* Detection mechanism is different -	 * for different native connectors. -	 * LVDS connector supports only LVDS signal; -	 * PCIE is a bus slot, the actual connector needs to be detected first; -	 * eDP connector supports only eDP signal; -	 * HDMI should check straps for audio -	 */ - -	/* PCIE detects the actual connector on add-on board */ -	if (link->link_id.id == CONNECTOR_ID_PCIE) { -		/* ZAZTODO implement PCIE add-on card detection */ -	} - -	switch (link->link_id.id) { -	case CONNECTOR_ID_HDMI_TYPE_A: { -		/* check audio support: -		 * if native HDMI is not supported, switch to DVI -		 */ -		struct audio_support *aud_support = -					&link->dc->res_pool->audio_support; - -		if (!aud_support->hdmi_audio_native) -			if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) -				result = SIGNAL_TYPE_DVI_SINGLE_LINK; -	} -	break; -	case CONNECTOR_ID_DISPLAY_PORT: -	case CONNECTOR_ID_USBC: { -		/* DP HPD short pulse. 
Passive DP dongle will not -		 * have short pulse -		 */ -		if (reason != DETECT_REASON_HPDRX) { -			/* Check whether DP signal detected: if not - -			 * we assume signal is DVI; it could be corrected -			 * to HDMI after dongle detection -			 */ -			if (!dm_helpers_is_dp_sink_present(link)) -				result = SIGNAL_TYPE_DVI_SINGLE_LINK; -		} -	} -	break; -	default: -	break; -	} - -	return result; -} - -static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, -								 struct audio_support *audio_support) -{ -	enum signal_type signal = SIGNAL_TYPE_NONE; - -	switch (dongle_type) { -	case DISPLAY_DONGLE_DP_HDMI_DONGLE: -		if (audio_support->hdmi_audio_on_dongle) -			signal = SIGNAL_TYPE_HDMI_TYPE_A; -		else -			signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -		break; -	case DISPLAY_DONGLE_DP_DVI_DONGLE: -		signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -		break; -	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: -		if (audio_support->hdmi_audio_native) -			signal =  SIGNAL_TYPE_HDMI_TYPE_A; -		else -			signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -		break; -	default: -		signal = SIGNAL_TYPE_NONE; -		break; -	} - -	return signal; -} - -static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, -						    struct display_sink_capability *sink_cap, -						    struct audio_support *audio_support) -{ -	dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap); - -	return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, -							audio_support); -} - -static void link_disconnect_sink(struct dc_link *link) -{ -	if (link->local_sink) { -		dc_sink_release(link->local_sink); -		link->local_sink = NULL; -	} - -	link->dpcd_sink_count = 0; -	//link->dpcd_caps.dpcd_rev.raw = 0; -} - -static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) -{ -	dc_sink_release(link->local_sink); -	link->local_sink = prev_sink; -} - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal) -{ -	bool ret = false; - -	switch (signal)	{ -	case SIGNAL_TYPE_DISPLAY_PORT: -	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; -		break; -	case SIGNAL_TYPE_DVI_SINGLE_LINK: -	case SIGNAL_TYPE_DVI_DUAL_LINK: -	case SIGNAL_TYPE_HDMI_TYPE_A: -	/* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, -	 * we can poll for bksv but some displays have an issue with this. Since its so rare -	 * for a display to not be 1.4 capable, this assumtion is ok -	 */ -		ret = true; -		break; -	default: -		break; -	} -	return ret; -} - -bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal) -{ -	bool ret = false; - -	switch (signal)	{ -	case SIGNAL_TYPE_DISPLAY_PORT: -	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && -				link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && -				(link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; -		break; -	case SIGNAL_TYPE_DVI_SINGLE_LINK: -	case SIGNAL_TYPE_DVI_DUAL_LINK: -	case SIGNAL_TYPE_HDMI_TYPE_A: -		ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 
1:0; -		break; -	default: -		break; -	} - -	return ret; -} - -static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) -{ -	struct hdcp_protection_message msg22; -	struct hdcp_protection_message msg14; - -	memset(&msg22, 0, sizeof(struct hdcp_protection_message)); -	memset(&msg14, 0, sizeof(struct hdcp_protection_message)); -	memset(link->hdcp_caps.rx_caps.raw, 0, -		sizeof(link->hdcp_caps.rx_caps.raw)); - -	if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && -			link->ddc->transaction_type == -			DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || -			link->connector_signal == SIGNAL_TYPE_EDP) { -		msg22.data = link->hdcp_caps.rx_caps.raw; -		msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); -		msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; -	} else { -		msg22.data = &link->hdcp_caps.rx_caps.fields.version; -		msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); -		msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; -	} -	msg22.version = HDCP_VERSION_22; -	msg22.link = HDCP_LINK_PRIMARY; -	msg22.max_retries = 5; -	dc_process_hdcp_msg(signal, link, &msg22); - -	if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -		msg14.data = &link->hdcp_caps.bcaps.raw; -		msg14.length = sizeof(link->hdcp_caps.bcaps.raw); -		msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; -		msg14.version = HDCP_VERSION_14; -		msg14.link = HDCP_LINK_PRIMARY; -		msg14.max_retries = 5; - -		dc_process_hdcp_msg(signal, link, &msg14); -	} - -} -#endif - -static void read_current_link_settings_on_detect(struct dc_link *link) -{ -	union lane_count_set lane_count_set = {0}; -	uint8_t link_bw_set; -	uint8_t link_rate_set; -	uint32_t read_dpcd_retry_cnt = 10; -	enum dc_status status = DC_ERROR_UNEXPECTED; -	int i; -	union max_down_spread max_down_spread = {0}; - -	// Read DPCD 00101h to find out the number of lanes currently set -	for (i = 0; i < read_dpcd_retry_cnt; i++) { -		status = core_link_read_dpcd(link, -					     DP_LANE_COUNT_SET, -					     &lane_count_set.raw, -					     sizeof(lane_count_set)); -		/* First DPCD read after VDD ON can fail if the particular board -		 * does not have HPD pin wired correctly. So if DPCD read fails, -		 * which it should never happen, retry a few times. Target worst -		 * case scenario of 80 ms. -		 */ -		if (status == DC_OK) { -			link->cur_link_settings.lane_count = -					lane_count_set.bits.LANE_COUNT_SET; -			break; -		} - -		msleep(8); -	} - -	// Read DPCD 00100h to find if standard link rates are set -	core_link_read_dpcd(link, DP_LINK_BW_SET, -			    &link_bw_set, sizeof(link_bw_set)); - -	if (link_bw_set == 0) { -		if (link->connector_signal == SIGNAL_TYPE_EDP) { -			/* If standard link rates are not being used, -			 * Read DPCD 00115h to find the edp link rate set used -			 */ -			core_link_read_dpcd(link, DP_LINK_RATE_SET, -					    &link_rate_set, sizeof(link_rate_set)); - -			// edp_supported_link_rates_count = 0 for DP -			if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { -				link->cur_link_settings.link_rate = -					link->dpcd_caps.edp_supported_link_rates[link_rate_set]; -				link->cur_link_settings.link_rate_set = link_rate_set; -				link->cur_link_settings.use_link_rate_set = true; -			} -		} else { -			// Link Rate not found. Seamless boot may not work. -			ASSERT(false); -		} -	} else { -		link->cur_link_settings.link_rate = link_bw_set; -		link->cur_link_settings.use_link_rate_set = false; -	} -	// Read DPCD 00003h to find the max down spread. 
-	core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, -			    &max_down_spread.raw, sizeof(max_down_spread)); -	link->cur_link_settings.link_spread = -		max_down_spread.bits.MAX_DOWN_SPREAD ? -		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; -} - -static bool detect_dp(struct dc_link *link, -		      struct display_sink_capability *sink_caps, -		      enum dc_detect_reason reason) -{ -	struct audio_support *audio_support = &link->dc->res_pool->audio_support; - -	sink_caps->signal = link_detect_sink(link, reason); -	sink_caps->transaction_type = -		get_ddc_transaction_type(sink_caps->signal); - -	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { -		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; -		if (!detect_dp_sink_caps(link)) -			return false; - -		if (is_dp_branch_device(link)) -			/* DP SST branch */ -			link->type = dc_connection_sst_branch; -	} else { -		/* DP passive dongles */ -		sink_caps->signal = dp_passive_dongle_detection(link->ddc, -								sink_caps, -								audio_support); -		link->dpcd_caps.dongle_type = sink_caps->dongle_type; -		link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; -		link->dpcd_caps.dpcd_rev.raw = 0; -	} - -	return true; -} - -static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) -{ -	if (old_edid->length != new_edid->length) -		return false; - -	if (new_edid->length == 0) -		return false; - -	return (memcmp(old_edid->raw_edid, -		       new_edid->raw_edid, new_edid->length) == 0); -} - -static bool wait_for_entering_dp_alt_mode(struct dc_link *link) -{ -	/** -	 * something is terribly wrong if time out is > 200ms. (5Hz) -	 * 500 microseconds * 400 tries us 200 ms -	 **/ -	unsigned int sleep_time_in_microseconds = 500; -	unsigned int tries_allowed = 400; -	bool is_in_alt_mode; -	unsigned long long enter_timestamp; -	unsigned long long finish_timestamp; -	unsigned long long time_taken_in_ns; -	int tries_taken; - -	DC_LOGGER_INIT(link->ctx->logger); - -	if (!link->link_enc->funcs->is_in_alt_mode) -		return true; - -	is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); -	DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode); - -	if (is_in_alt_mode) -		return true; - -	enter_timestamp = dm_get_timestamp(link->ctx); - -	for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) { -		udelay(sleep_time_in_microseconds); -		/* ask the link if alt mode is enabled, if so return ok */ -		if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { -			finish_timestamp = dm_get_timestamp(link->ctx); -			time_taken_in_ns = -				dm_get_elapse_time_in_ns(link->ctx, -							 finish_timestamp, -							 enter_timestamp); -			DC_LOG_WARNING("Alt mode entered finished after %llu ms\n", -				       div_u64(time_taken_in_ns, 1000000)); -			return true; -		} -	} -	finish_timestamp = dm_get_timestamp(link->ctx); -	time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, -						    enter_timestamp); -	DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", -		       div_u64(time_taken_in_ns, 1000000)); -	return false; -} - -static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) -{ -	/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock -	 * reports DSC support. 
-	 */ -	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && -			link->type == dc_connection_mst_branch && -			link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && -			link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && -			link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && -			!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) -		link->wa_flags.dpia_mst_dsc_always_on = true; -} - -static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) -{ -	/* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ -	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -		link->wa_flags.dpia_mst_dsc_always_on = false; -} - -static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) -{ -	DC_LOGGER_INIT(link->ctx->logger); - -	LINK_INFO("link=%d, mst branch is now Connected\n", -		  link->link_index); - -	link->type = dc_connection_mst_branch; -	apply_dpia_mst_dsc_always_on_wa(link); - -	dm_helpers_dp_update_branch_info(link->ctx, link); -	if (dm_helpers_dp_mst_start_top_mgr(link->ctx, -			link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { -		link_disconnect_sink(link); -	} else { -		link->type = dc_connection_sst_branch; -	} - -	return link->type == dc_connection_mst_branch; -} - -bool reset_cur_dp_mst_topology(struct dc_link *link) -{ -	DC_LOGGER_INIT(link->ctx->logger); - -	LINK_INFO("link=%d, mst branch is now Disconnected\n", -		  link->link_index); - -	revert_dpia_mst_dsc_always_on_wa(link); -	return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); -} - -static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, -		enum dc_detect_reason reason) -{ -	int i; -	bool can_apply_seamless_boot = false; - -	for (i = 0; i < dc->current_state->stream_count; i++) { -		if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { -			can_apply_seamless_boot = true; -			break; -		} -	} - -	return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; -} - -static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) -{ -	dc_z10_restore(dc); -	clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); -} - -static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) -{ -	clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); -} - -static void set_all_streams_dpms_off_for_link(struct dc_link *link) -{ -	int i; -	struct pipe_ctx *pipe_ctx; -	struct dc_stream_update stream_update; -	bool dpms_off = true; -	struct link_resource link_res = {0}; - -	memset(&stream_update, 0, sizeof(stream_update)); -	stream_update.dpms_off = &dpms_off; - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -				pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { -			stream_update.stream = pipe_ctx->stream; -			dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, -					pipe_ctx->stream, &stream_update, -					link->ctx->dc->current_state); -		} -	} - -	/* link can be also enabled by vbios. In this case it is not recorded -	 * in pipe_ctx. 
Disable link phy here to make sure it is completely off -	 */ -	dp_disable_link_phy(link, &link_res, link->connector_signal); -} - -static void verify_link_capability_destructive(struct dc_link *link, -		struct dc_sink *sink, -		enum dc_detect_reason reason) -{ -	bool should_prepare_phy_clocks = -			should_prepare_phy_clocks_for_link_verification(link->dc, reason); - -	if (should_prepare_phy_clocks) -		prepare_phy_clocks_for_destructive_link_verification(link->dc); - -	if (dc_is_dp_signal(link->local_sink->sink_signal)) { -		struct dc_link_settings known_limit_link_setting = -				dp_get_max_link_cap(link); -		set_all_streams_dpms_off_for_link(link); -		dp_verify_link_cap_with_retries( -				link, &known_limit_link_setting, -				LINK_TRAINING_MAX_VERIFY_RETRY); -	} else { -		ASSERT(0); -	} - -	if (should_prepare_phy_clocks) -		restore_phy_clocks_for_destructive_link_verification(link->dc); -} - -static void verify_link_capability_non_destructive(struct dc_link *link) -{ -	if (dc_is_dp_signal(link->local_sink->sink_signal)) { -		if (dc_is_embedded_signal(link->local_sink->sink_signal) || -				link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -			/* TODO - should we check link encoder's max link caps here? -			 * How do we know which link encoder to check from? -			 */ -			link->verified_link_cap = link->reported_link_cap; -		else -			link->verified_link_cap = dp_get_max_link_cap(link); -	} -} - -static bool should_verify_link_capability_destructively(struct dc_link *link, -		enum dc_detect_reason reason) -{ -	bool destrictive = false; -	struct dc_link_settings max_link_cap; -	bool is_link_enc_unavailable = link->link_enc && -			link->dc->res_pool->funcs->link_encs_assign && -			!link_enc_cfg_is_link_enc_avail( -					link->ctx->dc, -					link->link_enc->preferred_engine, -					link); - -	if (dc_is_dp_signal(link->local_sink->sink_signal)) { -		max_link_cap = dp_get_max_link_cap(link); -		destrictive = true; - -		if (link->dc->debug.skip_detection_link_training || -				dc_is_embedded_signal(link->local_sink->sink_signal) || -				link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { -			destrictive = false; -		} else if (dp_get_link_encoding_format(&max_link_cap) == -				DP_8b_10b_ENCODING) { -			if (link->dpcd_caps.is_mst_capable || -					is_link_enc_unavailable) { -				destrictive = false; -			} -		} -	} - -	return destrictive; -} - -static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, -		enum dc_detect_reason reason) -{ -	if (should_verify_link_capability_destructively(link, reason)) -		verify_link_capability_destructive(link, sink, reason); -	else -		verify_link_capability_non_destructive(link); -} - - -/** - * detect_link_and_local_sink() - Detect if a sink is attached to a given link - * - * link->local_sink is created or destroyed as needed. - * - * This does not create remote sinks. 
- */ -static bool detect_link_and_local_sink(struct dc_link *link, -				  enum dc_detect_reason reason) -{ -	struct dc_sink_init_data sink_init_data = { 0 }; -	struct display_sink_capability sink_caps = { 0 }; -	uint32_t i; -	bool converter_disable_audio = false; -	struct audio_support *aud_support = &link->dc->res_pool->audio_support; -	bool same_edid = false; -	enum dc_edid_status edid_status; -	struct dc_context *dc_ctx = link->ctx; -	struct dc *dc = dc_ctx->dc; -	struct dc_sink *sink = NULL; -	struct dc_sink *prev_sink = NULL; -	struct dpcd_caps prev_dpcd_caps; -	enum dc_connection_type new_connection_type = dc_connection_none; -	const uint32_t post_oui_delay = 30; // 30ms - -	DC_LOGGER_INIT(link->ctx->logger); - -	if (dc_is_virtual_signal(link->connector_signal)) -		return false; - -	if (((link->connector_signal == SIGNAL_TYPE_LVDS || -		link->connector_signal == SIGNAL_TYPE_EDP) && -		(!link->dc->config.allow_edp_hotplug_detection)) && -		link->local_sink) { -		// need to re-write OUI and brightness in resume case -		if (link->connector_signal == SIGNAL_TYPE_EDP && -			(link->dpcd_sink_ext_caps.bits.oled == 1)) { -			dpcd_set_source_specific_data(link); -			msleep(post_oui_delay); -			dc_link_set_default_brightness_aux(link); -			//TODO: use cached -		} - -		return true; -	} - -	if (!dc_link_detect_sink(link, &new_connection_type)) { -		BREAK_TO_DEBUGGER(); -		return false; -	} - -	prev_sink = link->local_sink; -	if (prev_sink) { -		dc_sink_retain(prev_sink); -		memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); -	} - -	link_disconnect_sink(link); -	if (new_connection_type != dc_connection_none) { -		link->type = new_connection_type; -		link->link_state_valid = false; - -		/* From Disconnected-to-Connected. */ -		switch (link->connector_signal) { -		case SIGNAL_TYPE_HDMI_TYPE_A: { -			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; -			if (aud_support->hdmi_audio_native) -				sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; -			else -				sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -			break; -		} - -		case SIGNAL_TYPE_DVI_SINGLE_LINK: { -			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; -			sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -			break; -		} - -		case SIGNAL_TYPE_DVI_DUAL_LINK: { -			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; -			sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; -			break; -		} - -		case SIGNAL_TYPE_LVDS: { -			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; -			sink_caps.signal = SIGNAL_TYPE_LVDS; -			break; -		} - -		case SIGNAL_TYPE_EDP: { -			read_current_link_settings_on_detect(link); - -			detect_edp_sink_caps(link); -			read_current_link_settings_on_detect(link); - -			/* Disable power sequence on MIPI panel + converter -			 */ -			if (dc->config.enable_mipi_converter_optimization && -				dc_ctx->dce_version == DCN_VERSION_3_01 && -				link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && -				memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, -					sizeof(link->dpcd_caps.branch_dev_name)) == 0) { -				dc->config.edp_no_power_sequencing = true; - -				if (!link->dpcd_caps.set_power_state_capable_edp) -					link->wa_flags.dp_keep_receiver_powered = true; -			} - -			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; -			sink_caps.signal = SIGNAL_TYPE_EDP; -			break; -		} - -		case SIGNAL_TYPE_DISPLAY_PORT: { -			/* wa HPD high coming too early*/ -			if (link->ep_type == DISPLAY_ENDPOINT_PHY && -			    link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { -				/* 
if alt mode times out, return false */ -				if (!wait_for_entering_dp_alt_mode(link)) -					return false; -			} - -			if (!detect_dp(link, &sink_caps, reason)) { -				if (prev_sink) -					dc_sink_release(prev_sink); -				return false; -			} - -			/* Active SST downstream branch device unplug*/ -			if (link->type == dc_connection_sst_branch && -			    link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { -				if (prev_sink) -					/* Downstream unplug */ -					dc_sink_release(prev_sink); -				return true; -			} - -			/* disable audio for non DP to HDMI active sst converter */ -			if (link->type == dc_connection_sst_branch && -					is_dp_active_dongle(link) && -					(link->dpcd_caps.dongle_type != -							DISPLAY_DONGLE_DP_HDMI_CONVERTER)) -				converter_disable_audio = true; -			break; -		} - -		default: -			DC_ERROR("Invalid connector type! signal:%d\n", -				 link->connector_signal); -			if (prev_sink) -				dc_sink_release(prev_sink); -			return false; -		} /* switch() */ - -		if (link->dpcd_caps.sink_count.bits.SINK_COUNT) -			link->dpcd_sink_count = -				link->dpcd_caps.sink_count.bits.SINK_COUNT; -		else -			link->dpcd_sink_count = 1; - -		dal_ddc_service_set_transaction_type(link->ddc, -						     sink_caps.transaction_type); - -		link->aux_mode = -			dal_ddc_service_is_in_aux_transaction_mode(link->ddc); - -		sink_init_data.link = link; -		sink_init_data.sink_signal = sink_caps.signal; - -		sink = dc_sink_create(&sink_init_data); -		if (!sink) { -			DC_ERROR("Failed to create sink!\n"); -			if (prev_sink) -				dc_sink_release(prev_sink); -			return false; -		} - -		sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; -		sink->converter_disable_audio = converter_disable_audio; - -		/* dc_sink_create returns a new reference */ -		link->local_sink = sink; - -		edid_status = dm_helpers_read_local_edid(link->ctx, -							 link, sink); - -		switch (edid_status) { -		case EDID_BAD_CHECKSUM: -			DC_LOG_ERROR("EDID checksum invalid.\n"); -			break; -		case EDID_PARTIAL_VALID: -			DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); -			break; -		case EDID_NO_RESPONSE: -			DC_LOG_ERROR("No EDID read.\n"); -			/* -			 * Abort detection for non-DP connectors if we have -			 * no EDID -			 * -			 * DP needs to report as connected if HDP is high -			 * even if we have no EDID in order to go to -			 * fail-safe mode -			 */ -			if (dc_is_hdmi_signal(link->connector_signal) || -			    dc_is_dvi_signal(link->connector_signal)) { -				if (prev_sink) -					dc_sink_release(prev_sink); - -				return false; -			} - -			if (link->type == dc_connection_sst_branch && -					link->dpcd_caps.dongle_type == -						DISPLAY_DONGLE_DP_VGA_CONVERTER && -					reason == DETECT_REASON_HPDRX) { -				/* Abort detection for DP-VGA adapters when EDID -				 * can't be read and detection reason is VGA-side -				 * hotplug -				 */ -				if (prev_sink) -					dc_sink_release(prev_sink); -				link_disconnect_sink(link); - -				return true; -			} - -			break; -		default: -			break; -		} - -		// Check if edid is the same -		if ((prev_sink) && -		    (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) -			same_edid = is_same_edid(&prev_sink->dc_edid, -						 &sink->dc_edid); - -		if (sink->edid_caps.panel_patch.skip_scdc_overwrite) -			link->ctx->dc->debug.hdmi20_disable = true; - -		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && -		    sink_caps.transaction_type == -		    DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { -			/* -			 * TODO debug why Dell 2413 doesn't like -			 *  two link trainings -			 */ 
-#if defined(CONFIG_DRM_AMD_DC_HDCP) -			query_hdcp_capability(sink->sink_signal, link); -#endif -		} else { -			// If edid is the same, then discard new sink and revert back to original sink -			if (same_edid) { -				link_disconnect_remap(prev_sink, link); -				sink = prev_sink; -				prev_sink = NULL; -			} -#if defined(CONFIG_DRM_AMD_DC_HDCP) -			query_hdcp_capability(sink->sink_signal, link); -#endif -		} - -		/* HDMI-DVI Dongle */ -		if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && -		    !sink->edid_caps.edid_hdmi) -			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; - -		if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) -			dp_trace_init(link); - -		/* Connectivity log: detection */ -		for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { -			CONN_DATA_DETECT(link, -					 &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], -					 DC_EDID_BLOCK_SIZE, -					 "%s: [Block %d] ", sink->edid_caps.display_name, i); -		} - -		DC_LOG_DETECTION_EDID_PARSER("%s: " -			"manufacturer_id = %X, " -			"product_id = %X, " -			"serial_number = %X, " -			"manufacture_week = %d, " -			"manufacture_year = %d, " -			"display_name = %s, " -			"speaker_flag = %d, " -			"audio_mode_count = %d\n", -			__func__, -			sink->edid_caps.manufacturer_id, -			sink->edid_caps.product_id, -			sink->edid_caps.serial_number, -			sink->edid_caps.manufacture_week, -			sink->edid_caps.manufacture_year, -			sink->edid_caps.display_name, -			sink->edid_caps.speaker_flags, -			sink->edid_caps.audio_mode_count); - -		for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { -			DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, " -				"format_code = %d, " -				"channel_count = %d, " -				"sample_rate = %d, " -				"sample_size = %d\n", -				__func__, -				i, -				sink->edid_caps.audio_modes[i].format_code, -				sink->edid_caps.audio_modes[i].channel_count, -				sink->edid_caps.audio_modes[i].sample_rate, -				sink->edid_caps.audio_modes[i].sample_size); -		} - -		if (link->connector_signal == SIGNAL_TYPE_EDP) { -			/* Init dc_panel_config by HW config */ -			if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults) -				dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config); -			/* Pickup base DM settings */ -			dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); -			// Override dc_panel_config if system has specific settings -			dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); -		} - -	} else { -		/* From Connected-to-Disconnected. */ -		link->type = dc_connection_none; -		sink_caps.signal = SIGNAL_TYPE_NONE; -		/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk -		 *  is not cleared. If we emulate a DP signal on this connection, it thinks -		 *  the dongle is still there and limits the number of modes we can emulate. -		 *  Clear dongle_max_pix_clk on disconnect to fix this -		 */ -		link->dongle_max_pix_clk = 0; - -		dc_link_clear_dprx_states(link); -		dp_trace_reset(link); -	} - -	LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", -		  link->link_index, sink, -		  (sink_caps.signal == -		   SIGNAL_TYPE_NONE ? 
"Disconnected" : "Connected"), -		  prev_sink, same_edid); - -	if (prev_sink) -		dc_sink_release(prev_sink); - -	return true; -} - -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) -{ -	bool is_local_sink_detect_success; -	bool is_delegated_to_mst_top_mgr = false; -	enum dc_connection_type pre_link_type = link->type; - -	is_local_sink_detect_success = detect_link_and_local_sink(link, reason); - -	if (is_local_sink_detect_success && link->local_sink) -		verify_link_capability(link, link->local_sink, reason); - -	if (is_local_sink_detect_success && link->local_sink && -			dc_is_dp_signal(link->local_sink->sink_signal) && -			link->dpcd_caps.is_mst_capable) -		is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); - -	if (is_local_sink_detect_success && -			pre_link_type == dc_connection_mst_branch && -			link->type != dc_connection_mst_branch) -		is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link); - -	return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; -} - -bool dc_link_get_hpd_state(struct dc_link *dc_link) -{ -	uint32_t state; - -	dal_gpio_lock_pin(dc_link->hpd_gpio); -	dal_gpio_get_value(dc_link->hpd_gpio, &state); -	dal_gpio_unlock_pin(dc_link->hpd_gpio); - -	return state; -} - -static enum hpd_source_id get_hpd_line(struct dc_link *link) -{ -	struct gpio *hpd; -	enum hpd_source_id hpd_id; - -	hpd_id = HPD_SOURCEID_UNKNOWN; - -	hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, -			   link->ctx->gpio_service); - -	if (hpd) { -		switch (dal_irq_get_source(hpd)) { -		case DC_IRQ_SOURCE_HPD1: -			hpd_id = HPD_SOURCEID1; -		break; -		case DC_IRQ_SOURCE_HPD2: -			hpd_id = HPD_SOURCEID2; -		break; -		case DC_IRQ_SOURCE_HPD3: -			hpd_id = HPD_SOURCEID3; -		break; -		case DC_IRQ_SOURCE_HPD4: -			hpd_id = HPD_SOURCEID4; -		break; -		case DC_IRQ_SOURCE_HPD5: -			hpd_id = HPD_SOURCEID5; -		break; -		case DC_IRQ_SOURCE_HPD6: -			hpd_id = HPD_SOURCEID6; -		break; -		default: -			BREAK_TO_DEBUGGER(); -		break; -		} - -		dal_gpio_destroy_irq(&hpd); -	} - -	return hpd_id; -} - -static enum channel_id get_ddc_line(struct dc_link *link) -{ -	struct ddc *ddc; -	enum channel_id channel; - -	channel = CHANNEL_ID_UNKNOWN; - -	ddc = dal_ddc_service_get_ddc_pin(link->ddc); - -	if (ddc) { -		switch (dal_ddc_get_line(ddc)) { -		case GPIO_DDC_LINE_DDC1: -			channel = CHANNEL_ID_DDC1; -			break; -		case GPIO_DDC_LINE_DDC2: -			channel = CHANNEL_ID_DDC2; -			break; -		case GPIO_DDC_LINE_DDC3: -			channel = CHANNEL_ID_DDC3; -			break; -		case GPIO_DDC_LINE_DDC4: -			channel = CHANNEL_ID_DDC4; -			break; -		case GPIO_DDC_LINE_DDC5: -			channel = CHANNEL_ID_DDC5; -			break; -		case GPIO_DDC_LINE_DDC6: -			channel = CHANNEL_ID_DDC6; -			break; -		case GPIO_DDC_LINE_DDC_VGA: -			channel = CHANNEL_ID_DDC_VGA; -			break; -		case GPIO_DDC_LINE_I2C_PAD: -			channel = CHANNEL_ID_I2C_PAD; -			break; -		default: -			BREAK_TO_DEBUGGER(); -			break; -		} -	} - -	return channel; -} - -static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder) -{ -	switch (encoder.id) { -	case ENCODER_ID_INTERNAL_UNIPHY: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_UNIPHY_A; -		case ENUM_ID_2: -			return TRANSMITTER_UNIPHY_B; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	case ENCODER_ID_INTERNAL_UNIPHY1: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_UNIPHY_C; -		case ENUM_ID_2: -			return TRANSMITTER_UNIPHY_D; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	case 
ENCODER_ID_INTERNAL_UNIPHY2: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_UNIPHY_E; -		case ENUM_ID_2: -			return TRANSMITTER_UNIPHY_F; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	case ENCODER_ID_INTERNAL_UNIPHY3: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_UNIPHY_G; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	case ENCODER_ID_EXTERNAL_NUTMEG: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_NUTMEG_CRT; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	case ENCODER_ID_EXTERNAL_TRAVIS: -		switch (encoder.enum_id) { -		case ENUM_ID_1: -			return TRANSMITTER_TRAVIS_CRT; -		case ENUM_ID_2: -			return TRANSMITTER_TRAVIS_LCD; -		default: -			return TRANSMITTER_UNKNOWN; -		} -	break; -	default: -		return TRANSMITTER_UNKNOWN; -	} -} - -static bool dc_link_construct_legacy(struct dc_link *link, -				     const struct link_init_data *init_params) -{ -	uint8_t i; -	struct ddc_service_init_data ddc_service_init_data = { 0 }; -	struct dc_context *dc_ctx = init_params->ctx; -	struct encoder_init_data enc_init_data = { 0 }; -	struct panel_cntl_init_data panel_cntl_init_data = { 0 }; -	struct integrated_info *info; -	struct dc_bios *bios = init_params->dc->ctx->dc_bios; -	const struct dc_vbios_funcs *bp_funcs = bios->funcs; -	struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; - -	DC_LOGGER_INIT(dc_ctx->logger); - -	info = kzalloc(sizeof(*info), GFP_KERNEL); -	if (!info) -		goto create_fail; - -	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; -	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; - -	link->link_status.dpcd_caps = &link->dpcd_caps; - -	link->dc = init_params->dc; -	link->ctx = dc_ctx; -	link->link_index = init_params->link_index; - -	memset(&link->preferred_training_settings, 0, -	       sizeof(struct dc_link_training_overrides)); -	memset(&link->preferred_link_setting, 0, -	       sizeof(struct dc_link_settings)); - -	link->link_id = -		bios->funcs->get_connector_id(bios, init_params->connector_index); - -	link->ep_type = DISPLAY_ENDPOINT_PHY; - -	DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); - -	if (bios->funcs->get_disp_connector_caps_info) { -		bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); -		link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; -		DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); -	} - -	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { -		dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", -				     __func__, init_params->connector_index, -				     link->link_id.type, OBJECT_TYPE_CONNECTOR); -		goto create_fail; -	} - -	if (link->dc->res_pool->funcs->link_init) -		link->dc->res_pool->funcs->link_init(link); - -	link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, -				      link->ctx->gpio_service); - -	if (link->hpd_gpio) { -		dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); -		dal_gpio_unlock_pin(link->hpd_gpio); -		link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); - -		DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); -		DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); -	} - -	switch (link->link_id.id) { -	case CONNECTOR_ID_HDMI_TYPE_A: -		link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; - -		break; -	case CONNECTOR_ID_SINGLE_LINK_DVID: -	case CONNECTOR_ID_SINGLE_LINK_DVII: -		link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; -		break; -	case CONNECTOR_ID_DUAL_LINK_DVID: -	case CONNECTOR_ID_DUAL_LINK_DVII: -		link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; -		break; -	case CONNECTOR_ID_DISPLAY_PORT: -	case CONNECTOR_ID_USBC: -		link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; - -		if (link->hpd_gpio) -			link->irq_source_hpd_rx = -					dal_irq_get_rx_source(link->hpd_gpio); - -		break; -	case CONNECTOR_ID_EDP: -		link->connector_signal = SIGNAL_TYPE_EDP; - -		if (link->hpd_gpio) { -			if (!link->dc->config.allow_edp_hotplug_detection) -				link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; - -			switch (link->dc->config.allow_edp_hotplug_detection) { -			case 1: // only the 1st eDP handles hotplug -				if (link->link_index == 0) -					link->irq_source_hpd_rx = -						dal_irq_get_rx_source(link->hpd_gpio); -				else -					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; -				break; -			case 2: // only the 2nd eDP handles hotplug -				if (link->link_index == 1) -					link->irq_source_hpd_rx = -						dal_irq_get_rx_source(link->hpd_gpio); -				else -					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; -				break; -			default: -				break; -			} -		} - -		break; -	case CONNECTOR_ID_LVDS: -		link->connector_signal = SIGNAL_TYPE_LVDS; -		break; -	default: -		DC_LOG_WARNING("Unsupported Connector type:%d!\n", -			       link->link_id.id); -		goto create_fail; -	} - -	/* TODO: #DAL3 Implement id to str function.*/ -	LINK_INFO("Connector[%d] description:" -		  "signal %d\n", -		  init_params->connector_index, -		  link->connector_signal); - -	ddc_service_init_data.ctx = link->ctx; -	ddc_service_init_data.id = link->link_id; -	ddc_service_init_data.link = link; -	link->ddc = dal_ddc_service_create(&ddc_service_init_data); - -	if (!link->ddc) { -		DC_ERROR("Failed to create ddc_service!\n"); -		goto ddc_create_fail; -	} - -	if (!link->ddc->ddc_pin) { -		DC_ERROR("Failed to get I2C info for connector!\n"); -		goto ddc_create_fail; -	} - -	link->ddc_hw_inst = -		dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc)); - - -	if (link->dc->res_pool->funcs->panel_cntl_create && -		(link->link_id.id == CONNECTOR_ID_EDP || -			link->link_id.id == CONNECTOR_ID_LVDS)) { -		panel_cntl_init_data.ctx = dc_ctx; -		panel_cntl_init_data.inst = -			panel_cntl_init_data.ctx->dc_edp_id_count; -		link->panel_cntl = -			link->dc->res_pool->funcs->panel_cntl_create( -								&panel_cntl_init_data); -		panel_cntl_init_data.ctx->dc_edp_id_count++; - -		if (link->panel_cntl == NULL) { -			DC_ERROR("Failed to create link panel_cntl!\n"); -			goto panel_cntl_create_fail; -		} -	} - -	enc_init_data.ctx = dc_ctx; -	
bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, -			      &enc_init_data.encoder); -	enc_init_data.connector = link->link_id; -	enc_init_data.channel = get_ddc_line(link); -	enc_init_data.hpd_source = get_hpd_line(link); - -	link->hpd_src = enc_init_data.hpd_source; - -	enc_init_data.transmitter = -		translate_encoder_to_transmitter(enc_init_data.encoder); -	link->link_enc = -		link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); - -	if (!link->link_enc) { -		DC_ERROR("Failed to create link encoder!\n"); -		goto link_enc_create_fail; -	} - -	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); -	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); - -	/* Update link encoder tracking variables. These are used for the dynamic -	 * assignment of link encoders to streams. -	 */ -	link->eng_id = link->link_enc->preferred_engine; -	link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc; -	link->dc->res_pool->dig_link_enc_count++; - -	link->link_enc_hw_inst = link->link_enc->transmitter; - -	for (i = 0; i < 4; i++) { -		if (bp_funcs->get_device_tag(dc_ctx->dc_bios, -					     link->link_id, i, -					     &link->device_tag) != BP_RESULT_OK) { -			DC_ERROR("Failed to find device tag!\n"); -			goto device_tag_fail; -		} - -		/* Look for device tag that matches connector signal, -		 * CRT for rgb, LCD for other supported signal tyes -		 */ -		if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, -						      link->device_tag.dev_id)) -			continue; -		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT && -		    link->connector_signal != SIGNAL_TYPE_RGB) -			continue; -		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD && -		    link->connector_signal == SIGNAL_TYPE_RGB) -			continue; - -		DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device); -		DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type); -		DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id); -		break; -	} - -	if (bios->integrated_info) -		memcpy(info, bios->integrated_info, sizeof(*info)); - -	/* Look for channel mapping corresponding to connector and device tag */ -	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { -		struct external_display_path *path = -			&info->ext_disp_conn_info.path[i]; - -		if (path->device_connector_id.enum_id == link->link_id.enum_id && -		    path->device_connector_id.id == link->link_id.id && -		    path->device_connector_id.type == link->link_id.type) { -			if (link->device_tag.acpi_device != 0 && -			    path->device_acpi_enum == link->device_tag.acpi_device) { -				link->ddi_channel_mapping = path->channel_mapping; -				link->chip_caps = path->caps; -				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); -				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); -			} else if (path->device_tag == -				   link->device_tag.dev_id.raw_device_tag) { -				link->ddi_channel_mapping = path->channel_mapping; -				link->chip_caps = path->caps; -				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); -				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); -			} - -			if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) { -				link->bios_forced_drive_settings.VOLTAGE_SWING = -						
(info->ext_disp_conn_info.fixdpvoltageswing & 0x3); -				link->bios_forced_drive_settings.PRE_EMPHASIS = -						((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); -			} - -			break; -		} -	} - -	if (bios->funcs->get_atom_dc_golden_table) -		bios->funcs->get_atom_dc_golden_table(bios); - -	/* -	 * TODO check if GPIO programmed correctly -	 * -	 * If GPIO isn't programmed correctly HPD might not rise or drain -	 * fast enough, leading to bounces. -	 */ -	program_hpd_filter(link); - -	link->psr_settings.psr_vtotal_control_support = false; -	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - -	DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); -	kfree(info); -	return true; -device_tag_fail: -	link->link_enc->funcs->destroy(&link->link_enc); -link_enc_create_fail: -	if (link->panel_cntl != NULL) -		link->panel_cntl->funcs->destroy(&link->panel_cntl); -panel_cntl_create_fail: -	dal_ddc_service_destroy(&link->ddc); -ddc_create_fail: -create_fail: - -	if (link->hpd_gpio) { -		dal_gpio_destroy_irq(&link->hpd_gpio); -		link->hpd_gpio = NULL; -	} - -	DC_LOG_DC("BIOS object table - %s failed.\n", __func__); -	kfree(info); - -	return false; -} - -static bool dc_link_construct_dpia(struct dc_link *link, -				   const struct link_init_data *init_params) -{ -	struct ddc_service_init_data ddc_service_init_data = { 0 }; -	struct dc_context *dc_ctx = init_params->ctx; - -	DC_LOGGER_INIT(dc_ctx->logger); - -	/* Initialized irq source for hpd and hpd rx */ -	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; -	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; -	link->link_status.dpcd_caps = &link->dpcd_caps; - -	link->dc = init_params->dc; -	link->ctx = dc_ctx; -	link->link_index = init_params->link_index; - -	memset(&link->preferred_training_settings, 0, -	       sizeof(struct dc_link_training_overrides)); -	memset(&link->preferred_link_setting, 0, -	       sizeof(struct dc_link_settings)); - -	/* Dummy Init for linkid */ -	link->link_id.type = OBJECT_TYPE_CONNECTOR; -	link->link_id.id = CONNECTOR_ID_DISPLAY_PORT; -	link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index; -	link->is_internal_display = false; -	link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; -	LINK_INFO("Connector[%d] description:signal %d\n", -		  init_params->connector_index, -		  link->connector_signal); - -	link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA; -	link->is_dig_mapping_flexible = true; - -	/* TODO: Initialize link : funcs->link_init */ - -	ddc_service_init_data.ctx = link->ctx; -	ddc_service_init_data.id = link->link_id; -	ddc_service_init_data.link = link; -	/* Set indicator for dpia link so that ddc won't be created */ -	ddc_service_init_data.is_dpia_link = true; - -	link->ddc = dal_ddc_service_create(&ddc_service_init_data); -	if (!link->ddc) { -		DC_ERROR("Failed to create ddc_service!\n"); -		goto ddc_create_fail; -	} - -	/* Set dpia port index : 0 to number of dpia ports */ -	link->ddc_hw_inst = init_params->connector_index; - -	/* TODO: Create link encoder */ - -	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - -	/* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
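When the BIOS path reports EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN, the hunk above derives forced drive settings from the packed fixdpvoltageswing byte: voltage swing in bits [1:0] and pre-emphasis in bits [3:2]. A standalone sketch of that unpacking; the struct and function names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the forced drive settings fields. */
struct forced_drive_settings {
	uint8_t voltage_swing;
	uint8_t pre_emphasis;
};

static struct forced_drive_settings unpack_fixdpvoltageswing(uint8_t raw)
{
	struct forced_drive_settings s;

	s.voltage_swing = raw & 0x3;		/* bits [1:0] */
	s.pre_emphasis  = (raw >> 2) & 0x3;	/* bits [3:2] */
	return s;
}

int main(void)
{
	struct forced_drive_settings s = unpack_fixdpvoltageswing(0x0e);

	printf("swing %u, pre-emphasis %u\n", s.voltage_swing, s.pre_emphasis);
	return 0;
}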
*/ -	link->wa_flags.dp_mot_reset_segment = true; - -	return true; - -ddc_create_fail: -	return false; -} - -static bool dc_link_construct(struct dc_link *link, -			      const struct link_init_data *init_params) -{ -	/* Handle dpia case */ -	if (init_params->is_dpia_link) -		return dc_link_construct_dpia(link, init_params); -	else -		return dc_link_construct_legacy(link, init_params); -} -/******************************************************************************* - * Public functions - ******************************************************************************/ -struct dc_link *link_create(const struct link_init_data *init_params) -{ -	struct dc_link *link = -			kzalloc(sizeof(*link), GFP_KERNEL); - -	if (NULL == link) -		goto alloc_fail; - -	if (false == dc_link_construct(link, init_params)) -		goto construct_fail; - -	/* -	 * Must use preferred_link_setting, not reported_link_cap or verified_link_cap, -	 * since struct preferred_link_setting won't be reset after S3. -	 */ -	link->preferred_link_setting.dpcd_source_device_specific_field_support = true; - -	return link; - -construct_fail: -	kfree(link); - -alloc_fail: -	return NULL; -} - -void link_destroy(struct dc_link **link) -{ -	dc_link_destruct(*link); -	kfree(*link); -	*link = NULL; -} - -static void enable_stream_features(struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; - -	if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { -		struct dc_link *link = stream->link; -		union down_spread_ctrl old_downspread; -		union down_spread_ctrl new_downspread; - -		memset(&old_downspread, 0, sizeof(old_downspread)); - -		core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, -				&old_downspread.raw, sizeof(old_downspread)); - -		new_downspread.raw = old_downspread.raw; - -		new_downspread.bits.IGNORE_MSA_TIMING_PARAM = -				(stream->ignore_msa_timing_param) ? 1 : 0; - -		if (new_downspread.raw != old_downspread.raw) { -			core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, -				&new_downspread.raw, sizeof(new_downspread)); -		} - -	} else { -		dm_helpers_mst_enable_stream_features(stream); -	} -} - -static enum dc_status enable_link_dp(struct dc_state *state, -				     struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	enum dc_status status; -	bool skip_video_pattern; -	struct dc_link *link = stream->link; -	const struct dc_link_settings *link_settings = -			&pipe_ctx->link_config.dp_link_settings; -	bool fec_enable; -	int i; -	bool apply_seamless_boot_optimization = false; -	uint32_t bl_oled_enable_delay = 50; // in ms -	uint32_t post_oui_delay = 30; // 30ms -	/* Reduce link bandwidth between failed link training attempts. */ -	bool do_fallback = false; - -	// check for seamless boot -	for (i = 0; i < state->stream_count; i++) { -		if (state->streams[i]->apply_seamless_boot_optimization) { -			apply_seamless_boot_optimization = true; -			break; -		} -	} - -	/* Train with fallback when enabling DPIA link. Conventional links are -	 * trained with fallback during sink detection. -	 */ -	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -		do_fallback = true; - -	/* -	 * Temporary w/a to get DP2.0 link rates to work with SST. -	 * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
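enable_stream_features() above reads DP_DOWNSPREAD_CTRL, updates only the IGNORE_MSA_TIMING_PARAM bit, and writes the register back solely when the value actually changed, saving a redundant AUX transaction. A simplified read-modify-write sketch of that pattern; the accessor types are hypothetical stand-ins for the DPCD read/write helpers.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical one-byte register accessors standing in for the DPCD read and
 * write helpers; a real implementation would talk to the sink over AUX. */
typedef uint8_t (*reg_read_fn)(void *ctx);
typedef void (*reg_write_fn)(void *ctx, uint8_t value);

/* Read-modify-write a single bit, issuing the write only when the register
 * contents actually change, as the removed code does for DP_DOWNSPREAD_CTRL. */
static bool rmw_bit(void *ctx, reg_read_fn rd, reg_write_fn wr,
		    uint8_t mask, bool set)
{
	uint8_t old = rd(ctx);
	uint8_t val = set ? (uint8_t)(old | mask) : (uint8_t)(old & ~mask);

	if (val == old)
		return false;	/* nothing changed, skip the bus transaction */

	wr(ctx, val);
	return true;
}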
-	 */ -	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING && -			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && -			link->dc->debug.set_mst_en_for_sst) { -		dp_enable_mst_on_sink(link, true); -	} - -	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { -		/*in case it is not on*/ -		if (!link->dc->config.edp_no_power_sequencing) -			link->dc->hwss.edp_power_control(link, true); -		link->dc->hwss.edp_wait_for_hpd_ready(link, true); -	} - -	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { -		/* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ -	} else { -		pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = -				link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; -		if (state->clk_mgr && !apply_seamless_boot_optimization) -			state->clk_mgr->funcs->update_clocks(state->clk_mgr, -					state, false); -	} - -	// during mode switch we do DP_SET_POWER off then on, and OUI is lost -	dpcd_set_source_specific_data(link); -	if (link->dpcd_sink_ext_caps.raw != 0) { -		post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; -		msleep(post_oui_delay); -	} - -	// similarly, mode switch can cause loss of cable ID -	dpcd_write_cable_id_to_dprx(link); - -	skip_video_pattern = true; - -	if (link_settings->link_rate == LINK_RATE_LOW) -		skip_video_pattern = false; - -	if (perform_link_training_with_retries(link_settings, -					       skip_video_pattern, -					       LINK_TRAINING_ATTEMPTS, -					       pipe_ctx, -					       pipe_ctx->stream->signal, -					       do_fallback)) { -		status = DC_OK; -	} else { -		status = DC_FAIL_DP_LINK_TRAINING; -	} - -	if (link->preferred_training_settings.fec_enable) -		fec_enable = *link->preferred_training_settings.fec_enable; -	else -		fec_enable = true; - -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) -		dp_set_fec_enable(link, fec_enable); - -	// during mode set we do DP_SET_POWER off then on, aux writes are lost -	if (link->dpcd_sink_ext_caps.bits.oled == 1 || -		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || -		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { -		dc_link_set_default_brightness_aux(link); // TODO: use cached if known -		if (link->dpcd_sink_ext_caps.bits.oled == 1) -			msleep(bl_oled_enable_delay); -		dc_link_backlight_enable_aux(link, true); -	} - -	return status; -} - -static enum dc_status enable_link_edp( -		struct dc_state *state, -		struct pipe_ctx *pipe_ctx) -{ -	return enable_link_dp(state, pipe_ctx); -} - -static enum dc_status enable_link_dp_mst( -		struct dc_state *state, -		struct pipe_ctx *pipe_ctx) -{ -	struct dc_link *link = pipe_ctx->stream->link; - -	/* sink signal type after MST branch is MST. Multiple MST sinks -	 * share one link. Link DP PHY is enable or training only once. 
-	 */ -	if (link->link_status.link_active) -		return DC_OK; - -	/* clear payload table */ -	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link); - -	/* to make sure the pending down rep can be processed -	 * before enabling the link -	 */ -	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link); - -	/* set the sink to MST mode before enabling the link */ -	dp_enable_mst_on_sink(link, true); - -	return enable_link_dp(state, pipe_ctx); -} - -void dc_link_blank_all_dp_displays(struct dc *dc) -{ -	unsigned int i; -	uint8_t dpcd_power_state = '\0'; -	enum dc_status status = DC_ERROR_UNEXPECTED; - -	for (i = 0; i < dc->link_count; i++) { -		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || -			(dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) -			continue; - -		/* DP 2.0 spec requires that we read LTTPR caps first */ -		dp_retrieve_lttpr_cap(dc->links[i]); -		/* if any of the displays are lit up turn them off */ -		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, -							&dpcd_power_state, sizeof(dpcd_power_state)); - -		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) -			dc_link_blank_dp_stream(dc->links[i], true); -	} - -} - -void dc_link_blank_all_edp_displays(struct dc *dc) -{ -	unsigned int i; -	uint8_t dpcd_power_state = '\0'; -	enum dc_status status = DC_ERROR_UNEXPECTED; - -	for (i = 0; i < dc->link_count; i++) { -		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) || -			(!dc->links[i]->edp_sink_present)) -			continue; - -		/* if any of the displays are lit up turn them off */ -		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, -							&dpcd_power_state, sizeof(dpcd_power_state)); - -		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) -			dc_link_blank_dp_stream(dc->links[i], true); -	} -} - -void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init) -{ -	unsigned int j; -	struct dc  *dc = link->ctx->dc; -	enum signal_type signal = link->connector_signal; - -	if ((signal == SIGNAL_TYPE_EDP) || -		(signal == SIGNAL_TYPE_DISPLAY_PORT)) { -		if (link->ep_type == DISPLAY_ENDPOINT_PHY && -			link->link_enc->funcs->get_dig_frontend && -			link->link_enc->funcs->is_dig_enabled(link->link_enc)) { -			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); - -			if (fe != ENGINE_ID_UNKNOWN) -				for (j = 0; j < dc->res_pool->stream_enc_count; j++) { -					if (fe == dc->res_pool->stream_enc[j]->id) { -						dc->res_pool->stream_enc[j]->funcs->dp_blank(link, -									dc->res_pool->stream_enc[j]); -						break; -					} -				} -		} - -		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) -			dp_receiver_power_ctrl(link, false); -	} -} - -static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, -		enum engine_id eng_id, -		struct ext_hdmi_settings *settings) -{ -	bool result = false; -	int i = 0; -	struct integrated_info *integrated_info = -			pipe_ctx->stream->ctx->dc_bios->integrated_info; - -	if (integrated_info == NULL) -		return false; - -	/* -	 * Get retimer settings from sbios for passing SI eye test for DCE11 -	 * The setting values are varied based on board revision and port id -	 * Therefore the setting values of each ports is passed by sbios. 
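dc_link_blank_all_dp_displays() above walks every link, skips anything that is not a DP connector with a local sink, reads the sink's DP_SET_POWER state over DPCD, and blanks the stream only when the panel reports D0. A condensed sketch of that control flow; it omits the LTTPR capability read and uses illustrative types, with the DPCD access and blanking sequence stubbed out.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DP_POWER_STATE_D0 0x1	/* SET_POWER value for "on", per the DP spec */

/* Illustrative stand-in for struct dc_link. */
struct example_link {
	bool is_dp;
	bool has_local_sink;
};

/* Placeholder for the DPCD read of DP_SET_POWER; always reports D0 here. */
static bool read_set_power(struct example_link *link, uint8_t *state)
{
	(void)link;
	*state = DP_POWER_STATE_D0;
	return true;
}

/* Placeholder for the real stream-blanking sequence. */
static void blank_dp_stream(struct example_link *link)
{
	(void)link;
}

static void blank_all_dp_displays(struct example_link *links, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		uint8_t power_state;

		if (!links[i].is_dp || !links[i].has_local_sink)
			continue;	/* only DP links that drive a local sink */

		if (read_set_power(&links[i], &power_state) &&
		    power_state == DP_POWER_STATE_D0)
			blank_dp_stream(&links[i]);	/* lit up: turn it off */
	}
}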
-	 */ - -	// Check if current bios contains ext Hdmi settings -	if (integrated_info->gpu_cap_info & 0x20) { -		switch (eng_id) { -		case ENGINE_ID_DIGA: -			settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr; -			settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num; -			settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num; -			memmove(settings->reg_settings, -					integrated_info->dp0_ext_hdmi_reg_settings, -					sizeof(integrated_info->dp0_ext_hdmi_reg_settings)); -			memmove(settings->reg_settings_6g, -					integrated_info->dp0_ext_hdmi_6g_reg_settings, -					sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings)); -			result = true; -			break; -		case ENGINE_ID_DIGB: -			settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr; -			settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num; -			settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num; -			memmove(settings->reg_settings, -					integrated_info->dp1_ext_hdmi_reg_settings, -					sizeof(integrated_info->dp1_ext_hdmi_reg_settings)); -			memmove(settings->reg_settings_6g, -					integrated_info->dp1_ext_hdmi_6g_reg_settings, -					sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings)); -			result = true; -			break; -		case ENGINE_ID_DIGC: -			settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr; -			settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num; -			settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num; -			memmove(settings->reg_settings, -					integrated_info->dp2_ext_hdmi_reg_settings, -					sizeof(integrated_info->dp2_ext_hdmi_reg_settings)); -			memmove(settings->reg_settings_6g, -					integrated_info->dp2_ext_hdmi_6g_reg_settings, -					sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings)); -			result = true; -			break; -		case ENGINE_ID_DIGD: -			settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr; -			settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num; -			settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num; -			memmove(settings->reg_settings, -					integrated_info->dp3_ext_hdmi_reg_settings, -					sizeof(integrated_info->dp3_ext_hdmi_reg_settings)); -			memmove(settings->reg_settings_6g, -					integrated_info->dp3_ext_hdmi_6g_reg_settings, -					sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings)); -			result = true; -			break; -		default: -			break; -		} - -		if (result == true) { -			// Validate settings from bios integrated info table -			if (settings->slv_addr == 0) -				return false; -			if (settings->reg_num > 9) -				return false; -			if (settings->reg_num_6g > 3) -				return false; - -			for (i = 0; i < settings->reg_num; i++) { -				if (settings->reg_settings[i].i2c_reg_index > 0x20) -					return false; -			} - -			for (i = 0; i < settings->reg_num_6g; i++) { -				if (settings->reg_settings_6g[i].i2c_reg_index > 0x20) -					return false; -			} -		} -	} - -	return result; -} - -static bool i2c_write(struct pipe_ctx *pipe_ctx, -		uint8_t address, uint8_t *buffer, uint32_t length) -{ -	struct i2c_command cmd = {0}; -	struct i2c_payload payload = {0}; - -	memset(&payload, 0, sizeof(payload)); -	memset(&cmd, 0, sizeof(cmd)); - -	cmd.number_of_payloads = 1; -	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; -	cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz; - -	payload.address = address; -	payload.data = buffer; -	payload.length = length; -	payload.write = true; -	cmd.payloads = &payload; - -	if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, -			pipe_ctx->stream->link, &cmd)) -		return true; - -	return false; 
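The validation at the end of get_ext_hdmi_settings() above rejects SBIOS-provided retimer tables with a zero slave address, more than 9 entries (3 for the 6 GHz table), or any register index above 0x20. A standalone sketch of those bounds checks; the struct layout is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the SBIOS retimer settings. */
struct example_reg_setting {
	uint8_t i2c_reg_index;
	uint8_t i2c_reg_val;
};

struct example_hdmi_settings {
	uint8_t slv_addr;
	uint8_t reg_num;
	uint8_t reg_num_6g;
	struct example_reg_setting reg_settings[9];
	struct example_reg_setting reg_settings_6g[3];
};

static bool ext_hdmi_settings_valid(const struct example_hdmi_settings *s)
{
	if (s->slv_addr == 0)
		return false;		/* no retimer present */
	if (s->reg_num > 9 || s->reg_num_6g > 3)
		return false;		/* table larger than the SBIOS contract allows */

	for (int i = 0; i < s->reg_num; i++)
		if (s->reg_settings[i].i2c_reg_index > 0x20)
			return false;
	for (int i = 0; i < s->reg_num_6g; i++)
		if (s->reg_settings_6g[i].i2c_reg_index > 0x20)
			return false;

	return true;
}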
-} - -static void write_i2c_retimer_setting( -		struct pipe_ctx *pipe_ctx, -		bool is_vga_mode, -		bool is_over_340mhz, -		struct ext_hdmi_settings *settings) -{ -	uint8_t slave_address = (settings->slv_addr >> 1); -	uint8_t buffer[2]; -	const uint8_t apply_rx_tx_change = 0x4; -	uint8_t offset = 0xA; -	uint8_t value = 0; -	int i = 0; -	bool i2c_success = false; -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	memset(&buffer, 0, sizeof(buffer)); - -	/* Start Ext-Hdmi programming*/ - -	for (i = 0; i < settings->reg_num; i++) { -		/* Apply 3G settings */ -		if (settings->reg_settings[i].i2c_reg_index <= 0x20) { - -			buffer[0] = settings->reg_settings[i].i2c_reg_index; -			buffer[1] = settings->reg_settings[i].i2c_reg_val; -			i2c_success = i2c_write(pipe_ctx, slave_address, -						buffer, sizeof(buffer)); -			RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -				offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", -				slave_address, buffer[0], buffer[1], i2c_success?1:0); - -			if (!i2c_success) -				goto i2c_write_fail; - -			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A -			 * needs to be set to 1 on every 0xA-0xC write. -			 */ -			if (settings->reg_settings[i].i2c_reg_index == 0xA || -				settings->reg_settings[i].i2c_reg_index == 0xB || -				settings->reg_settings[i].i2c_reg_index == 0xC) { - -				/* Query current value from offset 0xA */ -				if (settings->reg_settings[i].i2c_reg_index == 0xA) -					value = settings->reg_settings[i].i2c_reg_val; -				else { -					i2c_success = -						dal_ddc_service_query_ddc_data( -						pipe_ctx->stream->link->ddc, -						slave_address, &offset, 1, &value, 1); -					if (!i2c_success) -						goto i2c_write_fail; -				} - -				buffer[0] = offset; -				/* Set APPLY_RX_TX_CHANGE bit to 1 */ -				buffer[1] = value | apply_rx_tx_change; -				i2c_success = i2c_write(pipe_ctx, slave_address, -						buffer, sizeof(buffer)); -				RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -					slave_address, buffer[0], buffer[1], i2c_success?1:0); -				if (!i2c_success) -					goto i2c_write_fail; -			} -		} -	} - -	/* Apply 3G settings */ -	if (is_over_340mhz) { -		for (i = 0; i < settings->reg_num_6g; i++) { -			/* Apply 3G settings */ -			if (settings->reg_settings[i].i2c_reg_index <= 0x20) { - -				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index; -				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val; -				i2c_success = i2c_write(pipe_ctx, slave_address, -							buffer, sizeof(buffer)); -				RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\ -					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -					slave_address, buffer[0], buffer[1], i2c_success?1:0); - -				if (!i2c_success) -					goto i2c_write_fail; - -				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A -				 * needs to be set to 1 on every 0xA-0xC write. 
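Per the DP159 sequence in the removed code above, every write to retimer registers 0xA through 0xC must be followed by a write to register 0x0A with the APPLY_RX_TX_CHANGE bit (0x4) set on top of the register's current value. A minimal sketch of building that follow-up value.

#include <stdint.h>

#define DP159_APPLY_RX_TX_CHANGE 0x04	/* bit in register 0x0A, per the hunk above */

/* Given the current contents of register 0x0A, compute the value for the
 * follow-up write that latches a preceding 0xA-0xC register update. */
static uint8_t dp159_apply_value(uint8_t current_reg_0a)
{
	return current_reg_0a | DP159_APPLY_RX_TX_CHANGE;
}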
-				 */ -				if (settings->reg_settings_6g[i].i2c_reg_index == 0xA || -					settings->reg_settings_6g[i].i2c_reg_index == 0xB || -					settings->reg_settings_6g[i].i2c_reg_index == 0xC) { - -					/* Query current value from offset 0xA */ -					if (settings->reg_settings_6g[i].i2c_reg_index == 0xA) -						value = settings->reg_settings_6g[i].i2c_reg_val; -					else { -						i2c_success = -								dal_ddc_service_query_ddc_data( -								pipe_ctx->stream->link->ddc, -								slave_address, &offset, 1, &value, 1); -						if (!i2c_success) -							goto i2c_write_fail; -					} - -					buffer[0] = offset; -					/* Set APPLY_RX_TX_CHANGE bit to 1 */ -					buffer[1] = value | apply_rx_tx_change; -					i2c_success = i2c_write(pipe_ctx, slave_address, -							buffer, sizeof(buffer)); -					RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -						offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -						slave_address, buffer[0], buffer[1], i2c_success?1:0); -					if (!i2c_success) -						goto i2c_write_fail; -				} -			} -		} -	} - -	if (is_vga_mode) { -		/* Program additional settings if using 640x480 resolution */ - -		/* Write offset 0xFF to 0x01 */ -		buffer[0] = 0xff; -		buffer[1] = 0x01; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -				slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; - -		/* Write offset 0x00 to 0x23 */ -		buffer[0] = 0x00; -		buffer[1] = 0x23; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -			slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; - -		/* Write offset 0xff to 0x00 */ -		buffer[0] = 0xff; -		buffer[1] = 0x00; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ -			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -			slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; - -	} - -	return; - -i2c_write_fail: -	DC_LOG_DEBUG("Set retimer failed"); -} - -static void write_i2c_default_retimer_setting( -		struct pipe_ctx *pipe_ctx, -		bool is_vga_mode, -		bool is_over_340mhz) -{ -	uint8_t slave_address = (0xBA >> 1); -	uint8_t buffer[2]; -	bool i2c_success = false; -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	memset(&buffer, 0, sizeof(buffer)); - -	/* Program Slave Address for tuning single integrity */ -	/* Write offset 0x0A to 0x13 */ -	buffer[0] = 0x0A; -	buffer[1] = 0x13; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\ -		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - -	/* Write offset 0x0A to 0x17 */ -	buffer[0] = 0x0A; -	buffer[1] = 0x17; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - -	/* Write offset 0x0B to 0xDA or 
0xD8 */ -	buffer[0] = 0x0B; -	buffer[1] = is_over_340mhz ? 0xDA : 0xD8; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - -	/* Write offset 0x0A to 0x17 */ -	buffer[0] = 0x0A; -	buffer[1] = 0x17; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -		offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - -	/* Write offset 0x0C to 0x1D or 0x91 */ -	buffer[0] = 0x0C; -	buffer[1] = is_over_340mhz ? 0x1D : 0x91; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - -	/* Write offset 0x0A to 0x17 */ -	buffer[0] = 0x0A; -	buffer[1] = 0x17; -	i2c_success = i2c_write(pipe_ctx, slave_address, -			buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -		slave_address, buffer[0], buffer[1], i2c_success?1:0); -	if (!i2c_success) -		goto i2c_write_fail; - - -	if (is_vga_mode) { -		/* Program additional settings if using 640x480 resolution */ - -		/* Write offset 0xFF to 0x01 */ -		buffer[0] = 0xff; -		buffer[1] = 0x01; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", -			slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; - -		/* Write offset 0x00 to 0x23 */ -		buffer[0] = 0x00; -		buffer[1] = 0x23; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ -			offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", -			slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; - -		/* Write offset 0xff to 0x00 */ -		buffer[0] = 0xff; -		buffer[1] = 0x00; -		i2c_success = i2c_write(pipe_ctx, slave_address, -				buffer, sizeof(buffer)); -		RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ -			offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", -			slave_address, buffer[0], buffer[1], i2c_success?1:0); -		if (!i2c_success) -			goto i2c_write_fail; -	} - -	return; - -i2c_write_fail: -	DC_LOG_DEBUG("Set default retimer failed"); -} - -static void write_i2c_redriver_setting( -		struct pipe_ctx *pipe_ctx, -		bool is_over_340mhz) -{ -	uint8_t slave_address = (0xF0 >> 1); -	uint8_t buffer[16]; -	bool i2c_success = false; -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	memset(&buffer, 0, sizeof(buffer)); - -	// Program Slave Address for tuning single integrity -	buffer[3] = 0x4E; -	buffer[4] = 0x4E; -	buffer[5] = 0x4E; -	buffer[6] = is_over_340mhz ? 
0x4E : 0x4A; - -	i2c_success = i2c_write(pipe_ctx, slave_address, -					buffer, sizeof(buffer)); -	RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ -		\t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ -		offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ -		i2c_success = %d\n", -		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); - -	if (!i2c_success) -		DC_LOG_DEBUG("Set redriver failed"); -} - -static void disable_link(struct dc_link *link, const struct link_resource *link_res, -		enum signal_type signal) -{ -	/* -	 * TODO: implement call for dp_set_hw_test_pattern -	 * it is needed for compliance testing -	 */ - -	/* Here we need to specify that encoder output settings -	 * need to be calculated as for the set mode, -	 * it will lead to querying dynamic link capabilities -	 * which should be done before enable output -	 */ - -	if (dc_is_dp_signal(signal)) { -		/* SST DP, eDP */ -		struct dc_link_settings link_settings = link->cur_link_settings; -		if (dc_is_dp_sst_signal(signal)) -			dp_disable_link_phy(link, link_res, signal); -		else -			dp_disable_link_phy_mst(link, link_res, signal); - -		if (dc_is_dp_sst_signal(signal) || -				link->mst_stream_alloc_table.stream_count == 0) { -			if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { -				dp_set_fec_enable(link, false); -				dp_set_fec_ready(link, link_res, false); -			} -		} -	} else if (signal != SIGNAL_TYPE_VIRTUAL) { -		link->dc->hwss.disable_link_output(link, link_res, signal); -	} - -	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -		/* MST disable link only when no stream use the link */ -		if (link->mst_stream_alloc_table.stream_count <= 0) -			link->link_status.link_active = false; -	} else { -		link->link_status.link_active = false; -	} -} - -static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	enum dc_color_depth display_color_depth; -	enum engine_id eng_id; -	struct ext_hdmi_settings settings = {0}; -	bool is_over_340mhz = false; -	bool is_vga_mode = (stream->timing.h_addressable == 640) -			&& (stream->timing.v_addressable == 480); -	struct dc *dc = pipe_ctx->stream->ctx->dc; - -	if (stream->phy_pix_clk == 0) -		stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; -	if (stream->phy_pix_clk > 340000) -		is_over_340mhz = true; - -	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { -		unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & -				EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; -		if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { -			/* DP159, Retimer settings */ -			eng_id = pipe_ctx->stream_res.stream_enc->id; - -			if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { -				write_i2c_retimer_setting(pipe_ctx, -						is_vga_mode, is_over_340mhz, &settings); -			} else { -				write_i2c_default_retimer_setting(pipe_ctx, -						is_vga_mode, is_over_340mhz); -			} -		} else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { -			/* PI3EQX1204, Redriver settings */ -			write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); -		} -	} - -	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) -		dal_ddc_service_write_scdc_data( -			stream->link->ddc, -			stream->phy_pix_clk, -			stream->timing.flags.LTE_340MCSC_SCRAMBLE); - -	memset(&stream->link->cur_link_settings, 0, -			sizeof(struct dc_link_settings)); - -	display_color_depth = stream->timing.display_color_depth; -	if 
(stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) -		display_color_depth = COLOR_DEPTH_888; - -	dc->hwss.enable_tmds_link_output( -			link, -			&pipe_ctx->link_res, -			pipe_ctx->stream->signal, -			pipe_ctx->clock_source->id, -			display_color_depth, -			stream->phy_pix_clk); - -	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) -		dal_ddc_service_read_scdc_data(link->ddc); -} - -static void enable_link_lvds(struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct dc *dc = stream->ctx->dc; - -	if (stream->phy_pix_clk == 0) -		stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; - -	memset(&stream->link->cur_link_settings, 0, -			sizeof(struct dc_link_settings)); -	dc->hwss.enable_lvds_link_output( -			link, -			&pipe_ctx->link_res, -			pipe_ctx->clock_source->id, -			stream->phy_pix_clk); - -} - -bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable) -{ -	bool ret = false; -	union dpcd_alpm_configuration alpm_config; - -	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { -		memset(&alpm_config, 0, sizeof(alpm_config)); - -		alpm_config.bits.ENABLE = (enable ? true : false); -		ret = dm_helpers_dp_write_dpcd(link->ctx, link, -				DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, -				sizeof(alpm_config.raw)); -	} -	return ret; -} - -/****************************enable_link***********************************/ -static enum dc_status enable_link( -		struct dc_state *state, -		struct pipe_ctx *pipe_ctx) -{ -	enum dc_status status = DC_ERROR_UNEXPECTED; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; - -	/* There's some scenarios where driver is unloaded with display -	 * still enabled. When driver is reloaded, it may cause a display -	 * to not light up if there is a mismatch between old and new -	 * link settings. Need to call disable first before enabling at -	 * new link settings. 
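enable_link_hdmi() above defaults the PHY pixel clock to the timing's pixel clock, which is stored in 100 Hz units, hence the divide by 10 to get kHz, and treats anything above 340000 kHz as "over 340 MHz" when choosing retimer, redriver, and scrambling settings. A standalone sketch of that derivation.

#include <stdbool.h>
#include <stdint.h>

/* pix_clk_100hz is the timing pixel clock in units of 100 Hz;
 * the PHY clock is tracked in kHz, so divide by 10. */
static uint32_t default_phy_pix_clk_khz(uint32_t pix_clk_100hz)
{
	return pix_clk_100hz / 10;
}

static bool is_over_340mhz(uint32_t phy_pix_clk_khz)
{
	return phy_pix_clk_khz > 340000;
}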
-	 */ -	if (link->link_status.link_active) { -		disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); -	} - -	switch (pipe_ctx->stream->signal) { -	case SIGNAL_TYPE_DISPLAY_PORT: -		status = enable_link_dp(state, pipe_ctx); -		break; -	case SIGNAL_TYPE_EDP: -		status = enable_link_edp(state, pipe_ctx); -		break; -	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		status = enable_link_dp_mst(state, pipe_ctx); -		msleep(200); -		break; -	case SIGNAL_TYPE_DVI_SINGLE_LINK: -	case SIGNAL_TYPE_DVI_DUAL_LINK: -	case SIGNAL_TYPE_HDMI_TYPE_A: -		enable_link_hdmi(pipe_ctx); -		status = DC_OK; -		break; -	case SIGNAL_TYPE_LVDS: -		enable_link_lvds(pipe_ctx); -		status = DC_OK; -		break; -	case SIGNAL_TYPE_VIRTUAL: -		status = DC_OK; -		break; -	default: -		break; -	} - -	if (status == DC_OK) -		pipe_ctx->stream->link->link_status.link_active = true; - -	return status; -} - -static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing) -{ - -	uint32_t pxl_clk = timing->pix_clk_100hz; - -	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) -		pxl_clk /= 2; -	else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) -		pxl_clk = pxl_clk * 2 / 3; - -	if (timing->display_color_depth == COLOR_DEPTH_101010) -		pxl_clk = pxl_clk * 10 / 8; -	else if (timing->display_color_depth == COLOR_DEPTH_121212) -		pxl_clk = pxl_clk * 12 / 8; - -	return pxl_clk; -} - -static bool dp_active_dongle_validate_timing( -		const struct dc_crtc_timing *timing, -		const struct dpcd_caps *dpcd_caps) -{ -	const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; - -	switch (dpcd_caps->dongle_type) { -	case DISPLAY_DONGLE_DP_VGA_CONVERTER: -	case DISPLAY_DONGLE_DP_DVI_CONVERTER: -	case DISPLAY_DONGLE_DP_DVI_DONGLE: -		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) -			return true; -		else -			return false; -	default: -		break; -	} - -	if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && -			dongle_caps->extendedCapValid == true) { -		/* Check Pixel Encoding */ -		switch (timing->pixel_encoding) { -		case PIXEL_ENCODING_RGB: -		case PIXEL_ENCODING_YCBCR444: -			break; -		case PIXEL_ENCODING_YCBCR422: -			if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) -				return false; -			break; -		case PIXEL_ENCODING_YCBCR420: -			if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) -				return false; -			break; -		default: -			/* Invalid Pixel Encoding*/ -			return false; -		} - -		switch (timing->display_color_depth) { -		case COLOR_DEPTH_666: -		case COLOR_DEPTH_888: -			/*888 and 666 should always be supported*/ -			break; -		case COLOR_DEPTH_101010: -			if (dongle_caps->dp_hdmi_max_bpc < 10) -				return false; -			break; -		case COLOR_DEPTH_121212: -			if (dongle_caps->dp_hdmi_max_bpc < 12) -				return false; -			break; -		case COLOR_DEPTH_141414: -		case COLOR_DEPTH_161616: -		default: -			/* These color depths are currently not supported */ -			return false; -		} - -		/* Check 3D format */ -		switch (timing->timing_3d_format) { -		case TIMING_3D_FORMAT_NONE: -		case TIMING_3D_FORMAT_FRAME_ALTERNATE: -			/*Only frame alternate 3D is supported on active dongle*/ -			break; -		default: -			/*other 3D formats are not supported due to bad infoframe translation */ -			return false; -		} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -		if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter -			struct dc_crtc_timing outputTiming = *timing; - -			if (timing->flags.DSC && !timing->dsc_cfg.is_frl) -				/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from 
output timing */ -				outputTiming.flags.DSC = 0; -			if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) -				return false; -		} else { // DP to HDMI TMDS converter -			if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) -				return false; -		} -#else -		if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) -			return false; -#endif -	} - -	if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && -			dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && -			dongle_caps->dfp_cap_ext.supported) { - -		if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) -			return false; - -		if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) -			return false; - -		if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) -			return false; - -		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) { -			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) -				return false; -			if (timing->display_color_depth == COLOR_DEPTH_666 && -					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_888 && -					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_101010 && -					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_121212 && -					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_161616 && -					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc) -				return false; -		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) { -			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) -				return false; -			if (timing->display_color_depth == COLOR_DEPTH_888 && -					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_101010 && -					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_121212 && -					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_161616 && -					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc) -				return false; -		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { -			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) -				return false; -			if (timing->display_color_depth == COLOR_DEPTH_888 && -					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_101010 && -					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_121212 && -					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_161616 && -					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc) -				return false; -		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { -			if 
(!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) -				return false; -			if (timing->display_color_depth == COLOR_DEPTH_888 && -					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_101010 && -					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_121212 && -					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc) -				return false; -			else if (timing->display_color_depth == COLOR_DEPTH_161616 && -					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc) -				return false; -		} -	} - -	return true; -} - -enum dc_status dc_link_validate_mode_timing( -		const struct dc_stream_state *stream, -		struct dc_link *link, -		const struct dc_crtc_timing *timing) -{ -	uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10; -	struct dpcd_caps *dpcd_caps = &link->dpcd_caps; - -	/* A hack to avoid failing any modes for EDID override feature on -	 * topology change such as lower quality cable for DP or different dongle -	 */ -	if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) -		return DC_OK; - -	/* Passive Dongle */ -	if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk) -		return DC_EXCEED_DONGLE_CAP; - -	/* Active Dongle*/ -	if (!dp_active_dongle_validate_timing(timing, dpcd_caps)) -		return DC_EXCEED_DONGLE_CAP; - -	switch (stream->signal) { -	case SIGNAL_TYPE_EDP: -	case SIGNAL_TYPE_DISPLAY_PORT: -		if (!dp_validate_mode_timing( -				link, -				timing)) -			return DC_NO_DP_LINK_BANDWIDTH; -		break; - -	default: -		break; -	} - -	return DC_OK; -} - -static struct abm *get_abm_from_stream_res(const struct dc_link *link) -{ -	int i; -	struct dc *dc = NULL; -	struct abm *abm = NULL; - -	if (!link || !link->ctx) -		return NULL; - -	dc = link->ctx->dc; - -	for (i = 0; i < MAX_PIPES; i++) { -		struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; -		struct dc_stream_state *stream = pipe_ctx.stream; - -		if (stream && stream->link == link) { -			abm = pipe_ctx.stream_res.abm; -			break; -		} -	} -	return abm; -} - -int dc_link_get_backlight_level(const struct dc_link *link) -{ -	struct abm *abm = get_abm_from_stream_res(link); -	struct panel_cntl *panel_cntl = link->panel_cntl; -	struct dc  *dc = link->ctx->dc; -	struct dmcu *dmcu = dc->res_pool->dmcu; -	bool fw_set_brightness = true; - -	if (dmcu) -		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); - -	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) -		return panel_cntl->funcs->get_current_backlight(panel_cntl); -	else if (abm != NULL && abm->funcs->get_current_backlight != NULL) -		return (int) abm->funcs->get_current_backlight(abm); -	else -		return DC_ERROR_UNEXPECTED; -} - -int dc_link_get_target_backlight_pwm(const struct dc_link *link) -{ -	struct abm *abm = get_abm_from_stream_res(link); - -	if (abm == NULL || abm->funcs->get_target_backlight == NULL) -		return DC_ERROR_UNEXPECTED; - -	return (int) abm->funcs->get_target_backlight(abm); -} - -static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) -{ -	int i; -	struct dc *dc = link->ctx->dc; -	struct pipe_ctx *pipe_ctx = NULL; - -	for (i = 0; i < MAX_PIPES; i++) { -		if (dc->current_state->res_ctx.pipe_ctx[i].stream) { -			if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { -				pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; -				break; 
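dc_link_validate_mode_timing() above compares the timing's effective pixel clock against the passive dongle limit, scaling dongle_max_pix_clk from kHz into 100 Hz units before the check. The effective clock comes from get_timing_pixel_clock_100hz(), removed a few hunks up; a standalone sketch mirroring its adjustments, with illustrative enum names.

#include <stdint.h>

enum pixel_encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR422, ENC_YCBCR420 };
enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212 };

/* Effective pixel clock in 100 Hz units, mirroring the removed helper:
 * 4:2:0 halves the clock, 4:2:2 scales by 2/3, and 10/12 bpc scale by
 * 10/8 and 12/8 respectively. */
static uint32_t effective_pix_clk_100hz(uint32_t pix_clk_100hz,
					enum pixel_encoding enc,
					enum color_depth depth)
{
	uint32_t clk = pix_clk_100hz;

	if (enc == ENC_YCBCR420)
		clk /= 2;
	else if (enc == ENC_YCBCR422)
		clk = clk * 2 / 3;

	if (depth == DEPTH_101010)
		clk = clk * 10 / 8;
	else if (depth == DEPTH_121212)
		clk = clk * 12 / 8;

	return clk;
}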
-			} -		} -	} - -	return pipe_ctx; -} - -bool dc_link_set_backlight_level(const struct dc_link *link, -		uint32_t backlight_pwm_u16_16, -		uint32_t frame_ramp) -{ -	struct dc  *dc = link->ctx->dc; - -	DC_LOGGER_INIT(link->ctx->logger); -	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", -			backlight_pwm_u16_16, backlight_pwm_u16_16); - -	if (dc_is_embedded_signal(link->connector_signal)) { -		struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); - -		if (pipe_ctx) { -			/* Disable brightness ramping when the display is blanked -			 * as it can hang the DMCU -			 */ -			if (pipe_ctx->plane_state == NULL) -				frame_ramp = 0; -		} else { -			return false; -		} - -		dc->hwss.set_backlight_level( -				pipe_ctx, -				backlight_pwm_u16_16, -				frame_ramp); -	} -	return true; -} - -bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active, -		bool wait, bool force_static, const unsigned int *power_opts) -{ -	struct dc  *dc = link->ctx->dc; -	struct dmcu *dmcu = dc->res_pool->dmcu; -	struct dmub_psr *psr = dc->res_pool->psr; -	unsigned int panel_inst; - -	if (psr == NULL && force_static) -		return false; - -	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) -		return false; - -	if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) { -		// Don't enter PSR if panel is not connected -		return false; -	} - -	/* Set power optimization flag */ -	if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { -		link->psr_settings.psr_power_opt = *power_opts; - -		if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) -			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); -	} - -	if (psr != NULL && link->psr_settings.psr_feature_enabled && -			force_static && psr->funcs->psr_force_static) -		psr->funcs->psr_force_static(psr, panel_inst); - -	/* Enable or Disable PSR */ -	if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { -		link->psr_settings.psr_allow_active = *allow_active; - -		if (!link->psr_settings.psr_allow_active) -			dc_z10_restore(dc); - -		if (psr != NULL && link->psr_settings.psr_feature_enabled) { -			psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); -		} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && -			link->psr_settings.psr_feature_enabled) -			dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); -		else -			return false; -	} - -	return true; -} - -bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) -{ -	struct dc  *dc = link->ctx->dc; -	struct dmcu *dmcu = dc->res_pool->dmcu; -	struct dmub_psr *psr = dc->res_pool->psr; -	unsigned int panel_inst; - -	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) -		return false; - -	if (psr != NULL && link->psr_settings.psr_feature_enabled) -		psr->funcs->psr_get_state(psr, state, panel_inst); -	else if (dmcu != NULL && link->psr_settings.psr_feature_enabled) -		dmcu->funcs->get_psr_state(dmcu, state); - -	return true; -} - -static inline enum physical_phy_id -transmitter_to_phy_id(enum transmitter transmitter_value) -{ -	switch (transmitter_value) { -	case TRANSMITTER_UNIPHY_A: -		return PHYLD_0; -	case TRANSMITTER_UNIPHY_B: -		return PHYLD_1; -	case TRANSMITTER_UNIPHY_C: -		return PHYLD_2; -	case TRANSMITTER_UNIPHY_D: -		return PHYLD_3; -	case TRANSMITTER_UNIPHY_E: -		return PHYLD_4; -	case TRANSMITTER_UNIPHY_F: -		return PHYLD_5; -	case TRANSMITTER_NUTMEG_CRT: -		
return PHYLD_6; -	case TRANSMITTER_TRAVIS_CRT: -		return PHYLD_7; -	case TRANSMITTER_TRAVIS_LCD: -		return PHYLD_8; -	case TRANSMITTER_UNIPHY_G: -		return PHYLD_9; -	case TRANSMITTER_COUNT: -		return PHYLD_COUNT; -	case TRANSMITTER_UNKNOWN: -		return PHYLD_UNKNOWN; -	default: -		WARN_ONCE(1, "Unknown transmitter value %d\n", -			  transmitter_value); -		return PHYLD_UNKNOWN; -	} -} - -bool dc_link_setup_psr(struct dc_link *link, -		const struct dc_stream_state *stream, struct psr_config *psr_config, -		struct psr_context *psr_context) -{ -	struct dc *dc; -	struct dmcu *dmcu; -	struct dmub_psr *psr; -	int i; -	unsigned int panel_inst; -	/* updateSinkPsrDpcdConfig*/ -	union dpcd_psr_configuration psr_configuration; -	union dpcd_sink_active_vtotal_control_mode vtotal_control = {0}; - -	psr_context->controllerId = CONTROLLER_ID_UNDEFINED; - -	if (!link) -		return false; - -	dc = link->ctx->dc; -	dmcu = dc->res_pool->dmcu; -	psr = dc->res_pool->psr; - -	if (!dmcu && !psr) -		return false; - -	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) -		return false; - - -	memset(&psr_configuration, 0, sizeof(psr_configuration)); - -	psr_configuration.bits.ENABLE                    = 1; -	psr_configuration.bits.CRC_VERIFICATION          = 1; -	psr_configuration.bits.FRAME_CAPTURE_INDICATION  = -			psr_config->psr_frame_capture_indication_req; - -	/* Check for PSR v2*/ -	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { -		/* For PSR v2 selective update. -		 * Indicates whether sink should start capturing -		 * immediately following active scan line, -		 * or starting with the 2nd active scan line. -		 */ -		psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; -		/*For PSR v2, determines whether Sink should generate -		 * IRQ_HPD when CRC mismatch is detected. -		 */ -		psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR    = 1; -		/* For PSR v2, set the bit when the Source device will -		 * be enabling PSR2 operation. -		 */ -		psr_configuration.bits.ENABLE_PSR2    = 1; -		/* For PSR v2, the Sink device must be able to receive -		 * SU region updates early in the frame time. -		 */ -		psr_configuration.bits.EARLY_TRANSPORT_ENABLE    = 1; -	} - -	dm_helpers_dp_write_dpcd( -		link->ctx, -		link, -		368, -		&psr_configuration.raw, -		sizeof(psr_configuration.raw)); - -	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { -		dc_power_alpm_dpcd_enable(link, true); -		psr_context->su_granularity_required = -			psr_config->su_granularity_required; -		psr_context->su_y_granularity = -			psr_config->su_y_granularity; -		psr_context->line_time_in_us = -			psr_config->line_time_in_us; - -		if (link->psr_settings.psr_vtotal_control_support) { -			psr_context->rate_control_caps = psr_config->rate_control_caps; -			vtotal_control.bits.ENABLE = true; -			core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE, -							&vtotal_control.raw, sizeof(vtotal_control.raw)); -		} -	} - -	psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; -	psr_context->transmitterId = link->link_enc->transmitter; -	psr_context->engineId = link->link_enc->preferred_engine; - -	for (i = 0; i < MAX_PIPES; i++) { -		if (dc->current_state->res_ctx.pipe_ctx[i].stream -				== stream) { -			/* dmcu -1 for all controller id values, -			 * therefore +1 here -			 */ -			psr_context->controllerId = -				dc->current_state->res_ctx. -				pipe_ctx[i].stream_res.tg->inst + 1; -			break; -		} -	} - -	/* Hardcoded for now.  
Can be Pcie or Uniphy (or Unknown)*/ -	psr_context->phyType = PHY_TYPE_UNIPHY; -	/*PhyId is associated with the transmitter id*/ -	psr_context->smuPhyId = -		transmitter_to_phy_id(link->link_enc->transmitter); - -	psr_context->crtcTimingVerticalTotal = stream->timing.v_total; -	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> -					timing.pix_clk_100hz * 100), -					stream->timing.v_total), -					stream->timing.h_total); - -	psr_context->psrSupportedDisplayConfig = true; -	psr_context->psrExitLinkTrainingRequired = -		psr_config->psr_exit_link_training_required; -	psr_context->sdpTransmitLineNumDeadline = -		psr_config->psr_sdp_transmit_line_num_deadline; -	psr_context->psrFrameCaptureIndicationReq = -		psr_config->psr_frame_capture_indication_req; - -	psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ - -	psr_context->numberOfControllers = -			link->dc->res_pool->timing_generator_count; - -	psr_context->rfb_update_auto_en = true; - -	/* 2 frames before enter PSR. */ -	psr_context->timehyst_frames = 2; -	/* half a frame -	 * (units in 100 lines, i.e. a value of 1 represents 100 lines) -	 */ -	psr_context->hyst_lines = stream->timing.v_total / 2 / 100; -	psr_context->aux_repeats = 10; - -	psr_context->psr_level.u32all = 0; - -	/*skip power down the single pipe since it blocks the cstate*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) -	if (link->ctx->asic_id.chip_family >= FAMILY_RV) { -		switch(link->ctx->asic_id.chip_family) { -		case FAMILY_YELLOW_CARP: -		case AMDGPU_FAMILY_GC_10_3_6: -		case AMDGPU_FAMILY_GC_11_0_1: -			if (dc->debug.disable_z10) -				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; -			break; -		default: -			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; -			break; -		} -	} -#else -	if (link->ctx->asic_id.chip_family >= FAMILY_RV) -		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; -#endif - -	/* SMU will perform additional powerdown sequence. -	 * For unsupported ASICs, set psr_level flag to skip PSR -	 *  static screen notification to SMU. 
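/* Illustrative sketch, not from the patch: the PSR context fields computed
 * above, with a common 1080p60 timing (148.5 MHz pixel clock, h_total 2200,
 * v_total 1125) plugged in:
 *   vsync_rate_hz = 1485000 * 100 / 1125 / 2200 = 60
 *   hyst_lines    = 1125 / 2 / 100 = 5   (half a frame, in units of 100 lines)
 * The helper below is a freestanding restatement of that arithmetic.
 */
static unsigned int psr_vsync_rate_hz(unsigned long long pix_clk_100hz,
				      unsigned int h_total, unsigned int v_total)
{
	return (unsigned int)(pix_clk_100hz * 100 / v_total / h_total);
}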
-	 *  (Always set for DAL2, did not check ASIC) -	 */ -	psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; -	psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; - -	/* Complete PSR entry before aborting to prevent intermittent -	 * freezes on certain eDPs -	 */ -	psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; - -	/* enable ALPM */ -	psr_context->psr_level.bits.DISABLE_ALPM = 0; -	psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; - -	/* Controls additional delay after remote frame capture before -	 * continuing power down, default = 0 -	 */ -	psr_context->frame_delay = 0; - -	if (psr) { -		link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, -			link, psr_context, panel_inst); -		link->psr_settings.psr_power_opt = 0; -		link->psr_settings.psr_allow_active = 0; -	} -	else -		link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); - -	/* psr_enabled == 0 indicates setup_psr did not succeed, but this -	 * should not happen since firmware should be running at this point -	 */ -	if (link->psr_settings.psr_feature_enabled == 0) -		ASSERT(0); - -	return true; - -} - -void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency) -{ -	struct dc  *dc = link->ctx->dc; -	struct dmub_psr *psr = dc->res_pool->psr; -	unsigned int panel_inst; - -	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) -		return; - -	/* PSR residency measurements only supported on DMCUB */ -	if (psr != NULL && link->psr_settings.psr_feature_enabled) -		psr->funcs->psr_get_residency(psr, residency, panel_inst); -	else -		*residency = 0; -} - -bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) -{ -	struct dc *dc = link->ctx->dc; -	struct dmub_psr *psr = dc->res_pool->psr; - -	if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) -		return false; - -	psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); - -	return true; -} - -const struct dc_link_status *dc_link_get_status(const struct dc_link *link) -{ -	return &link->link_status; -} - -void core_link_resume(struct dc_link *link) -{ -	if (link->connector_signal != SIGNAL_TYPE_VIRTUAL) -		program_hpd_filter(link); -} - -static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) -{ -	struct fixed31_32 mbytes_per_sec; -	uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, -			&stream->link->cur_link_settings); -	link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ - -	mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); - -	return dc_fixpt_div_int(mbytes_per_sec, 54); -} - -static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) -{ -	struct fixed31_32 peak_kbps; -	uint32_t numerator = 0; -	uint32_t denominator = 1; - -	/* -	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 -	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on -	 * common multiplier to render an integer PBN for all link rate/lane -	 * counts combinations -	 * calculate -	 * peak_kbps *= (1006/1000) -	 * peak_kbps *= (64/54) -	 * peak_kbps *= 8    convert to bytes -	 */ - -	numerator = 64 * PEAK_FACTOR_X1000; -	denominator = 54 * 8 * 1000 * 1000; -	kbps *= numerator; -	peak_kbps = dc_fixpt_from_fraction(kbps, denominator); - -	return peak_kbps; -} - -static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) 
-{ -	uint64_t kbps; - -	kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); -	return get_pbn_from_bw_in_kbps(kbps); -} - -static void update_mst_stream_alloc_table( -	struct dc_link *link, -	struct stream_encoder *stream_enc, -	struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? -	const struct dc_dp_mst_stream_allocation_table *proposed_table) -{ -	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; -	struct link_mst_stream_allocation *dc_alloc; - -	int i; -	int j; - -	/* if DRM proposed_table has more than one new payload */ -	ASSERT(proposed_table->stream_count - -			link->mst_stream_alloc_table.stream_count < 2); - -	/* copy proposed_table to link, add stream encoder */ -	for (i = 0; i < proposed_table->stream_count; i++) { - -		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) { -			dc_alloc = -			&link->mst_stream_alloc_table.stream_allocations[j]; - -			if (dc_alloc->vcp_id == -				proposed_table->stream_allocations[i].vcp_id) { - -				work_table[i] = *dc_alloc; -				work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; -				break; /* exit j loop */ -			} -		} - -		/* new vcp_id */ -		if (j == link->mst_stream_alloc_table.stream_count) { -			work_table[i].vcp_id = -				proposed_table->stream_allocations[i].vcp_id; -			work_table[i].slot_count = -				proposed_table->stream_allocations[i].slot_count; -			work_table[i].stream_enc = stream_enc; -			work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; -		} -	} - -	/* update link->mst_stream_alloc_table with work_table */ -	link->mst_stream_alloc_table.stream_count = -			proposed_table->stream_count; -	for (i = 0; i < MAX_CONTROLLER_NUM; i++) -		link->mst_stream_alloc_table.stream_allocations[i] = -				work_table[i]; -} - -static void remove_stream_from_alloc_table( -		struct dc_link *link, -		struct stream_encoder *dio_stream_enc, -		struct hpo_dp_stream_encoder *hpo_dp_stream_enc) -{ -	int i = 0; -	struct link_mst_stream_allocation_table *table = -			&link->mst_stream_alloc_table; - -	if (hpo_dp_stream_enc) { -		for (; i < table->stream_count; i++) -			if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc) -				break; -	} else { -		for (; i < table->stream_count; i++) -			if (dio_stream_enc == table->stream_allocations[i].stream_enc) -				break; -	} - -	if (i < table->stream_count) { -		i++; -		for (; i < table->stream_count; i++) -			table->stream_allocations[i-1] = table->stream_allocations[i]; -		memset(&table->stream_allocations[table->stream_count-1], 0, -				sizeof(struct link_mst_stream_allocation)); -		table->stream_count--; -	} -} - -static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) -{ -	const uint32_t VCP_Y_PRECISION = 1000; -	uint64_t vcp_x, vcp_y; - -	// Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision -	avg_time_slots_per_mtp = dc_fixpt_add( -			avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION)); - -	vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp); -	vcp_y = dc_fixpt_floor( -			dc_fixpt_mul_int( -				dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)), -				VCP_Y_PRECISION)); - -	if (link->type == dc_connection_mst_branch) -		DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " -				"X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION); -	else -		DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " -				"X: %lld Y: %lld/%d", vcp_x, 
vcp_y, VCP_Y_PRECISION); -} - -/* - * Payload allocation/deallocation for SST introduced in DP2.0 - */ -static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, -						 bool allocate) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct link_mst_stream_allocation_table proposed_table = {0}; -	struct fixed31_32 avg_time_slots_per_mtp; -	const struct dc_link_settings empty_link_settings = {0}; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	DC_LOGGER_INIT(link->ctx->logger); - -	/* slot X.Y for SST payload deallocate */ -	if (!allocate) { -		avg_time_slots_per_mtp = dc_fixpt_from_int(0); - -		dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - -		if (link_hwss->ext.set_throttled_vcp_size) -			link_hwss->ext.set_throttled_vcp_size(pipe_ctx, -					avg_time_slots_per_mtp); -		if (link_hwss->ext.set_hblank_min_symbol_width) -			link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -					&empty_link_settings, -					avg_time_slots_per_mtp); -	} - -	/* calculate VC payload and update branch with new payload allocation table*/ -	if (!dpcd_write_128b_132b_sst_payload_allocation_table( -			stream, -			link, -			&proposed_table, -			allocate)) { -		DC_LOG_ERROR("SST Update Payload: Failed to update " -						"allocation table for " -						"pipe idx: %d\n", -						pipe_ctx->pipe_idx); -		return DC_FAIL_DP_PAYLOAD_ALLOCATION; -	} - -	proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; - -	ASSERT(proposed_table.stream_count == 1); - -	//TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id -	DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p      " -		"vcp_id: %d      " -		"slot_count: %d\n", -		(void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, -		proposed_table.stream_allocations[0].vcp_id, -		proposed_table.stream_allocations[0].slot_count); - -	/* program DP source TX for payload */ -	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, -			&proposed_table); - -	/* poll for ACT handled */ -	if (!dpcd_poll_for_allocation_change_trigger(link)) { -		// Failures will result in blackscreen and errors logged -		BREAK_TO_DEBUGGER(); -	} - -	/* slot X.Y for SST payload allocate */ -	if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) == -			DP_128b_132b_ENCODING) { -		avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); - -		dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - -		if (link_hwss->ext.set_throttled_vcp_size) -			link_hwss->ext.set_throttled_vcp_size(pipe_ctx, -					avg_time_slots_per_mtp); -		if (link_hwss->ext.set_hblank_min_symbol_width) -			link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -					&link->cur_link_settings, -					avg_time_slots_per_mtp); -	} - -	/* Always return DC_OK. 
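/* Illustrative sketch, not from the patch: how dc_log_vcp_x_y() splits the
 * average time-slots-per-MTP value into the X.Y form it logs, restated with
 * plain doubles. Continuing the example above, ~531.2 PBN on a 40 PBN/slot
 * link gives 531.2 / 40 = 13.28, logged as X = 13, Y = 280 (Y in 1/1000).
 */
static void split_vcp_x_y_sketch(double avg_time_slots_per_mtp,
				 unsigned int *x, unsigned int *y)
{
	const unsigned int precision = 1000;		/* mirrors VCP_Y_PRECISION */

	avg_time_slots_per_mtp += 0.5 / precision;	/* round the last digit up */
	*x = (unsigned int)avg_time_slots_per_mtp;
	*y = (unsigned int)((avg_time_slots_per_mtp - *x) * precision);
}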
-	 * If part of sequence fails, log failure(s) and show blackscreen -	 */ -	return DC_OK; -} - -/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table - * because stream_encoder is not exposed to dm - */ -enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; -	struct fixed31_32 avg_time_slots_per_mtp; -	struct fixed31_32 pbn; -	struct fixed31_32 pbn_per_slot; -	int i; -	enum act_return_status ret; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	DC_LOGGER_INIT(link->ctx->logger); - -	/* enable_link_dp_mst already check link->enabled_stream_count -	 * and stream is in link->stream[]. This is called during set mode, -	 * stream_enc is available. -	 */ - -	/* get calculate VC payload for stream: stream_alloc */ -	if (dm_helpers_dp_mst_write_payload_allocation_table( -		stream->ctx, -		stream, -		&proposed_table, -		true)) -		update_mst_stream_alloc_table( -					link, -					pipe_ctx->stream_res.stream_enc, -					pipe_ctx->stream_res.hpo_dp_stream_enc, -					&proposed_table); -	else -		DC_LOG_WARNING("Failed to update" -				"MST allocation table for" -				"pipe idx:%d\n", -				pipe_ctx->pipe_idx); - -	DC_LOG_MST("%s  " -			"stream_count: %d: \n ", -			__func__, -			link->mst_stream_alloc_table.stream_count); - -	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -		DC_LOG_MST("stream_enc[%d]: %p      " -		"stream[%d].hpo_dp_stream_enc: %p      " -		"stream[%d].vcp_id: %d      " -		"stream[%d].slot_count: %d\n", -		i, -		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, -		i, -		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, -		i, -		link->mst_stream_alloc_table.stream_allocations[i].vcp_id, -		i, -		link->mst_stream_alloc_table.stream_allocations[i].slot_count); -	} - -	ASSERT(proposed_table.stream_count > 0); - -	/* program DP source TX for payload */ -	if (link_hwss->ext.update_stream_allocation_table == NULL || -			dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { -		DC_LOG_ERROR("Failure: unknown encoding format\n"); -		return DC_ERROR_UNEXPECTED; -	} - -	link_hwss->ext.update_stream_allocation_table(link, -			&pipe_ctx->link_res, -			&link->mst_stream_alloc_table); - -	/* send down message */ -	ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( -			stream->ctx, -			stream); - -	if (ret != ACT_LINK_LOST) { -		dm_helpers_dp_mst_send_payload_allocation( -				stream->ctx, -				stream, -				true); -	} - -	/* slot X.Y for only current stream */ -	pbn_per_slot = get_pbn_per_slot(stream); -	if (pbn_per_slot.value == 0) { -		DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. 
Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); -		return DC_UNSUPPORTED_VALUE; -	} -	pbn = get_pbn_from_timing(pipe_ctx); -	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - -	dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - -	if (link_hwss->ext.set_throttled_vcp_size) -		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); -	if (link_hwss->ext.set_hblank_min_symbol_width) -		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -				&link->cur_link_settings, -				avg_time_slots_per_mtp); - -	return DC_OK; - -} - -enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct fixed31_32 avg_time_slots_per_mtp; -	struct fixed31_32 pbn; -	struct fixed31_32 pbn_per_slot; -	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; -	uint8_t i; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	DC_LOGGER_INIT(link->ctx->logger); - -	/* decrease throttled vcp size */ -	pbn_per_slot = get_pbn_per_slot(stream); -	pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); -	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - -	if (link_hwss->ext.set_throttled_vcp_size) -		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); -	if (link_hwss->ext.set_hblank_min_symbol_width) -		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -				&link->cur_link_settings, -				avg_time_slots_per_mtp); - -	/* send ALLOCATE_PAYLOAD sideband message with updated pbn */ -	dm_helpers_dp_mst_send_payload_allocation( -			stream->ctx, -			stream, -			true); - -	/* notify immediate branch device table update */ -	if (dm_helpers_dp_mst_write_payload_allocation_table( -			stream->ctx, -			stream, -			&proposed_table, -			true)) { -		/* update mst stream allocation table software state */ -		update_mst_stream_alloc_table( -				link, -				pipe_ctx->stream_res.stream_enc, -				pipe_ctx->stream_res.hpo_dp_stream_enc, -				&proposed_table); -	} else { -		DC_LOG_WARNING("Failed to update" -				"MST allocation table for" -				"pipe idx:%d\n", -				pipe_ctx->pipe_idx); -	} - -	DC_LOG_MST("%s  " -			"stream_count: %d: \n ", -			__func__, -			link->mst_stream_alloc_table.stream_count); - -	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -		DC_LOG_MST("stream_enc[%d]: %p      " -				"stream[%d].hpo_dp_stream_enc: %p      " -				"stream[%d].vcp_id: %d      " -				"stream[%d].slot_count: %d\n", -				i, -				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, -				i, -				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, -				i, -				link->mst_stream_alloc_table.stream_allocations[i].vcp_id, -				i, -				link->mst_stream_alloc_table.stream_allocations[i].slot_count); -	} - -	ASSERT(proposed_table.stream_count > 0); - -	/* update mst stream allocation table hardware state */ -	if (link_hwss->ext.update_stream_allocation_table == NULL || -			dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { -		DC_LOG_ERROR("Failure: unknown encoding format\n"); -		return DC_ERROR_UNEXPECTED; -	} - -	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, -			&link->mst_stream_alloc_table); - -	/* poll for immediate branch device ACT handled */ -	dm_helpers_dp_mst_poll_for_allocation_change_trigger( -			stream->ctx, -			stream); - -	return DC_OK; -} - -enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) -{ -	struct 
dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct fixed31_32 avg_time_slots_per_mtp; -	struct fixed31_32 pbn; -	struct fixed31_32 pbn_per_slot; -	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; -	uint8_t i; -	enum act_return_status ret; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	DC_LOGGER_INIT(link->ctx->logger); - -	/* notify immediate branch device table update */ -	if (dm_helpers_dp_mst_write_payload_allocation_table( -				stream->ctx, -				stream, -				&proposed_table, -				true)) { -		/* update mst stream allocation table software state */ -		update_mst_stream_alloc_table( -				link, -				pipe_ctx->stream_res.stream_enc, -				pipe_ctx->stream_res.hpo_dp_stream_enc, -				&proposed_table); -	} - -	DC_LOG_MST("%s  " -			"stream_count: %d: \n ", -			__func__, -			link->mst_stream_alloc_table.stream_count); - -	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -		DC_LOG_MST("stream_enc[%d]: %p      " -				"stream[%d].hpo_dp_stream_enc: %p      " -				"stream[%d].vcp_id: %d      " -				"stream[%d].slot_count: %d\n", -				i, -				(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, -				i, -				(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, -				i, -				link->mst_stream_alloc_table.stream_allocations[i].vcp_id, -				i, -				link->mst_stream_alloc_table.stream_allocations[i].slot_count); -	} - -	ASSERT(proposed_table.stream_count > 0); - -	/* update mst stream allocation table hardware state */ -	if (link_hwss->ext.update_stream_allocation_table == NULL || -			dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { -		DC_LOG_ERROR("Failure: unknown encoding format\n"); -		return DC_ERROR_UNEXPECTED; -	} - -	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, -			&link->mst_stream_alloc_table); - -	/* poll for immediate branch device ACT handled */ -	ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( -			stream->ctx, -			stream); - -	if (ret != ACT_LINK_LOST) { -		/* send ALLOCATE_PAYLOAD sideband message with updated pbn */ -		dm_helpers_dp_mst_send_payload_allocation( -				stream->ctx, -				stream, -				true); -	} - -	/* increase throttled vcp size */ -	pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); -	pbn_per_slot = get_pbn_per_slot(stream); -	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - -	if (link_hwss->ext.set_throttled_vcp_size) -		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); -	if (link_hwss->ext.set_hblank_min_symbol_width) -		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -				&link->cur_link_settings, -				avg_time_slots_per_mtp); - -	return DC_OK; -} - -static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) -{ -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; -	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); -	int i; -	bool mst_mode = (link->type == dc_connection_mst_branch); -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	const struct dc_link_settings empty_link_settings = {0}; -	DC_LOGGER_INIT(link->ctx->logger); - -	/* deallocate_mst_payload is called before disable link. When mode or -	 * disable/enable monitor, new stream is created which is not in link -	 * stream[] yet. For this, payload is not allocated yet, so de-alloc -	 * should not done. 
For new mode set, map_resources will get engine -	 * for new stream, so stream_enc->id should be validated until here. -	 */ - -	/* slot X.Y */ -	if (link_hwss->ext.set_throttled_vcp_size) -		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); -	if (link_hwss->ext.set_hblank_min_symbol_width) -		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, -				&empty_link_settings, -				avg_time_slots_per_mtp); - -	if (mst_mode) { -		/* when link is in mst mode, reply on mst manager to remove -		 * payload -		 */ -		if (dm_helpers_dp_mst_write_payload_allocation_table( -				stream->ctx, -				stream, -				&proposed_table, -				false)) - -			update_mst_stream_alloc_table( -					link, -					pipe_ctx->stream_res.stream_enc, -					pipe_ctx->stream_res.hpo_dp_stream_enc, -					&proposed_table); -		else -			DC_LOG_WARNING("Failed to update" -					"MST allocation table for" -					"pipe idx:%d\n", -					pipe_ctx->pipe_idx); -	} else { -		/* when link is no longer in mst mode (mst hub unplugged), -		 * remove payload with default dc logic -		 */ -		remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc, -				pipe_ctx->stream_res.hpo_dp_stream_enc); -	} - -	DC_LOG_MST("%s" -			"stream_count: %d: ", -			__func__, -			link->mst_stream_alloc_table.stream_count); - -	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -		DC_LOG_MST("stream_enc[%d]: %p      " -		"stream[%d].hpo_dp_stream_enc: %p      " -		"stream[%d].vcp_id: %d      " -		"stream[%d].slot_count: %d\n", -		i, -		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, -		i, -		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, -		i, -		link->mst_stream_alloc_table.stream_allocations[i].vcp_id, -		i, -		link->mst_stream_alloc_table.stream_allocations[i].slot_count); -	} - -	/* update mst stream allocation table hardware state */ -	if (link_hwss->ext.update_stream_allocation_table == NULL || -			dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { -		DC_LOG_DEBUG("Unknown encoding format\n"); -		return DC_ERROR_UNEXPECTED; -	} - -	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, -			&link->mst_stream_alloc_table); - -	if (mst_mode) { -		dm_helpers_dp_mst_poll_for_allocation_change_trigger( -			stream->ctx, -			stream); - -		dm_helpers_dp_mst_send_payload_allocation( -			stream->ctx, -			stream, -			false); -	} - -	return DC_OK; -} - - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) -{ -	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; -	struct link_encoder *link_enc = NULL; -	struct cp_psp_stream_config config = {0}; -	enum dp_panel_mode panel_mode = -			dp_get_panel_mode(pipe_ctx->stream->link); - -	if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) -		return; - -	link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); -	ASSERT(link_enc); -	if (link_enc == NULL) -		return; - -	/* otg instance */ -	config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - -	/* dig front end */ -	config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - -	/* stream encoder index */ -	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; -	if (is_dp_128b_132b_signal(pipe_ctx)) -		config.stream_enc_idx = -				pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; - -	/* dig back end */ -	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; - -	/* link encoder index */ -	config.link_enc_idx = 
link_enc->transmitter - TRANSMITTER_UNIPHY_A; -	if (is_dp_128b_132b_signal(pipe_ctx)) -		config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; - -	/* dio output index is dpia index for DPIA endpoint & dcio index by default */ -	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -		config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; -	else -		config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - - -	/* phy index */ -	config.phy_idx = resource_transmitter_to_phy_idx( -			pipe_ctx->stream->link->dc, link_enc->transmitter); -	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -		/* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ -		config.phy_idx = 0; - -	/* stream properties */ -	config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; -	config.mst_enabled = (pipe_ctx->stream->signal == -			SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; -	config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; -	config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? -			1 : 0; -	config.dpms_off = dpms_off; - -	/* dm stream context */ -	config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; - -	cp_psp->funcs.update_stream_config(cp_psp->handle, &config); -} -#endif - -static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) -{ -	struct dc *dc = pipe_ctx->stream->ctx->dc; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct link_mst_stream_allocation_table proposed_table = {0}; -	struct fixed31_32 avg_time_slots_per_mtp; -	uint8_t req_slot_count = 0; -	uint8_t vc_id = 1; /// VC ID always 1 for SST -	struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings; -	const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	stream->link->cur_link_settings = link_settings; - -	if (link_hwss->ext.enable_dp_link_output) -		link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, -				stream->signal, pipe_ctx->clock_source->id, -				&link_settings); - -#ifdef DIAGS_BUILD -	/* Workaround for FPGA HPO capture DP link data: -	 * HPO capture will set link to active mode -	 * This workaround is required to get a capture from start of frame -	 */ -	if (!dc->debug.fpga_hpo_capture_en) { -		struct encoder_set_dp_phy_pattern_param params = {0}; -		params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE; - -		/* Set link active */ -		stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern( -				stream->link->hpo_dp_link_enc, -				&params); -	} -#endif - -	/* Enable DP_STREAM_ENC */ -	dc->hwss.enable_stream(pipe_ctx); - -	/* Set DPS PPS SDP (AKA "info frames") */ -	if (pipe_ctx->stream->timing.flags.DSC) { -		dp_set_dsc_pps_sdp(pipe_ctx, true, true); -	} - -	/* Allocate Payload */ -	if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { -		// MST case -		uint8_t i; - -		proposed_table.stream_count = state->stream_count; -		for (i = 0; i < state->stream_count; i++) { -			avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); -			req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); -			proposed_table.stream_allocations[i].slot_count = req_slot_count; -			proposed_table.stream_allocations[i].vcp_id = i+1; -			/* NOTE: This makes assumption that pipe_ctx index is same as stream index */ -			proposed_table.stream_allocations[i].hpo_dp_stream_enc = 
state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; -		} -	} else { -		// SST case -		avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link); -		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); -		proposed_table.stream_count = 1; /// Always 1 stream for SST -		proposed_table.stream_allocations[0].slot_count = req_slot_count; -		proposed_table.stream_allocations[0].vcp_id = vc_id; -		proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; -	} - -	link_hwss->ext.update_stream_allocation_table(stream->link, -			&pipe_ctx->link_res, -			&proposed_table); - -	if (link_hwss->ext.set_throttled_vcp_size) -		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - -	dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); -} - -void core_link_enable_stream( -		struct dc_state *state, -		struct pipe_ctx *pipe_ctx) -{ -	struct dc *dc = pipe_ctx->stream->ctx->dc; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->sink->link; -	enum dc_status status; -	struct link_encoder *link_enc; -	enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; -	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - -	if (is_dp_128b_132b_signal(pipe_ctx)) -		vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; - -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	if (pipe_ctx->stream->sink) { -		if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && -			pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { -			DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, -			pipe_ctx->stream->sink->edid_caps.display_name, -			pipe_ctx->stream->signal); -		} -	} - -	if (!IS_DIAG_DC(dc->ctx->dce_environment) && -			dc_is_virtual_signal(pipe_ctx->stream->signal)) -		return; - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	if (!dc_is_virtual_signal(pipe_ctx->stream->signal) -			&& !is_dp_128b_132b_signal(pipe_ctx)) { -		if (link_enc) -			link_enc->funcs->setup( -				link_enc, -				pipe_ctx->stream->signal); -	} - -	pipe_ctx->stream->link->link_state_valid = true; - -	if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { -		if (is_dp_128b_132b_signal(pipe_ctx)) -			otg_out_dest = OUT_MUX_HPO_DP; -		else -			otg_out_dest = OUT_MUX_DIO; -		pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); -	} - -	link_hwss->setup_stream_attribute(pipe_ctx); - -	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -		bool apply_edp_fast_boot_optimization = -			pipe_ctx->stream->apply_edp_fast_boot_optimization; - -		pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - -		// Enable VPG before building infoframe -		if (vpg && vpg->funcs->vpg_poweron) -			vpg->funcs->vpg_poweron(vpg); - -		resource_build_info_frame(pipe_ctx); -		dc->hwss.update_info_frame(pipe_ctx); - -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - -		/* Do not touch link on seamless boot optimization. 
*/ -		if (pipe_ctx->stream->apply_seamless_boot_optimization) { -			pipe_ctx->stream->dpms_off = false; - -			/* Still enable stream features & audio on seamless boot for DP external displays */ -			if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { -				enable_stream_features(pipe_ctx); -				if (pipe_ctx->stream_res.audio != NULL) { -					pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); -					dc->hwss.enable_audio_stream(pipe_ctx); -				} -			} - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -			update_psp_stream_config(pipe_ctx, false); -#endif -			return; -		} - -		/* eDP lit up by bios already, no need to enable again. */ -		if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && -					apply_edp_fast_boot_optimization && -					!pipe_ctx->stream->timing.flags.DSC && -					!pipe_ctx->next_odm_pipe) { -			pipe_ctx->stream->dpms_off = false; -#if defined(CONFIG_DRM_AMD_DC_HDCP) -			update_psp_stream_config(pipe_ctx, false); -#endif -			return; -		} - -		if (pipe_ctx->stream->dpms_off) -			return; - -		/* Have to setup DSC before DIG FE and BE are connected (which happens before the -		 * link training). This is to make sure the bandwidth sent to DIG BE won't be -		 * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag -		 * will be automatically set at a later time when the video is enabled -		 * (DP_VID_STREAM_EN = 1). -		 */ -		if (pipe_ctx->stream->timing.flags.DSC) { -			if (dc_is_dp_signal(pipe_ctx->stream->signal) || -				dc_is_virtual_signal(pipe_ctx->stream->signal)) -			dp_set_dsc_enable(pipe_ctx, true); - -		} - -		status = enable_link(state, pipe_ctx); - -		if (status != DC_OK) { -			DC_LOG_WARNING("enabling link %u failed: %d\n", -			pipe_ctx->stream->link->link_index, -			status); - -			/* Abort stream enable *unless* the failure was due to -			 * DP link training - some DP monitors will recover and -			 * show the stream anyway. But MST displays can't proceed -			 * without link training. -			 */ -			if (status != DC_FAIL_DP_LINK_TRAINING || -					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -				if (false == stream->link->link_status.link_active) -					disable_link(stream->link, &pipe_ctx->link_res, -							pipe_ctx->stream->signal); -				BREAK_TO_DEBUGGER(); -				return; -			} -		} - -		/* turn off otg test pattern if enable */ -		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) -			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, -					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, -					COLOR_DEPTH_UNDEFINED); - -		/* This second call is needed to reconfigure the DIG -		 * as a workaround for the incorrect value being applied -		 * from transmitter control. 
-		 */ -		if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || -				is_dp_128b_132b_signal(pipe_ctx))) -			if (link_enc) -				link_enc->funcs->setup( -					link_enc, -					pipe_ctx->stream->signal); - -		dc->hwss.enable_stream(pipe_ctx); - -		/* Set DPS PPS SDP (AKA "info frames") */ -		if (pipe_ctx->stream->timing.flags.DSC) { -			if (dc_is_dp_signal(pipe_ctx->stream->signal) || -					dc_is_virtual_signal(pipe_ctx->stream->signal)) { -				dp_set_dsc_on_rx(pipe_ctx, true); -				dp_set_dsc_pps_sdp(pipe_ctx, true, true); -			} -		} - -		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) -			dc_link_allocate_mst_payload(pipe_ctx); -		else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && -				is_dp_128b_132b_signal(pipe_ctx)) -			dc_link_update_sst_payload(pipe_ctx, true); - -		dc->hwss.unblank_stream(pipe_ctx, -			&pipe_ctx->stream->link->cur_link_settings); - -		if (stream->sink_patches.delay_ignore_msa > 0) -			msleep(stream->sink_patches.delay_ignore_msa); - -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			enable_stream_features(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_HDCP) -		update_psp_stream_config(pipe_ctx, false); -#endif - -		dc->hwss.enable_audio_stream(pipe_ctx); - -	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) -		if (is_dp_128b_132b_signal(pipe_ctx)) -			fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); -		if (dc_is_dp_signal(pipe_ctx->stream->signal) || -				dc_is_virtual_signal(pipe_ctx->stream->signal)) -			dp_set_dsc_enable(pipe_ctx, true); -	} - -	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { -		core_link_set_avmute(pipe_ctx, false); -	} -} - -void core_link_disable_stream(struct pipe_ctx *pipe_ctx) -{ -	struct dc  *dc = pipe_ctx->stream->ctx->dc; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->sink->link; -	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; - -	if (is_dp_128b_132b_signal(pipe_ctx)) -		vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; - -	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - -	if (pipe_ctx->stream->sink) { -		if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && -			pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { -			DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, -			pipe_ctx->stream->sink->edid_caps.display_name, -			pipe_ctx->stream->signal); -		} -	} - -	if (!IS_DIAG_DC(dc->ctx->dce_environment) && -			dc_is_virtual_signal(pipe_ctx->stream->signal)) -		return; - -	if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { -		if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) -			core_link_set_avmute(pipe_ctx, true); -	} - -	dc->hwss.disable_audio_stream(pipe_ctx); - -#if defined(CONFIG_DRM_AMD_DC_HDCP) -	update_psp_stream_config(pipe_ctx, true); -#endif -	dc->hwss.blank_stream(pipe_ctx); - -	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) -		deallocate_mst_payload(pipe_ctx); -	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && -			is_dp_128b_132b_signal(pipe_ctx)) -		dc_link_update_sst_payload(pipe_ctx, false); - -	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { -		struct ext_hdmi_settings settings = {0}; -		enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; - -		unsigned short masked_chip_caps = link->chip_caps & -				EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; -		//Need to inform that sink is going to use legacy HDMI mode. -		dal_ddc_service_write_scdc_data( -			link->ddc, -			165000,//vbios only handles 165Mhz. 
-			false); -		if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { -			/* DP159, Retimer settings */ -			if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) -				write_i2c_retimer_setting(pipe_ctx, -						false, false, &settings); -			else -				write_i2c_default_retimer_setting(pipe_ctx, -						false, false); -		} else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { -			/* PI3EQX1204, Redriver settings */ -			write_i2c_redriver_setting(pipe_ctx, false); -		} -	} - -	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && -			!is_dp_128b_132b_signal(pipe_ctx)) { - -		/* In DP1.x SST mode, our encoder will go to TPS1 -		 * when link is on but stream is off. -		 * Disabling link before stream will avoid exposing TPS1 pattern -		 * during the disable sequence as it will confuse some receivers -		 * state machine. -		 * In DP2 or MST mode, our encoder will stay video active -		 */ -		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); -		dc->hwss.disable_stream(pipe_ctx); -	} else { -		dc->hwss.disable_stream(pipe_ctx); -		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); -	} - -	if (pipe_ctx->stream->timing.flags.DSC) { -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			dp_set_dsc_enable(pipe_ctx, false); -	} -	if (is_dp_128b_132b_signal(pipe_ctx)) { -		if (pipe_ctx->stream_res.tg->funcs->set_out_mux) -			pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); -	} - -	if (vpg && vpg->funcs->vpg_powerdown) -		vpg->funcs->vpg_powerdown(vpg); -} - -void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) -{ -	struct dc  *dc = pipe_ctx->stream->ctx->dc; - -	if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) -		return; - -	dc->hwss.set_avmute(pipe_ctx, enable); -} - -/** - *  dc_link_enable_hpd_filter: - *     If enable is true, programs HPD filter on associated HPD line using - *     delay_on_disconnect/delay_on_connect values dependent on - *     link->connector_signal - * - *     If enable is false, programs HPD filter on associated HPD line with no - *     delays on connect or disconnect - * - *  @link:   pointer to the dc link - *  @enable: boolean specifying whether to enable hbd - */ -void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) -{ -	struct gpio *hpd; - -	if (enable) { -		link->is_hpd_filter_disabled = false; -		program_hpd_filter(link); -	} else { -		link->is_hpd_filter_disabled = true; -		/* Obtain HPD handle */ -		hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - -		if (!hpd) -			return; - -		/* Setup HPD filtering */ -		if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { -			struct gpio_hpd_config config; - -			config.delay_on_connect = 0; -			config.delay_on_disconnect = 0; - -			dal_irq_setup_hpd_filter(hpd, &config); - -			dal_gpio_close(hpd); -		} else { -			ASSERT_CRITICAL(false); -		} -		/* Release HPD handle */ -		dal_gpio_destroy_irq(&hpd); -	} -} - -void dc_link_set_drive_settings(struct dc *dc, -				struct link_training_settings *lt_settings, -				const struct dc_link *link) -{ - -	int i; -	struct link_resource link_res; - -	for (i = 0; i < dc->link_count; i++) -		if (dc->links[i] == link) -			break; - -	if (i >= dc->link_count) -		ASSERT_CRITICAL(false); - -	dc_link_get_cur_link_res(link, &link_res); -	dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings); -} - -void dc_link_set_preferred_link_settings(struct dc *dc, -					 struct 
dc_link_settings *link_setting, -					 struct dc_link *link) -{ -	int i; -	struct pipe_ctx *pipe; -	struct dc_stream_state *link_stream; -	struct dc_link_settings store_settings = *link_setting; - -	link->preferred_link_setting = store_settings; - -	/* Retrain with preferred link settings only relevant for -	 * DP signal type -	 * Check for non-DP signal or if passive dongle present -	 */ -	if (!dc_is_dp_signal(link->connector_signal) || -		link->dongle_max_pix_clk > 0) -		return; - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe = &dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe->stream && pipe->stream->link) { -			if (pipe->stream->link == link) { -				link_stream = pipe->stream; -				break; -			} -		} -	} - -	/* Stream not found */ -	if (i == MAX_PIPES) -		return; - -	/* Cannot retrain link if backend is off */ -	if (link_stream->dpms_off) -		return; - -	if (decide_link_settings(link_stream, &store_settings)) -		dp_retrain_link_dp_test(link, &store_settings, false); -} - -void dc_link_set_preferred_training_settings(struct dc *dc, -						 struct dc_link_settings *link_setting, -						 struct dc_link_training_overrides *lt_overrides, -						 struct dc_link *link, -						 bool skip_immediate_retrain) -{ -	if (lt_overrides != NULL) -		link->preferred_training_settings = *lt_overrides; -	else -		memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); - -	if (link_setting != NULL) { -		link->preferred_link_setting = *link_setting; -		if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) -			/* TODO: add dc update for acquiring link res  */ -			skip_immediate_retrain = true; -	} else { -		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; -		link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; -	} - -	/* Retrain now, or wait until next stream update to apply */ -	if (skip_immediate_retrain == false) -		dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); -} - -void dc_link_enable_hpd(const struct dc_link *link) -{ -	dc_link_dp_enable_hpd(link); -} - -void dc_link_disable_hpd(const struct dc_link *link) -{ -	dc_link_dp_disable_hpd(link); -} - -void dc_link_set_test_pattern(struct dc_link *link, -			      enum dp_test_pattern test_pattern, -			      enum dp_test_pattern_color_space test_pattern_color_space, -			      const struct link_training_settings *p_link_settings, -			      const unsigned char *p_custom_pattern, -			      unsigned int cust_pattern_size) -{ -	if (link != NULL) -		dc_link_dp_set_test_pattern( -			link, -			test_pattern, -			test_pattern_color_space, -			p_link_settings, -			p_custom_pattern, -			cust_pattern_size); -} - -uint32_t dc_link_bandwidth_kbps( -	const struct dc_link *link, -	const struct dc_link_settings *link_setting) -{ -	uint32_t total_data_bw_efficiency_x10000 = 0; -	uint32_t link_rate_per_lane_kbps = 0; - -	switch (dp_get_link_encoding_format(link_setting)) { -	case DP_8b_10b_ENCODING: -		/* For 8b/10b encoding: -		 * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. -		 * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. 
-		 */ -		link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; -		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; -		if (dc_link_should_enable_fec(link)) { -			total_data_bw_efficiency_x10000 /= 100; -			total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; -		} -		break; -	case DP_128b_132b_ENCODING: -		/* For 128b/132b encoding: -		 * link rate is defined in the unit of 10mbps per lane. -		 * total data bandwidth efficiency is always 96.71%. -		 */ -		link_rate_per_lane_kbps = link_setting->link_rate * 10000; -		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; -		break; -	default: -		break; -	} - -	/* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ -	return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; -} - -const struct dc_link_settings *dc_link_get_link_cap( -		const struct dc_link *link) -{ -	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && -			link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) -		return &link->preferred_link_setting; -	return &link->verified_link_cap; -} - -void dc_link_overwrite_extended_receiver_cap( -		struct dc_link *link) -{ -	dp_overwrite_extended_receiver_cap(link); -} - -bool dc_link_is_fec_supported(const struct dc_link *link) -{ -	/* TODO - use asic cap instead of link_enc->features -	 * we no longer know which link enc to use for this link before commit -	 */ -	struct link_encoder *link_enc = NULL; - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	return (dc_is_dp_signal(link->connector_signal) && link_enc && -			link_enc->features.fec_supported && -			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && -			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); -} - -bool dc_link_should_enable_fec(const struct dc_link *link) -{ -	bool force_disable = false; - -	if (link->fec_state == dc_link_fec_enabled) -		force_disable = false; -	else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && -			link->local_sink && -			link->local_sink->edid_caps.panel_patch.disable_fec) -		force_disable = true; -	else if (link->connector_signal == SIGNAL_TYPE_EDP -			&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. 
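/* Illustrative sketch, not from the patch: dc_link_bandwidth_kbps() above
 * reduces to raw lane rate * lane count * data efficiency. Approximate
 * examples (the FEC overhead is roughly 3%, per the comment above, so the
 * FEC figure here is approximate):
 *   HBR2 x4, 8b/10b, no FEC : 5400000 kbps * 4 * 0.8000  = 17280000 kbps
 *   HBR2 x4, 8b/10b, FEC    : 5400000 kbps * 4 * ~0.776 ~= 16761600 kbps
 *   UHBR10 x4, 128b/132b    : 10000000 kbps * 4 * 0.9671 = 38684000 kbps
 */
static unsigned long long effective_link_kbps_sketch(unsigned long long lane_rate_kbps,
						     unsigned int lane_count,
						     unsigned int efficiency_x10000)
{
	return lane_rate_kbps * lane_count * efficiency_x10000 / 10000;
}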
-			 dsc_support.DSC_SUPPORT == false -				|| link->panel_config.dsc.disable_dsc_edp -				|| !link->dc->caps.edp_dsc_support)) -		force_disable = true; - -	return !force_disable && dc_link_is_fec_supported(link); -} - -uint32_t dc_bandwidth_in_kbps_from_timing( -		const struct dc_crtc_timing *timing) -{ -	uint32_t bits_per_channel = 0; -	uint32_t kbps; - -#if defined(CONFIG_DRM_AMD_DC_DCN) -	if (timing->flags.DSC) -		return dc_dsc_stream_bandwidth_in_kbps(timing, -				timing->dsc_cfg.bits_per_pixel, -				timing->dsc_cfg.num_slices_h, -				timing->dsc_cfg.is_dp); -#endif /* CONFIG_DRM_AMD_DC_DCN */ - -	switch (timing->display_color_depth) { -	case COLOR_DEPTH_666: -		bits_per_channel = 6; -		break; -	case COLOR_DEPTH_888: -		bits_per_channel = 8; -		break; -	case COLOR_DEPTH_101010: -		bits_per_channel = 10; -		break; -	case COLOR_DEPTH_121212: -		bits_per_channel = 12; -		break; -	case COLOR_DEPTH_141414: -		bits_per_channel = 14; -		break; -	case COLOR_DEPTH_161616: -		bits_per_channel = 16; -		break; -	default: -		ASSERT(bits_per_channel != 0); -		bits_per_channel = 8; -		break; -	} - -	kbps = timing->pix_clk_100hz / 10; -	kbps *= bits_per_channel; - -	if (timing->flags.Y_ONLY != 1) { -		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ -		kbps *= 3; -		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) -			kbps /= 2; -		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) -			kbps = kbps * 2 / 3; -	} - -	return kbps; - -} - -void dc_link_get_cur_link_res(const struct dc_link *link, -		struct link_resource *link_res) -{ -	int i; -	struct pipe_ctx *pipe = NULL; - -	memset(link_res, 0, sizeof(*link_res)); - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { -			if (pipe->stream->link == link) { -				*link_res = pipe->link_res; -				break; -			} -		} -	} - -} - -/** - * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state - * @dc: pointer to dc of the dm calling this - * @map: a dc link resource snapshot defined internally to dc. - * - * DM needs to capture a snapshot of current link resource allocation mapping - * and store it in its persistent storage. - * - * Some of the link resource is using first come first serve policy. - * The allocation mapping depends on original hotplug order. This information - * is lost after driver is loaded next time. The snapshot is used in order to - * restore link resource to its previous state so user will get consistent - * link capability allocation across reboot. - * - * Return: none (void function) - * - */ -void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) -{ -	struct dc_link *link; -	uint32_t i; -	uint32_t hpo_dp_recycle_map = 0; - -	*map = 0; - -	if (dc->caps.dp_hpo) { -		for (i = 0; i < dc->caps.max_links; i++) { -			link = dc->links[i]; -			if (link->link_status.link_active && -					dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && -					dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) -				/* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability -				 * but current link doesn't use it. 
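/* Illustrative sketch, not from the patch: the uncompressed stream bandwidth
 * computed by dc_bandwidth_in_kbps_from_timing() above, for a 148.5 MHz,
 * 8 bpc timing:
 *   RGB / YCbCr444 : 148500 kpix/s * 8 * 3 = 3564000 kbps
 *   YCbCr422       : 3564000 * 2 / 3       = 2376000 kbps
 *   YCbCr420       : 3564000 / 2           = 1782000 kbps
 * (DSC timings bypass this path and use the DSC target bits-per-pixel instead.)
 */
static unsigned int stream_kbps_rgb_sketch(unsigned int pix_clk_100hz,
					   unsigned int bits_per_channel)
{
	return pix_clk_100hz / 10 * bits_per_channel * 3;
}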
-				 */ -				hpo_dp_recycle_map |= (1 << i); -		} -		*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); -	} -} - -/** - * dc_restore_link_res_map() - restore link resource allocation state from a snapshot - * @dc: pointer to dc of the dm calling this - * @map: a dc link resource snapshot defined internally to dc. - * - * DM needs to call this function after initial link detection on boot and - * before first commit streams to restore link resource allocation state - * from previous boot session. - * - * Some of the link resource is using first come first serve policy. - * The allocation mapping depends on original hotplug order. This information - * is lost after driver is loaded next time. The snapshot is used in order to - * restore link resource to its previous state so user will get consistent - * link capability allocation across reboot. - * - * Return: none (void function) - * - */ -void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) -{ -	struct dc_link *link; -	uint32_t i; -	unsigned int available_hpo_dp_count; -	uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) -			>> LINK_RES_HPO_DP_REC_MAP__SHIFT; - -	if (dc->caps.dp_hpo) { -		available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; -		/* remove excess 128b/132b encoding support for not recycled links */ -		for (i = 0; i < dc->caps.max_links; i++) { -			if ((hpo_dp_recycle_map & (1 << i)) == 0) { -				link = dc->links[i]; -				if (link->type != dc_connection_none && -						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { -					if (available_hpo_dp_count > 0) -						available_hpo_dp_count--; -					else -						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ -						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; -				} -			} -		} -		/* remove excess 128b/132b encoding support for recycled links */ -		for (i = 0; i < dc->caps.max_links; i++) { -			if ((hpo_dp_recycle_map & (1 << i)) != 0) { -				link = dc->links[i]; -				if (link->type != dc_connection_none && -						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { -					if (available_hpo_dp_count > 0) -						available_hpo_dp_count--; -					else -						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ -						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; -				} -			} -		} -	} -} +// TODO - remove this file after external build dependencies is resolved. +/* NOTE: This file is pending to be removed, do not add new code to this file */
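/* Illustrative sketch, not from the patch: dc_get_cur_link_res_map() and
 * dc_restore_link_res_map() above pack a one-bit-per-link "HPO DP link
 * encoder recycled" bitmap into a single 32-bit snapshot. The shift and mask
 * below are hypothetical stand-ins for LINK_RES_HPO_DP_REC_MAP__SHIFT and
 * __MASK, purely to show the round trip; e.g. links 0 and 2 recycled gives
 * bitmap 0x5, stored as 0x5 << shift and recovered with (map & mask) >> shift.
 */
#define REC_MAP_SHIFT_SKETCH	16
#define REC_MAP_MASK_SKETCH	(0xffffu << REC_MAP_SHIFT_SKETCH)

static unsigned int pack_recycle_map_sketch(unsigned int per_link_bits)
{
	return (per_link_bits << REC_MAP_SHIFT_SKETCH) & REC_MAP_MASK_SKETCH;
}

static unsigned int unpack_recycle_map_sketch(unsigned int map)
{
	return (map & REC_MAP_MASK_SKETCH) >> REC_MAP_SHIFT_SKETCH;
}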
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c deleted file mode 100644 index 1254d38f1778..000000000000 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ /dev/null @@ -1,7598 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - */ -#include "dm_services.h" -#include "dc.h" -#include "dc_link_dp.h" -#include "dm_helpers.h" -#include "opp.h" -#include "dsc.h" -#include "clk_mgr.h" -#include "resource.h" - -#include "inc/core_types.h" -#include "link_hwss.h" -#include "dc_link_ddc.h" -#include "core_status.h" -#include "dpcd_defs.h" -#include "dc_dmub_srv.h" -#include "dce/dmub_hw_lock_mgr.h" -#include "inc/dc_link_dpia.h" -#include "inc/link_enc_cfg.h" -#include "link/link_dp_trace.h" - -/*Travis*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; -/*Nutmeg*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; - -#define DC_LOGGER \ -	link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ - -#include "link_dpcd.h" - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - -	/* maximum pre emphasis level allowed for each voltage swing level*/ -	static const enum dc_pre_emphasis -	voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, -					    PRE_EMPHASIS_LEVEL2, -					    PRE_EMPHASIS_LEVEL1, -					    PRE_EMPHASIS_DISABLED }; - -enum { -	POST_LT_ADJ_REQ_LIMIT = 6, -	POST_LT_ADJ_REQ_TIMEOUT = 200 -}; - -struct dp_lt_fallback_entry { -	enum dc_lane_count lane_count; -	enum dc_link_rate link_rate; -}; - -static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { -		/* This link training fallback array is ordered by -		 * link bandwidth from highest to lowest. -		 * DP specs makes it a normative policy to always -		 * choose the next highest link bandwidth during -		 * link training fallback. 
-		 */ -		{LANE_COUNT_FOUR, LINK_RATE_UHBR20}, -		{LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, -		{LANE_COUNT_TWO, LINK_RATE_UHBR20}, -		{LANE_COUNT_FOUR, LINK_RATE_UHBR10}, -		{LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, -		{LANE_COUNT_FOUR, LINK_RATE_HIGH3}, -		{LANE_COUNT_ONE, LINK_RATE_UHBR20}, -		{LANE_COUNT_TWO, LINK_RATE_UHBR10}, -		{LANE_COUNT_FOUR, LINK_RATE_HIGH2}, -		{LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, -		{LANE_COUNT_TWO, LINK_RATE_HIGH3}, -		{LANE_COUNT_ONE, LINK_RATE_UHBR10}, -		{LANE_COUNT_TWO, LINK_RATE_HIGH2}, -		{LANE_COUNT_FOUR, LINK_RATE_HIGH}, -		{LANE_COUNT_ONE, LINK_RATE_HIGH3}, -		{LANE_COUNT_FOUR, LINK_RATE_LOW}, -		{LANE_COUNT_ONE, LINK_RATE_HIGH2}, -		{LANE_COUNT_TWO, LINK_RATE_HIGH}, -		{LANE_COUNT_TWO, LINK_RATE_LOW}, -		{LANE_COUNT_ONE, LINK_RATE_HIGH}, -		{LANE_COUNT_ONE, LINK_RATE_LOW}, -}; - -static const struct dc_link_settings fail_safe_link_settings = { -		.lane_count = LANE_COUNT_ONE, -		.link_rate = LINK_RATE_LOW, -		.link_spread = LINK_SPREAD_DISABLED, -}; - -static bool decide_fallback_link_setting( -		struct dc_link *link, -		struct dc_link_settings *max, -		struct dc_link_settings *cur, -		enum link_training_result training_result); -static void maximize_lane_settings(const struct link_training_settings *lt_settings, -		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); -static void override_lane_settings(const struct link_training_settings *lt_settings, -		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); - -static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, -		const struct dc_link_settings *link_settings) -{ -	union training_aux_rd_interval training_rd_interval; -	uint32_t wait_in_micro_secs = 100; - -	memset(&training_rd_interval, 0, sizeof(training_rd_interval)); -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && -			link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { -		core_link_read_dpcd( -				link, -				DP_TRAINING_AUX_RD_INTERVAL, -				(uint8_t *)&training_rd_interval, -				sizeof(training_rd_interval)); -		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) -			wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; -	} - -	return wait_in_micro_secs; -} - -static uint32_t get_eq_training_aux_rd_interval( -	struct dc_link *link, -	const struct dc_link_settings *link_settings) -{ -	union training_aux_rd_interval training_rd_interval; - -	memset(&training_rd_interval, 0, sizeof(training_rd_interval)); -	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { -		core_link_read_dpcd( -				link, -				DP_128b_132b_TRAINING_AUX_RD_INTERVAL, -				(uint8_t *)&training_rd_interval, -				sizeof(training_rd_interval)); -	} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && -			link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { -		core_link_read_dpcd( -				link, -				DP_TRAINING_AUX_RD_INTERVAL, -				(uint8_t *)&training_rd_interval, -				sizeof(training_rd_interval)); -	} - -	switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { -	case 0: return 400; -	case 1: return 4000; -	case 2: return 8000; -	case 3: return 12000; -	case 4: return 16000; -	case 5: return 32000; -	case 6: return 64000; -	default: return 400; -	} -} - -void dp_wait_for_training_aux_rd_interval( -	struct dc_link *link, -	uint32_t wait_in_micro_secs) -{ -	if (wait_in_micro_secs > 1000) -		msleep(wait_in_micro_secs/1000); -	else -		udelay(wait_in_micro_secs); - -	DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", -		__func__, -		wait_in_micro_secs); -} - -enum 
dpcd_training_patterns -	dc_dp_training_pattern_to_dpcd_training_pattern( -	struct dc_link *link, -	enum dc_dp_training_pattern pattern) -{ -	enum dpcd_training_patterns dpcd_tr_pattern = -	DPCD_TRAINING_PATTERN_VIDEOIDLE; - -	switch (pattern) { -	case DP_TRAINING_PATTERN_SEQUENCE_1: -		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_2: -		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_3: -		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_4: -		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; -		break; -	case DP_128b_132b_TPS1: -		dpcd_tr_pattern = DPCD_128b_132b_TPS1; -		break; -	case DP_128b_132b_TPS2: -		dpcd_tr_pattern = DPCD_128b_132b_TPS2; -		break; -	case DP_128b_132b_TPS2_CDS: -		dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; -		break; -	case DP_TRAINING_PATTERN_VIDEOIDLE: -		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; -		break; -	default: -		ASSERT(0); -		DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", -			__func__, pattern); -		break; -	} - -	return dpcd_tr_pattern; -} - -static void dpcd_set_training_pattern( -	struct dc_link *link, -	enum dc_dp_training_pattern training_pattern) -{ -	union dpcd_training_pattern dpcd_pattern = {0}; - -	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = -			dc_dp_training_pattern_to_dpcd_training_pattern( -					link, training_pattern); - -	core_link_write_dpcd( -		link, -		DP_TRAINING_PATTERN_SET, -		&dpcd_pattern.raw, -		1); - -	DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", -		__func__, -		DP_TRAINING_PATTERN_SET, -		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static enum dc_dp_training_pattern decide_cr_training_pattern( -		const struct dc_link_settings *link_settings) -{ -	switch (dp_get_link_encoding_format(link_settings)) { -	case DP_8b_10b_ENCODING: -	default: -		return DP_TRAINING_PATTERN_SEQUENCE_1; -	case DP_128b_132b_ENCODING: -		return DP_128b_132b_TPS1; -	} -} - -static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, -		const struct dc_link_settings *link_settings) -{ -	struct link_encoder *link_enc; -	struct encoder_feature_support *enc_caps; -	struct dpcd_caps *rx_caps = &link->dpcd_caps; -	enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); -	enc_caps = &link_enc->features; - -	switch (dp_get_link_encoding_format(link_settings)) { -	case DP_8b_10b_ENCODING: -		if (enc_caps->flags.bits.IS_TPS4_CAPABLE && -				rx_caps->max_down_spread.bits.TPS4_SUPPORTED) -			pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -		else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && -				rx_caps->max_ln_count.bits.TPS3_SUPPORTED) -			pattern = DP_TRAINING_PATTERN_SEQUENCE_3; -		else -			pattern = DP_TRAINING_PATTERN_SEQUENCE_2; -		break; -	case DP_128b_132b_ENCODING: -		pattern = DP_128b_132b_TPS2; -		break; -	default: -		pattern = DP_TRAINING_PATTERN_SEQUENCE_2; -		break; -	} -	return pattern; -} - -static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) -{ -	uint8_t link_rate = 0; -	enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings); - -	if (encoding == DP_128b_132b_ENCODING) -		switch (link_settings->link_rate) { -		case LINK_RATE_UHBR10: -			link_rate = 0x1; -			break; -		case LINK_RATE_UHBR20: -			link_rate = 0x2; -			break; -		case LINK_RATE_UHBR13_5: -			link_rate = 0x4; -			break; -		default: -			link_rate = 0; -			break; -		} -	else if (encoding == 
DP_8b_10b_ENCODING) -		link_rate = (uint8_t) link_settings->link_rate; -	else -		link_rate = 0; - -	return link_rate; -} - -static void dp_fixed_vs_pe_read_lane_adjust( -	struct dc_link *link, -	union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) -{ -	const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; -	const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; -	const uint8_t offset = dp_convert_to_count( -			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); -	uint32_t vendor_lttpr_write_address = 0xF004F; -	uint32_t vendor_lttpr_read_address = 0xF0053; -	uint8_t dprx_vs = 0; -	uint8_t dprx_pe = 0; -	uint8_t lane; - -	if (offset != 0xFF) { -		vendor_lttpr_write_address += -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); -		vendor_lttpr_read_address += -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); -	} - -	/* W/A to read lane settings requested by DPRX */ -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_vs[0], -			sizeof(vendor_lttpr_write_data_vs)); -	core_link_read_dpcd( -			link, -			vendor_lttpr_read_address, -			&dprx_vs, -			1); -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_pe[0], -			sizeof(vendor_lttpr_write_data_pe)); -	core_link_read_dpcd( -			link, -			vendor_lttpr_read_address, -			&dprx_pe, -			1); - -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET  = (dprx_vs >> (2 * lane)) & 0x3; -		dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; -	} -} - -static void dp_fixed_vs_pe_set_retimer_lane_settings( -	struct dc_link *link, -	const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], -	uint8_t lane_count) -{ -	const uint8_t offset = dp_convert_to_count( -			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); -	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; -	uint32_t vendor_lttpr_write_address = 0xF004F; -	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; -	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; -	uint8_t lane = 0; - -	if (offset != 0xFF) { -		vendor_lttpr_write_address += -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); -	} - -	for (lane = 0; lane < lane_count; lane++) { -		vendor_lttpr_write_data_vs[3] |= -				dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); -		vendor_lttpr_write_data_pe[3] |= -				dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); -	} - -	/* Force LTTPR to output desired VS and PE */ -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_reset[0], -			sizeof(vendor_lttpr_write_data_reset)); -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_vs[0], -			sizeof(vendor_lttpr_write_data_vs)); -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_pe[0], -			sizeof(vendor_lttpr_write_data_pe)); -} - -enum dc_status dpcd_set_link_settings( -	struct dc_link *link, -	const struct link_training_settings *lt_settings) -{ -	uint8_t rate; -	enum dc_status status; - -	union down_spread_ctrl downspread = {0}; -	union lane_count_set lane_count_set = {0}; - -	downspread.raw = (uint8_t) -	(lt_settings->link_settings.link_spread); - -	lane_count_set.bits.LANE_COUNT_SET = -	lt_settings->link_settings.lane_count; - -	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; -	
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - -	if (link->ep_type == DISPLAY_ENDPOINT_PHY && -			lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { -		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = -				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; -	} - -	status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, -		&downspread.raw, sizeof(downspread)); - -	status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, -		&lane_count_set.raw, 1); - -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && -			lt_settings->link_settings.use_link_rate_set == true) { -		rate = 0; -		/* WA for some MUX chips that will power down with eDP and lose supported -		 * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure -		 * MUX chip gets link rate set back before link training. -		 */ -		if (link->connector_signal == SIGNAL_TYPE_EDP) { -			uint8_t supported_link_rates[16]; - -			core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, -					supported_link_rates, sizeof(supported_link_rates)); -		} -		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); -		status = core_link_write_dpcd(link, DP_LINK_RATE_SET, -				<_settings->link_settings.link_rate_set, 1); -	} else { -		rate = get_dpcd_link_rate(<_settings->link_settings); - -		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); -	} - -	if (rate) { -		DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", -			__func__, -			DP_LINK_BW_SET, -			lt_settings->link_settings.link_rate, -			DP_LANE_COUNT_SET, -			lt_settings->link_settings.lane_count, -			lt_settings->enhanced_framing, -			DP_DOWNSPREAD_CTRL, -			lt_settings->link_settings.link_spread); -	} else { -		DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", -			__func__, -			DP_LINK_RATE_SET, -			lt_settings->link_settings.link_rate_set, -			DP_LANE_COUNT_SET, -			lt_settings->link_settings.lane_count, -			lt_settings->enhanced_framing, -			DP_DOWNSPREAD_CTRL, -			lt_settings->link_settings.link_spread); -	} - -	return status; -} - -uint8_t dc_dp_initialize_scrambling_data_symbols( -	struct dc_link *link, -	enum dc_dp_training_pattern pattern) -{ -	uint8_t disable_scrabled_data_symbols = 0; - -	switch (pattern) { -	case DP_TRAINING_PATTERN_SEQUENCE_1: -	case DP_TRAINING_PATTERN_SEQUENCE_2: -	case DP_TRAINING_PATTERN_SEQUENCE_3: -		disable_scrabled_data_symbols = 1; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_4: -	case DP_128b_132b_TPS1: -	case DP_128b_132b_TPS2: -		disable_scrabled_data_symbols = 0; -		break; -	default: -		ASSERT(0); -		DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", -			__func__, pattern); -		break; -	} -	return disable_scrabled_data_symbols; -} - -static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) -{ -	return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); -} - -static void dpcd_set_lt_pattern_and_lane_settings( -	struct dc_link *link, -	const struct link_training_settings *lt_settings, -	enum dc_dp_training_pattern pattern, -	uint32_t offset) -{ -	uint32_t dpcd_base_lt_offset; - -	uint8_t dpcd_lt_buffer[5] = {0}; -	union dpcd_training_pattern dpcd_pattern = {0}; -	uint32_t size_in_bytes; -	bool edp_workaround = false; /* TODO link_prop.INTERNAL */ -	dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; - -	if (is_repeater(lt_settings, offset)) -		dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + -			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * 
(offset - 1)); - -	/***************************************************************** -	* DpcdAddress_TrainingPatternSet -	*****************************************************************/ -	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = -		dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - -	dpcd_pattern.v1_4.SCRAMBLING_DISABLE = -		dc_dp_initialize_scrambling_data_symbols(link, pattern); - -	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] -		= dpcd_pattern.raw; - -	if (is_repeater(lt_settings, offset)) { -		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", -			__func__, -			offset, -			dpcd_base_lt_offset, -			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -	} else { -		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", -			__func__, -			dpcd_base_lt_offset, -			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -	} - -	/* concatenate everything into one buffer*/ -	size_in_bytes = lt_settings->link_settings.lane_count * -			sizeof(lt_settings->dpcd_lane_settings[0]); - -	 // 0x00103 - 0x00102 -	memmove( -		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], -		lt_settings->dpcd_lane_settings, -		size_in_bytes); - -	if (is_repeater(lt_settings, offset)) { -		if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_128b_132b_ENCODING) -			DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" -					" 0x%X TX_FFE_PRESET_VALUE = %x\n", -					__func__, -					offset, -					dpcd_base_lt_offset, -					lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); -		else if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_8b_10b_ENCODING) -		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" -				" 0x%X VS set = %x PE set = %x max VS Reached = %x  max PE Reached = %x\n", -			__func__, -			offset, -			dpcd_base_lt_offset, -			lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, -			lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, -			lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, -			lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); -	} else { -		if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_128b_132b_ENCODING) -			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", -					__func__, -					dpcd_base_lt_offset, -					lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); -		else if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_8b_10b_ENCODING) -			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", -					__func__, -					dpcd_base_lt_offset, -					lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, -					lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, -					lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, -					lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); -	} -	if (edp_workaround) { -		/* for eDP write in 2 parts because the 5-byte burst is -		* causing issues on some eDP panels (EPR#366724) -		*/ -		core_link_write_dpcd( -			link, -			DP_TRAINING_PATTERN_SET, -			&dpcd_pattern.raw, -			sizeof(dpcd_pattern.raw)); - -		core_link_write_dpcd( -			link, -			DP_TRAINING_LANE0_SET, -			(uint8_t *)(lt_settings->dpcd_lane_settings), -			size_in_bytes); - -	} else if (dp_get_link_encoding_format(<_settings->link_settings) == -			DP_128b_132b_ENCODING) { -		core_link_write_dpcd( -				link, -				dpcd_base_lt_offset, -				dpcd_lt_buffer, -				sizeof(dpcd_lt_buffer)); -	} else -		/* write it all in (1 + number-of-lanes)-byte 
burst*/ -		core_link_write_dpcd( -				link, -				dpcd_base_lt_offset, -				dpcd_lt_buffer, -				size_in_bytes + sizeof(dpcd_pattern.raw)); -} - -bool dp_is_cr_done(enum dc_lane_count ln_count, -	union lane_status *dpcd_lane_status) -{ -	uint32_t lane; -	/*LANEx_CR_DONE bits All 1's?*/ -	for (lane = 0; lane < (uint32_t)(ln_count); lane++) { -		if (!dpcd_lane_status[lane].bits.CR_DONE_0) -			return false; -	} -	return true; -} - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, -		union lane_status *dpcd_lane_status) -{ -	bool done = true; -	uint32_t lane; -	for (lane = 0; lane < (uint32_t)(ln_count); lane++) -		if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) -			done = false; -	return done; -} - -bool dp_is_symbol_locked(enum dc_lane_count ln_count, -		union lane_status *dpcd_lane_status) -{ -	bool locked = true; -	uint32_t lane; -	for (lane = 0; lane < (uint32_t)(ln_count); lane++) -		if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) -			locked = false; -	return locked; -} - -bool dp_is_interlane_aligned(union lane_align_status_updated align_status) -{ -	return align_status.bits.INTERLANE_ALIGN_DONE == 1; -} - -void dp_hw_to_dpcd_lane_settings( -		const struct link_training_settings *lt_settings, -		const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], -		union dpcd_training_lane dpcd_lane_settings[]) -{ -	uint8_t lane = 0; - -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_8b_10b_ENCODING) { -			dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = -					(uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); -			dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = -					(uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); -			dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = -					(hw_lane_settings[lane].VOLTAGE_SWING == -							VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); -			dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = -					(hw_lane_settings[lane].PRE_EMPHASIS == -							PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); -		} -		else if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_128b_132b_ENCODING) { -			dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = -					hw_lane_settings[lane].FFE_PRESET.settings.level; -		} -	} -} - -void dp_decide_lane_settings( -		const struct link_training_settings *lt_settings, -		const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], -		struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], -		union dpcd_training_lane dpcd_lane_settings[]) -{ -	uint32_t lane; - -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_8b_10b_ENCODING) { -			hw_lane_settings[lane].VOLTAGE_SWING = -					(enum dc_voltage_swing)(ln_adjust[lane].bits. -							VOLTAGE_SWING_LANE); -			hw_lane_settings[lane].PRE_EMPHASIS = -					(enum dc_pre_emphasis)(ln_adjust[lane].bits. 
-							PRE_EMPHASIS_LANE); -		} -		else if (dp_get_link_encoding_format(<_settings->link_settings) == -				DP_128b_132b_ENCODING) { -			hw_lane_settings[lane].FFE_PRESET.raw = -					ln_adjust[lane].tx_ffe.PRESET_VALUE; -		} -	} -	dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - -	if (lt_settings->disallow_per_lane_settings) { -		/* we find the maximum of the requested settings across all lanes*/ -		/* and set this maximum for all lanes*/ -		maximize_lane_settings(lt_settings, hw_lane_settings); -		override_lane_settings(lt_settings, hw_lane_settings); - -		if (lt_settings->always_match_dpcd_with_hw_lane_settings) -			dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); -	} - -} - -static uint8_t get_nibble_at_index(const uint8_t *buf, -	uint32_t index) -{ -	uint8_t nibble; -	nibble = buf[index / 2]; - -	if (index % 2) -		nibble >>= 4; -	else -		nibble &= 0x0F; - -	return nibble; -} - -static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( -	enum dc_voltage_swing voltage) -{ -	enum dc_pre_emphasis pre_emphasis; -	pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; - -	if (voltage <= VOLTAGE_SWING_MAX_LEVEL) -		pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; - -	return pre_emphasis; - -} - -static void maximize_lane_settings(const struct link_training_settings *lt_settings, -		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ -	uint32_t lane; -	struct dc_lane_settings max_requested; - -	max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; -	max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; -	max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; - -	/* Determine what the maximum of the requested settings are*/ -	for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { -		if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) -			max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; - -		if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) -			max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; -		if (lane_settings[lane].FFE_PRESET.settings.level > -				max_requested.FFE_PRESET.settings.level) -			max_requested.FFE_PRESET.settings.level = -					lane_settings[lane].FFE_PRESET.settings.level; -	} - -	/* make sure the requested settings are -	 * not higher than maximum settings*/ -	if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) -		max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; - -	if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) -		max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; -	if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) -		max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; - -	/* make sure the pre-emphasis matches the voltage swing*/ -	if (max_requested.PRE_EMPHASIS > -		get_max_pre_emphasis_for_voltage_swing( -			max_requested.VOLTAGE_SWING)) -		max_requested.PRE_EMPHASIS = -		get_max_pre_emphasis_for_voltage_swing( -			max_requested.VOLTAGE_SWING); - -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; -		lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; -		lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; -	} -} - -static void override_lane_settings(const struct link_training_settings *lt_settings, -		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ -	uint32_t lane; - -	if (lt_settings->voltage_swing == NULL && -	    lt_settings->pre_emphasis == NULL && 
-	    lt_settings->ffe_preset == NULL && -	    lt_settings->post_cursor2 == NULL) - -		return; - -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		if (lt_settings->voltage_swing) -			lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; -		if (lt_settings->pre_emphasis) -			lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; -		if (lt_settings->post_cursor2) -			lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; -		if (lt_settings->ffe_preset) -			lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; -	} -} - -enum dc_status dp_get_lane_status_and_lane_adjust( -	struct dc_link *link, -	const struct link_training_settings *link_training_setting, -	union lane_status ln_status[LANE_COUNT_DP_MAX], -	union lane_align_status_updated *ln_align, -	union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], -	uint32_t offset) -{ -	unsigned int lane01_status_address = DP_LANE0_1_STATUS; -	uint8_t lane_adjust_offset = 4; -	unsigned int lane01_adjust_address; -	uint8_t dpcd_buf[6] = {0}; -	uint32_t lane; -	enum dc_status status; - -	if (is_repeater(link_training_setting, offset)) { -		lane01_status_address = -				DP_LANE0_1_STATUS_PHY_REPEATER1 + -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); -		lane_adjust_offset = 3; -	} - -	status = core_link_read_dpcd( -		link, -		lane01_status_address, -		(uint8_t *)(dpcd_buf), -		sizeof(dpcd_buf)); - -	if (status != DC_OK) { -		DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," -			" keep current lane status and lane adjust unchanged", -			__func__, -			lane01_status_address); -		return status; -	} - -	for (lane = 0; lane < -		(uint32_t)(link_training_setting->link_settings.lane_count); -		lane++) { - -		ln_status[lane].raw = -			get_nibble_at_index(&dpcd_buf[0], lane); -		ln_adjust[lane].raw = -			get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); -	} - -	ln_align->raw = dpcd_buf[2]; - -	if (is_repeater(link_training_setting, offset)) { -		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" -				" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", -			__func__, -			offset, -			lane01_status_address, dpcd_buf[0], -			lane01_status_address + 1, dpcd_buf[1]); - -		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - -		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" -				" 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", -					__func__, -					offset, -					lane01_adjust_address, -					dpcd_buf[lane_adjust_offset], -					lane01_adjust_address + 1, -					dpcd_buf[lane_adjust_offset + 1]); -	} else { -		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", -			__func__, -			lane01_status_address, dpcd_buf[0], -			lane01_status_address + 1, dpcd_buf[1]); - -		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; - -		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", -			__func__, -			lane01_adjust_address, -			dpcd_buf[lane_adjust_offset], -			lane01_adjust_address + 1, -			dpcd_buf[lane_adjust_offset + 1]); -	} - -	return status; -} - -static enum dc_status dpcd_128b_132b_set_lane_settings( -		struct dc_link *link, -		const struct link_training_settings *link_training_setting) -{ -	enum dc_status status = core_link_write_dpcd(link, -			DP_TRAINING_LANE0_SET, -			(uint8_t *)(link_training_setting->dpcd_lane_settings), -			sizeof(link_training_setting->dpcd_lane_settings)); - -	DC_LOG_HW_LINK_TRAINING("%s:\n 
0x%X TX_FFE_PRESET_VALUE = %x\n", -			__func__, -			DP_TRAINING_LANE0_SET, -			link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); -	return status; -} - - -enum dc_status dpcd_set_lane_settings( -	struct dc_link *link, -	const struct link_training_settings *link_training_setting, -	uint32_t offset) -{ -	unsigned int lane0_set_address; -	enum dc_status status; - -	lane0_set_address = DP_TRAINING_LANE0_SET; - -	if (is_repeater(link_training_setting, offset)) -		lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + -		((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - -	status = core_link_write_dpcd(link, -		lane0_set_address, -		(uint8_t *)(link_training_setting->dpcd_lane_settings), -		link_training_setting->link_settings.lane_count); - -	if (is_repeater(link_training_setting, offset)) { -		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" -				" 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", -			__func__, -			offset, -			lane0_set_address, -			link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, -			link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, -			link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, -			link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - -	} else { -		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", -			__func__, -			lane0_set_address, -			link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, -			link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, -			link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, -			link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); -	} - -	return status; -} - -bool dp_is_max_vs_reached( -	const struct link_training_settings *lt_settings) -{ -	uint32_t lane; -	for (lane = 0; lane < -		(uint32_t)(lt_settings->link_settings.lane_count); -		lane++) { -		if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET -			== VOLTAGE_SWING_MAX_LEVEL) -			return true; -	} -	return false; - -} - -static bool perform_post_lt_adj_req_sequence( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings) -{ -	enum dc_lane_count lane_count = -	lt_settings->link_settings.lane_count; - -	uint32_t adj_req_count; -	uint32_t adj_req_timer; -	bool req_drv_setting_changed; -	uint32_t lane; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -	union lane_align_status_updated dpcd_lane_status_updated = {0}; -	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -	req_drv_setting_changed = false; -	for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; -	adj_req_count++) { - -		req_drv_setting_changed = false; - -		for (adj_req_timer = 0; -			adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; -			adj_req_timer++) { - -			dp_get_lane_status_and_lane_adjust( -				link, -				lt_settings, -				dpcd_lane_status, -				&dpcd_lane_status_updated, -				dpcd_lane_adjust, -				DPRX); - -			if (dpcd_lane_status_updated.bits. 
-					POST_LT_ADJ_REQ_IN_PROGRESS == 0) -				return true; - -			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) -				return false; - -			if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || -					!dp_is_symbol_locked(lane_count, dpcd_lane_status) || -					!dp_is_interlane_aligned(dpcd_lane_status_updated)) -				return false; - -			for (lane = 0; lane < (uint32_t)(lane_count); lane++) { - -				if (lt_settings-> -				dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != -				dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || -				lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != -				dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { - -					req_drv_setting_changed = true; -					break; -				} -			} - -			if (req_drv_setting_changed) { -				dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -						lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - -				dc_link_dp_set_drive_settings(link, -						link_res, -						lt_settings); -				break; -			} - -			msleep(1); -		} - -		if (!req_drv_setting_changed) { -			DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", -				__func__); - -			ASSERT(0); -			return true; -		} -	} -	DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", -		__func__); - -	ASSERT(0); -	return true; - -} - -/* Only used for channel equalization */ -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) -{ -	unsigned int aux_rd_interval_us = 400; - -	switch (dpcd_aux_read_interval) { -	case 0x01: -		aux_rd_interval_us = 4000; -		break; -	case 0x02: -		aux_rd_interval_us = 8000; -		break; -	case 0x03: -		aux_rd_interval_us = 12000; -		break; -	case 0x04: -		aux_rd_interval_us = 16000; -		break; -	case 0x05: -		aux_rd_interval_us = 32000; -		break; -	case 0x06: -		aux_rd_interval_us = 64000; -		break; -	default: -		break; -	} - -	return aux_rd_interval_us; -} - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, -					union lane_status *dpcd_lane_status) -{ -	enum link_training_result result = LINK_TRAINING_SUCCESS; - -	if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) -		result = LINK_TRAINING_CR_FAIL_LANE0; -	else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) -		result = LINK_TRAINING_CR_FAIL_LANE1; -	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) -		result = LINK_TRAINING_CR_FAIL_LANE23; -	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) -		result = LINK_TRAINING_CR_FAIL_LANE23; -	return result; -} - -static enum link_training_result perform_channel_equalization_sequence( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings, -	uint32_t offset) -{ -	enum dc_dp_training_pattern tr_pattern; -	uint32_t retries_ch_eq; -	uint32_t wait_time_microsec; -	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -	union lane_align_status_updated dpcd_lane_status_updated = {0}; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -	/* Note: also check that TPS4 is a supported feature*/ -	tr_pattern = lt_settings->pattern_for_eq; - -	if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) -		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - -	dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); - -	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; 
-		retries_ch_eq++) { - -		dp_set_hw_lane_settings(link, link_res, lt_settings, offset); - -		/* 2. update DPCD*/ -		if (!retries_ch_eq) -			/* EPR #361076 - write as a 5-byte burst, -			 * but only for the 1-st iteration -			 */ - -			dpcd_set_lt_pattern_and_lane_settings( -				link, -				lt_settings, -				tr_pattern, offset); -		else -			dpcd_set_lane_settings(link, lt_settings, offset); - -		/* 3. wait for receiver to lock-on*/ -		wait_time_microsec = lt_settings->eq_pattern_time; - -		if (is_repeater(lt_settings, offset)) -			wait_time_microsec = -					dp_translate_training_aux_read_interval( -						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); - -		dp_wait_for_training_aux_rd_interval( -				link, -				wait_time_microsec); - -		/* 4. Read lane status and requested -		 * drive settings as set by the sink*/ - -		dp_get_lane_status_and_lane_adjust( -			link, -			lt_settings, -			dpcd_lane_status, -			&dpcd_lane_status_updated, -			dpcd_lane_adjust, -			offset); - -		/* 5. check CR done*/ -		if (!dp_is_cr_done(lane_count, dpcd_lane_status)) -			return dpcd_lane_status[0].bits.CR_DONE_0 ? -					LINK_TRAINING_EQ_FAIL_CR_PARTIAL : -					LINK_TRAINING_EQ_FAIL_CR; - -		/* 6. check CHEQ done*/ -		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && -				dp_is_symbol_locked(lane_count, dpcd_lane_status) && -				dp_is_interlane_aligned(dpcd_lane_status_updated)) -			return LINK_TRAINING_SUCCESS; - -		/* 7. update VS/PE/PC2 in lt_settings*/ -		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -	} - -	return LINK_TRAINING_EQ_FAIL_EQ; - -} - -static void start_clock_recovery_pattern_early(struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings, -		uint32_t offset) -{ -	DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", -			__func__); -	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); -	dp_set_hw_lane_settings(link, link_res, lt_settings, offset); -	udelay(400); -} - -static enum link_training_result perform_clock_recovery_sequence( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings, -	uint32_t offset) -{ -	uint32_t retries_cr; -	uint32_t retry_count; -	uint32_t wait_time_microsec; -	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; -	union lane_align_status_updated dpcd_lane_status_updated; -	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -	retries_cr = 0; -	retry_count = 0; - -	memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); -	memset(&dpcd_lane_status_updated, '\0', -	sizeof(dpcd_lane_status_updated)); - -	if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) -		dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); - -	/* najeeb - The synaptics MST hub can put the LT in -	* infinite loop by switching the VS -	*/ -	/* between level 0 and level 1 continuously, here -	* we try for CR lock for LinkTrainingMaxCRRetry count*/ -	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && -		(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - -		/* 1. call HWSS to set lane settings*/ -		dp_set_hw_lane_settings( -				link, -				link_res, -				lt_settings, -				offset); - -		/* 2. 
update DPCD of the receiver*/ -		if (!retry_count) -			/* EPR #361076 - write as a 5-byte burst, -			 * but only for the 1-st iteration.*/ -			dpcd_set_lt_pattern_and_lane_settings( -					link, -					lt_settings, -					lt_settings->pattern_for_cr, -					offset); -		else -			dpcd_set_lane_settings( -					link, -					lt_settings, -					offset); - -		/* 3. wait receiver to lock-on*/ -		wait_time_microsec = lt_settings->cr_pattern_time; - -		dp_wait_for_training_aux_rd_interval( -				link, -				wait_time_microsec); - -		/* 4. Read lane status and requested drive -		* settings as set by the sink -		*/ -		dp_get_lane_status_and_lane_adjust( -				link, -				lt_settings, -				dpcd_lane_status, -				&dpcd_lane_status_updated, -				dpcd_lane_adjust, -				offset); - -		/* 5. check CR done*/ -		if (dp_is_cr_done(lane_count, dpcd_lane_status)) -			return LINK_TRAINING_SUCCESS; - -		/* 6. max VS reached*/ -		if ((dp_get_link_encoding_format(<_settings->link_settings) == -				DP_8b_10b_ENCODING) && -				dp_is_max_vs_reached(lt_settings)) -			break; - -		/* 7. same lane settings*/ -		/* Note: settings are the same for all lanes, -		 * so comparing first lane is sufficient*/ -		if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && -				lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == -						dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) -			retries_cr++; -		else if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && -				lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == -						dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) -			retries_cr++; -		else -			retries_cr = 0; - -		/* 8. update VS/PE/PC2 in lt_settings*/ -		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -		retry_count++; -	} - -	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { -		ASSERT(0); -		DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", -			__func__, -			LINK_TRAINING_MAX_CR_RETRY); - -	} - -	return dp_get_cr_failure(lane_count, dpcd_lane_status); -} - -static inline enum link_training_result dp_transition_to_video_idle( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings, -	enum link_training_result status) -{ -	union lane_count_set lane_count_set = {0}; - -	/* 4. mainlink output idle pattern*/ -	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - -	/* -	 * 5. post training adjust if required -	 * If the upstream DPTX and downstream DPRX both support TPS4, -	 * TPS4 must be used instead of POST_LT_ADJ_REQ. -	 */ -	if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || -			lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { -		/* delay 5ms after Main Link output idle pattern and then check -		 * DPCD 0202h. 
-		 */ -		if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { -			msleep(5); -			status = dp_check_link_loss_status(link, lt_settings); -		} -		return status; -	} - -	if (status == LINK_TRAINING_SUCCESS && -		perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) -		status = LINK_TRAINING_LQA_FAIL; - -	lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; -	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; -	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - -	core_link_write_dpcd( -		link, -		DP_LANE_COUNT_SET, -		&lane_count_set.raw, -		sizeof(lane_count_set)); - -	return status; -} - -enum link_training_result dp_check_link_loss_status( -	struct dc_link *link, -	const struct link_training_settings *link_training_setting) -{ -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	union lane_status lane_status; -	uint8_t dpcd_buf[6] = {0}; -	uint32_t lane; - -	core_link_read_dpcd( -			link, -			DP_SINK_COUNT, -			(uint8_t *)(dpcd_buf), -			sizeof(dpcd_buf)); - -	/*parse lane status*/ -	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { -		/* -		 * check lanes status -		 */ -		lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane); - -		if (!lane_status.bits.CHANNEL_EQ_DONE_0 || -			!lane_status.bits.CR_DONE_0 || -			!lane_status.bits.SYMBOL_LOCKED_0) { -			/* if one of the channel equalization, clock -			 * recovery or symbol lock is dropped -			 * consider it as (link has been -			 * dropped) dp sink status has changed -			 */ -			status = LINK_TRAINING_LINK_LOSS; -			break; -		} -	} - -	return status; -} - -static inline void decide_8b_10b_training_settings( -	 struct dc_link *link, -	const struct dc_link_settings *link_setting, -	struct link_training_settings *lt_settings) -{ -	memset(lt_settings, '\0', sizeof(struct link_training_settings)); - -	/* Initialize link settings */ -	lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; -	lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; -	lt_settings->link_settings.link_rate = link_setting->link_rate; -	lt_settings->link_settings.lane_count = link_setting->lane_count; -	/* TODO hard coded to SS for now -	 * lt_settings.link_settings.link_spread = -	 * dal_display_path_is_ss_supported( -	 * path_mode->display_path) ? -	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ : -	 * LINK_SPREAD_DISABLED; -	 */ -	lt_settings->link_settings.link_spread = link->dp_ss_off ? 
-			LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; -	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); -	lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); -	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); -	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); -	lt_settings->enhanced_framing = 1; -	lt_settings->should_set_fec_ready = true; -	lt_settings->disallow_per_lane_settings = true; -	lt_settings->always_match_dpcd_with_hw_lane_settings = true; -	lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); -	dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -static inline void decide_128b_132b_training_settings(struct dc_link *link, -		const struct dc_link_settings *link_settings, -		struct link_training_settings *lt_settings) -{ -	memset(lt_settings, 0, sizeof(*lt_settings)); - -	lt_settings->link_settings = *link_settings; -	/* TODO: should decide link spread when populating link_settings */ -	lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : -			LINK_SPREAD_05_DOWNSPREAD_30KHZ; - -	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); -	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); -	lt_settings->eq_pattern_time = 2500; -	lt_settings->eq_wait_time_limit = 400000; -	lt_settings->eq_loop_count_limit = 20; -	lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; -	lt_settings->cds_pattern_time = 2500; -	lt_settings->cds_wait_time_limit = (dp_convert_to_count( -			link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; -	lt_settings->disallow_per_lane_settings = true; -	lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); -	dp_hw_to_dpcd_lane_settings(lt_settings, -			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -void dp_decide_training_settings( -		struct dc_link *link, -		const struct dc_link_settings *link_settings, -		struct link_training_settings *lt_settings) -{ -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) -		decide_8b_10b_training_settings(link, link_settings, lt_settings); -	else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) -		decide_128b_132b_training_settings(link, link_settings, lt_settings); -} - -static void override_training_settings( -		struct dc_link *link, -		const struct dc_link_training_overrides *overrides, -		struct link_training_settings *lt_settings) -{ -	uint32_t lane; - -	/* Override link spread */ -	if (!link->dp_ss_off && overrides->downspread != NULL) -		lt_settings->link_settings.link_spread = *overrides->downspread ? 
-				LINK_SPREAD_05_DOWNSPREAD_30KHZ -				: LINK_SPREAD_DISABLED; - -	/* Override lane settings */ -	if (overrides->voltage_swing != NULL) -		lt_settings->voltage_swing = overrides->voltage_swing; -	if (overrides->pre_emphasis != NULL) -		lt_settings->pre_emphasis = overrides->pre_emphasis; -	if (overrides->post_cursor2 != NULL) -		lt_settings->post_cursor2 = overrides->post_cursor2; -	if (overrides->ffe_preset != NULL) -		lt_settings->ffe_preset = overrides->ffe_preset; -	/* Override HW lane settings with BIOS forced values if present */ -	if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && -			lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { -		lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; -		lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; -		lt_settings->always_match_dpcd_with_hw_lane_settings = false; -	} -	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -		lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = -			lt_settings->voltage_swing != NULL ? -			*lt_settings->voltage_swing : -			VOLTAGE_SWING_LEVEL0; -		lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = -			lt_settings->pre_emphasis != NULL ? -			*lt_settings->pre_emphasis -			: PRE_EMPHASIS_DISABLED; -		lt_settings->hw_lane_settings[lane].POST_CURSOR2 = -			lt_settings->post_cursor2 != NULL ? -			*lt_settings->post_cursor2 -			: POST_CURSOR2_DISABLED; -	} - -	if (lt_settings->always_match_dpcd_with_hw_lane_settings) -		dp_hw_to_dpcd_lane_settings(lt_settings, -				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - -	/* Initialize training timings */ -	if (overrides->cr_pattern_time != NULL) -		lt_settings->cr_pattern_time = *overrides->cr_pattern_time; - -	if (overrides->eq_pattern_time != NULL) -		lt_settings->eq_pattern_time = *overrides->eq_pattern_time; - -	if (overrides->pattern_for_cr != NULL) -		lt_settings->pattern_for_cr = *overrides->pattern_for_cr; -	if (overrides->pattern_for_eq != NULL) -		lt_settings->pattern_for_eq = *overrides->pattern_for_eq; - -	if (overrides->enhanced_framing != NULL) -		lt_settings->enhanced_framing = *overrides->enhanced_framing; - -	if (link->preferred_training_settings.fec_enable != NULL) -		lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; - -	#if defined(CONFIG_DRM_AMD_DC_DCN) -	/* Check DP tunnel LTTPR mode debug option. 
*/ -	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) -		lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; - -#endif -	dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); - -} - -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) -{ -	switch (lttpr_repeater_count) { -	case 0x80: // 1 lttpr repeater -		return 1; -	case 0x40: // 2 lttpr repeaters -		return 2; -	case 0x20: // 3 lttpr repeaters -		return 3; -	case 0x10: // 4 lttpr repeaters -		return 4; -	case 0x08: // 5 lttpr repeaters -		return 5; -	case 0x04: // 6 lttpr repeaters -		return 6; -	case 0x02: // 7 lttpr repeaters -		return 7; -	case 0x01: // 8 lttpr repeaters -		return 8; -	default: -		break; -	} -	return 0; // invalid value -} - -static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) -{ -	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - -	DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); -	return core_link_write_dpcd(link, -			DP_PHY_REPEATER_MODE, -			(uint8_t *)&repeater_mode, -			sizeof(repeater_mode)); -} - -static enum dc_status configure_lttpr_mode_non_transparent( -		struct dc_link *link, -		const struct link_training_settings *lt_settings) -{ -	/* aux timeout is already set to extended */ -	/* RESET/SET lttpr mode to enable non transparent mode */ -	uint8_t repeater_cnt; -	uint32_t aux_interval_address; -	uint8_t repeater_id; -	enum dc_status result = DC_ERROR_UNEXPECTED; -	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - -	enum dp_link_encoding encoding = dp_get_link_encoding_format(<_settings->link_settings); - -	if (encoding == DP_8b_10b_ENCODING) { -		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); -		result = core_link_write_dpcd(link, -				DP_PHY_REPEATER_MODE, -				(uint8_t *)&repeater_mode, -				sizeof(repeater_mode)); - -	} - -	if (result == DC_OK) { -		link->dpcd_caps.lttpr_caps.mode = repeater_mode; -	} - -	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - -		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); - -		repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; -		result = core_link_write_dpcd(link, -				DP_PHY_REPEATER_MODE, -				(uint8_t *)&repeater_mode, -				sizeof(repeater_mode)); - -		if (result == DC_OK) { -			link->dpcd_caps.lttpr_caps.mode = repeater_mode; -		} - -		if (encoding == DP_8b_10b_ENCODING) { -			repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - -			/* Driver does not need to train the first hop. Skip DPCD read and clear -			 * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
-			 */ -			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -				link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; - -			for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { -				aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + -							((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); -				core_link_read_dpcd( -					link, -					aux_interval_address, -					(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], -					sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); -				link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; -			} -		} -	} - -	return result; -} - -static void repeater_training_done(struct dc_link *link, uint32_t offset) -{ -	union dpcd_training_pattern dpcd_pattern = {0}; - -	const uint32_t dpcd_base_lt_offset = -			DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); -	/* Set training not in progress*/ -	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; - -	core_link_write_dpcd( -		link, -		dpcd_base_lt_offset, -		&dpcd_pattern.raw, -		1); - -	DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", -		__func__, -		offset, -		dpcd_base_lt_offset, -		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static void print_status_message( -	struct dc_link *link, -	const struct link_training_settings *lt_settings, -	enum link_training_result status) -{ -	char *link_rate = "Unknown"; -	char *lt_result = "Unknown"; -	char *lt_spread = "Disabled"; - -	switch (lt_settings->link_settings.link_rate) { -	case LINK_RATE_LOW: -		link_rate = "RBR"; -		break; -	case LINK_RATE_RATE_2: -		link_rate = "R2"; -		break; -	case LINK_RATE_RATE_3: -		link_rate = "R3"; -		break; -	case LINK_RATE_HIGH: -		link_rate = "HBR"; -		break; -	case LINK_RATE_RBR2: -		link_rate = "RBR2"; -		break; -	case LINK_RATE_RATE_6: -		link_rate = "R6"; -		break; -	case LINK_RATE_HIGH2: -		link_rate = "HBR2"; -		break; -	case LINK_RATE_HIGH3: -		link_rate = "HBR3"; -		break; -	case LINK_RATE_UHBR10: -		link_rate = "UHBR10"; -		break; -	case LINK_RATE_UHBR13_5: -		link_rate = "UHBR13.5"; -		break; -	case LINK_RATE_UHBR20: -		link_rate = "UHBR20"; -		break; -	default: -		break; -	} - -	switch (status) { -	case LINK_TRAINING_SUCCESS: -		lt_result = "pass"; -		break; -	case LINK_TRAINING_CR_FAIL_LANE0: -		lt_result = "CR failed lane0"; -		break; -	case LINK_TRAINING_CR_FAIL_LANE1: -		lt_result = "CR failed lane1"; -		break; -	case LINK_TRAINING_CR_FAIL_LANE23: -		lt_result = "CR failed lane23"; -		break; -	case LINK_TRAINING_EQ_FAIL_CR: -		lt_result = "CR failed in EQ"; -		break; -	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: -		lt_result = "CR failed in EQ partially"; -		break; -	case LINK_TRAINING_EQ_FAIL_EQ: -		lt_result = "EQ failed"; -		break; -	case LINK_TRAINING_LQA_FAIL: -		lt_result = "LQA failed"; -		break; -	case LINK_TRAINING_LINK_LOSS: -		lt_result = "Link loss"; -		break; -	case DP_128b_132b_LT_FAILED: -		lt_result = "LT_FAILED received"; -		break; -	case DP_128b_132b_MAX_LOOP_COUNT_REACHED: -		lt_result = "max loop count reached"; -		break; -	case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: -		lt_result = "channel EQ timeout"; -		break; -	case DP_128b_132b_CDS_DONE_TIMEOUT: -		lt_result = "CDS timeout"; -		break; -	default: -		break; -	} - -	switch (lt_settings->link_settings.link_spread) { -	case LINK_SPREAD_DISABLED: -		lt_spread = "Disabled"; -		break; -	case LINK_SPREAD_05_DOWNSPREAD_30KHZ: -		lt_spread = "0.5% 30KHz"; -		
break; -	case LINK_SPREAD_05_DOWNSPREAD_33KHZ: -		lt_spread = "0.5% 33KHz"; -		break; -	default: -		break; -	} - -	/* Connectivity log: link training */ - -	/* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ - -	CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", -				link_rate, -				lt_settings->link_settings.lane_count, -				lt_result, -				lt_settings->hw_lane_settings[0].VOLTAGE_SWING, -				lt_settings->hw_lane_settings[0].PRE_EMPHASIS, -				lt_spread); -} - -void dc_link_dp_set_drive_settings( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings) -{ -	/* program ASIC PHY settings*/ -	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - -	dp_hw_to_dpcd_lane_settings(lt_settings, -			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - -	/* Notify DP sink the PHY settings from source */ -	dpcd_set_lane_settings(link, lt_settings, DPRX); -} - -bool dc_link_dp_perform_link_training_skip_aux( -	struct dc_link *link, -	const struct link_resource *link_res, -	const struct dc_link_settings *link_setting) -{ -	struct link_training_settings lt_settings = {0}; - -	dp_decide_training_settings( -			link, -			link_setting, -			<_settings); -	override_training_settings( -			link, -			&link->preferred_training_settings, -			<_settings); - -	/* 1. Perform_clock_recovery_sequence. */ - -	/* transmit training pattern for clock recovery */ -	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); - -	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); - -	/* wait receiver to lock-on*/ -	dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); - -	/* 2. Perform_channel_equalization_sequence. */ - -	/* transmit training pattern for channel equalization. */ -	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); - -	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); - -	/* wait receiver to lock-on. */ -	dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); - -	/* 3. Perform_link_training_int. */ - -	/* Mainlink output idle pattern. 
*/ -	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - -	print_status_message(link, <_settings, LINK_TRAINING_SUCCESS); - -	return true; -} - -enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) -{ -	enum dc_status status = DC_OK; - -	if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) -		status = configure_lttpr_mode_transparent(link); - -	else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) -		status = configure_lttpr_mode_non_transparent(link, lt_settings); - -	return status; -} - -static void dpcd_exit_training_mode(struct dc_link *link) -{ -	uint8_t sink_status = 0; -	uint8_t i; - -	/* clear training pattern set */ -	dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); - -	/* poll for intra-hop disable */ -	for (i = 0; i < 10; i++) { -		if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && -				(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) -			break; -		udelay(1000); -	} -} - -enum dc_status dpcd_configure_channel_coding(struct dc_link *link, -		struct link_training_settings *lt_settings) -{ -	enum dp_link_encoding encoding = -			dp_get_link_encoding_format( -					<_settings->link_settings); -	enum dc_status status; - -	status = core_link_write_dpcd( -			link, -			DP_MAIN_LINK_CHANNEL_CODING_SET, -			(uint8_t *) &encoding, -			1); -	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", -					__func__, -					DP_MAIN_LINK_CHANNEL_CODING_SET, -					encoding); - -	return status; -} - -static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, -		uint32_t *interval_in_us) -{ -	union dp_128b_132b_training_aux_rd_interval dpcd_interval; -	uint32_t interval_unit = 0; - -	dpcd_interval.raw = 0; -	core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL, -			&dpcd_interval.raw, sizeof(dpcd_interval.raw)); -	interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ -	/* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * -	 * INTERVAL_UNIT. 
The maximum is 256 ms -	 */ -	*interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; -} - -static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( -		struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings) -{ -	uint8_t loop_count; -	uint32_t aux_rd_interval = 0; -	uint32_t wait_time = 0; -	union lane_align_status_updated dpcd_lane_status_updated = {0}; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -	enum dc_status status = DC_OK; -	enum link_training_result result = LINK_TRAINING_SUCCESS; -	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -	/* Transmit 128b/132b_TPS1 over Main-Link */ -	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); -	/* Set TRAINING_PATTERN_SET to 01h */ -	dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); - -	/* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ -	dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); -	dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, -			&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); -	dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); -	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); - -	/* Set loop counter to start from 1 */ -	loop_count = 1; - -	/* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ -	dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, -			lt_settings->pattern_for_eq, DPRX); - -	/* poll for channel EQ done */ -	while (result == LINK_TRAINING_SUCCESS) { -		dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); -		wait_time += aux_rd_interval; -		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, -				&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); -		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -		dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); -		if (status != DC_OK) { -			result = LINK_TRAINING_ABORT; -		} else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, -				dpcd_lane_status)) { -			/* pass */ -			break; -		} else if (loop_count >= lt_settings->eq_loop_count_limit) { -			result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; -		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { -			result = DP_128b_132b_LT_FAILED; -		} else { -			dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); -			dpcd_128b_132b_set_lane_settings(link, lt_settings); -		} -		loop_count++; -	} - -	/* poll for EQ interlane align done */ -	while (result == LINK_TRAINING_SUCCESS) { -		if (status != DC_OK) { -			result = LINK_TRAINING_ABORT; -		} else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { -			/* pass */ -			break; -		} else if (wait_time >= lt_settings->eq_wait_time_limit) { -			result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; -		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { -			result = DP_128b_132b_LT_FAILED; -		} else { -			dp_wait_for_training_aux_rd_interval(link, -					lt_settings->eq_pattern_time); -			wait_time += lt_settings->eq_pattern_time; -			status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, -					&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); -		} -	} - -	return result; -} - 
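The dpcd_128b_132b_get_aux_rd_interval() helper removed above turns the UNIT and VALUE fields of DP_128b_132b_TRAINING_AUX_RD_INTERVAL into a wait time of (VALUE + 1) * INTERVAL_UNIT, where the unit is 2 ms when UNIT is clear and 1 ms when it is set, topping out at 256 ms. As a minimal standalone sketch of that arithmetic only (aux_rd_interval_us() is a hypothetical helper, not a DC function, and plain integers stand in for the driver's DPCD union):

#include <stdint.h>
#include <stdio.h>

/* Sketch: mirrors (VALUE + 1) * INTERVAL_UNIT from the removed code above. */
static uint32_t aux_rd_interval_us(uint8_t value, uint8_t unit_is_1ms)
{
	uint32_t interval_unit_ms = unit_is_1ms ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */

	return (value + 1u) * interval_unit_ms * 1000u;
}

int main(void)
{
	/* e.g. VALUE = 3, UNIT = 0 -> (3 + 1) * 2 ms = 8000 us */
	printf("%u us\n", (unsigned int)aux_rd_interval_us(3, 0));
	/* worst case: VALUE = 127, UNIT = 0 -> 256 ms, matching the comment above */
	printf("%u us\n", (unsigned int)aux_rd_interval_us(127, 0));
	return 0;
}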
-static enum link_training_result dp_perform_128b_132b_cds_done_sequence( -		struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings) -{ -	/* Assumption: assume hardware has transmitted eq pattern */ -	enum dc_status status = DC_OK; -	enum link_training_result result = LINK_TRAINING_SUCCESS; -	union lane_align_status_updated dpcd_lane_status_updated = {0}; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; -	uint32_t wait_time = 0; - -	/* initiate CDS done sequence */ -	dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); - -	/* poll for CDS interlane align done and symbol lock */ -	while (result  == LINK_TRAINING_SUCCESS) { -		dp_wait_for_training_aux_rd_interval(link, -				lt_settings->cds_pattern_time); -		wait_time += lt_settings->cds_pattern_time; -		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, -						&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); -		if (status != DC_OK) { -			result = LINK_TRAINING_ABORT; -		} else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && -				dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { -			/* pass */ -			break; -		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { -			result = DP_128b_132b_LT_FAILED; -		} else if (wait_time >= lt_settings->cds_wait_time_limit) { -			result = DP_128b_132b_CDS_DONE_TIMEOUT; -		} -	} - -	return result; -} - -static enum link_training_result dp_perform_8b_10b_link_training( -		struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings) -{ -	enum link_training_result status = LINK_TRAINING_SUCCESS; - -	uint8_t repeater_cnt; -	uint8_t repeater_id; -	uint8_t lane = 0; - -	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) -		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - -	/* 1. set link rate, lane count and spread. */ -	dpcd_set_link_settings(link, lt_settings); - -	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - -		/* 2. 
perform link training (set link training done -		 *  to false is done as well) -		 */ -		repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - -		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); -				repeater_id--) { -			status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - -			if (status != LINK_TRAINING_SUCCESS) { -				repeater_training_done(link, repeater_id); -				break; -			} - -			status = perform_channel_equalization_sequence(link, -					link_res, -					lt_settings, -					repeater_id); - -			repeater_training_done(link, repeater_id); - -			if (status != LINK_TRAINING_SUCCESS) -				break; - -			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -				lt_settings->dpcd_lane_settings[lane].raw = 0; -				lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; -				lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; -			} -		} -	} - -	if (status == LINK_TRAINING_SUCCESS) { -		status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); -		if (status == LINK_TRAINING_SUCCESS) { -			status = perform_channel_equalization_sequence(link, -								       link_res, -								       lt_settings, -								       DPRX); -		} -	} - -	return status; -} - -static enum link_training_result dp_perform_128b_132b_link_training( -		struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings) -{ -	enum link_training_result result = LINK_TRAINING_SUCCESS; - -	/* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ -	if (link->dc->debug.legacy_dp2_lt) { -		struct link_training_settings legacy_settings; - -		decide_8b_10b_training_settings(link, -				&lt_settings->link_settings, -				&legacy_settings); -		return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); -	} - -	dpcd_set_link_settings(link, lt_settings); - -	if (result == LINK_TRAINING_SUCCESS) -		result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); - -	if (result == LINK_TRAINING_SUCCESS) -		result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); - -	return result; -} - -static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( -		struct dc_link *link, -		const struct link_resource *link_res, -		struct link_training_settings *lt_settings) -{ -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	uint8_t lane = 0; -	uint8_t toggle_rate = 0x6; -	uint8_t target_rate = 0x6; -	bool apply_toggle_rate_wa = false; -	uint8_t repeater_cnt; -	uint8_t repeater_id; - -	/* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ -	if (lt_settings->cr_pattern_time < 16000) -		lt_settings->cr_pattern_time = 16000; - -	/* Fixed VS/PE specific: Toggle link rate */ -	apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); -	target_rate = get_dpcd_link_rate(&lt_settings->link_settings); -	toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; - -	if (apply_toggle_rate_wa) -		lt_settings->link_settings.link_rate = toggle_rate; - -	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) -		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - -	/* 1. set link rate, lane count and spread. 
*/ -	dpcd_set_link_settings(link, lt_settings); - -	/* Fixed VS/PE specific: Toggle link rate back*/ -	if (apply_toggle_rate_wa) { -		core_link_write_dpcd( -				link, -				DP_LINK_BW_SET, -				&target_rate, -				1); -	} - -	link->vendor_specific_lttpr_link_rate_wa = target_rate; - -	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - -		/* 2. perform link training (set link training done -		 *  to false is done as well) -		 */ -		repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - -		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); -				repeater_id--) { -			status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - -			if (status != LINK_TRAINING_SUCCESS) { -				repeater_training_done(link, repeater_id); -				break; -			} - -			status = perform_channel_equalization_sequence(link, -					link_res, -					lt_settings, -					repeater_id); - -			repeater_training_done(link, repeater_id); - -			if (status != LINK_TRAINING_SUCCESS) -				break; - -			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { -				lt_settings->dpcd_lane_settings[lane].raw = 0; -				lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; -				lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; -			} -		} -	} - -	if (status == LINK_TRAINING_SUCCESS) { -		status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); -		if (status == LINK_TRAINING_SUCCESS) { -			status = perform_channel_equalization_sequence(link, -								       link_res, -								       lt_settings, -								       DPRX); -		} -	} - -	return status; -} - -static enum link_training_result dp_perform_fixed_vs_pe_training_sequence( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings) -{ -	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; -	const uint8_t offset = dp_convert_to_count( -			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); -	const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; -	const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; -	uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; -	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; -	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; -	uint32_t vendor_lttpr_write_address = 0xF004F; -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	uint8_t lane = 0; -	union down_spread_ctrl downspread = {0}; -	union lane_count_set lane_count_set = {0}; -	uint8_t toggle_rate; -	uint8_t rate; - -	/* Only 8b/10b is supported */ -	ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) == -			DP_8b_10b_ENCODING); - -	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { -		status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); -		return status; -	} - -	if (offset != 0xFF) { -		vendor_lttpr_write_address += -				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - -		/* Certain display and cable configuration require extra delay */ -		if (offset > 2) -			pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; -	} - -	/* Vendor specific: Reset lane settings */ -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_reset[0], -			sizeof(vendor_lttpr_write_data_reset)); -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_vs[0], -		
	sizeof(vendor_lttpr_write_data_vs)); -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_pe[0], -			sizeof(vendor_lttpr_write_data_pe)); - -	/* Vendor specific: Enable intercept */ -	core_link_write_dpcd( -			link, -			vendor_lttpr_write_address, -			&vendor_lttpr_write_data_intercept_en[0], -			sizeof(vendor_lttpr_write_data_intercept_en)); - -	/* 1. set link rate, lane count and spread. */ - -	downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - -	lane_count_set.bits.LANE_COUNT_SET = -	lt_settings->link_settings.lane_count; - -	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; -	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - -	if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { -		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = -				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; -	} - -	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, -		&downspread.raw, sizeof(downspread)); - -	core_link_write_dpcd(link, DP_LANE_COUNT_SET, -		&lane_count_set.raw, 1); - -	rate = get_dpcd_link_rate(&lt_settings->link_settings); - -	/* Vendor specific: Toggle link rate */ -	toggle_rate = (rate == 0x6) ? 0xA : 0x6; - -	if (link->vendor_specific_lttpr_link_rate_wa == rate) { -		core_link_write_dpcd( -				link, -				DP_LINK_BW_SET, -				&toggle_rate, -				1); -	} - -	link->vendor_specific_lttpr_link_rate_wa = rate; - -	core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - -	DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", -		__func__, -		DP_LINK_BW_SET, -		lt_settings->link_settings.link_rate, -		DP_LANE_COUNT_SET, -		lt_settings->link_settings.lane_count, -		lt_settings->enhanced_framing, -		DP_DOWNSPREAD_CTRL, -		lt_settings->link_settings.link_spread); - -	/* 2. Perform link training */ - -	/* Perform Clock Recovery Sequence */ -	if (status == LINK_TRAINING_SUCCESS) { -		const uint8_t max_vendor_dpcd_retries = 10; -		uint32_t retries_cr; -		uint32_t retry_count; -		uint32_t wait_time_microsec; -		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; -		union lane_align_status_updated dpcd_lane_status_updated; -		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; -		enum dc_status dpcd_status = DC_OK; -		uint8_t i = 0; - -		retries_cr = 0; -		retry_count = 0; - -		memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); -		memset(&dpcd_lane_status_updated, '\0', -		sizeof(dpcd_lane_status_updated)); - -		while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && -			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - -			/* 1. call HWSS to set lane settings */ -			dp_set_hw_lane_settings( -					link, -					link_res, -					lt_settings, -					0); - -			/* 2. update DPCD of the receiver */ -			if (!retry_count) { -				/* EPR #361076 - write as a 5-byte burst, -				 * but only for the 1-st iteration. 
-				 */ -				dpcd_set_lt_pattern_and_lane_settings( -						link, -						lt_settings, -						lt_settings->pattern_for_cr, -						0); -				/* Vendor specific: Disable intercept */ -				for (i = 0; i < max_vendor_dpcd_retries; i++) { -					msleep(pre_disable_intercept_delay_ms); -					dpcd_status = core_link_write_dpcd( -							link, -							vendor_lttpr_write_address, -							&vendor_lttpr_write_data_intercept_dis[0], -							sizeof(vendor_lttpr_write_data_intercept_dis)); - -					if (dpcd_status == DC_OK) -						break; - -					core_link_write_dpcd( -							link, -							vendor_lttpr_write_address, -							&vendor_lttpr_write_data_intercept_en[0], -							sizeof(vendor_lttpr_write_data_intercept_en)); -				} -			} else { -				vendor_lttpr_write_data_vs[3] = 0; -				vendor_lttpr_write_data_pe[3] = 0; - -				for (lane = 0; lane < lane_count; lane++) { -					vendor_lttpr_write_data_vs[3] |= -							lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); -					vendor_lttpr_write_data_pe[3] |= -							lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); -				} - -				/* Vendor specific: Update VS and PE to DPRX requested value */ -				core_link_write_dpcd( -						link, -						vendor_lttpr_write_address, -						&vendor_lttpr_write_data_vs[0], -						sizeof(vendor_lttpr_write_data_vs)); -				core_link_write_dpcd( -						link, -						vendor_lttpr_write_address, -						&vendor_lttpr_write_data_pe[0], -						sizeof(vendor_lttpr_write_data_pe)); - -				dpcd_set_lane_settings( -						link, -						lt_settings, -						0); -			} - -			/* 3. wait receiver to lock-on*/ -			wait_time_microsec = lt_settings->cr_pattern_time; - -			dp_wait_for_training_aux_rd_interval( -					link, -					wait_time_microsec); - -			/* 4. Read lane status and requested drive -			 * settings as set by the sink -			 */ -			dp_get_lane_status_and_lane_adjust( -					link, -					lt_settings, -					dpcd_lane_status, -					&dpcd_lane_status_updated, -					dpcd_lane_adjust, -					0); - -			/* 5. check CR done*/ -			if (dp_is_cr_done(lane_count, dpcd_lane_status)) { -				status = LINK_TRAINING_SUCCESS; -				break; -			} - -			/* 6. max VS reached*/ -			if (dp_is_max_vs_reached(lt_settings)) -				break; - -			/* 7. same lane settings */ -			/* Note: settings are the same for all lanes, -			 * so comparing first lane is sufficient -			 */ -			if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == -					dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) -				retries_cr++; -			else -				retries_cr = 0; - -			/* 8. update VS/PE/PC2 in lt_settings*/ -			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -			retry_count++; -		} - -		if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { -			ASSERT(0); -			DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", -				__func__, -				LINK_TRAINING_MAX_CR_RETRY); - -		} - -		status = dp_get_cr_failure(lane_count, dpcd_lane_status); -	} - -	/* Perform Channel EQ Sequence */ -	if (status == LINK_TRAINING_SUCCESS) { -		enum dc_dp_training_pattern tr_pattern; -		uint32_t retries_ch_eq; -		uint32_t wait_time_microsec; -		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -		union lane_align_status_updated dpcd_lane_status_updated = {0}; -		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -		/* Note: also check that TPS4 is a supported feature*/ -		tr_pattern = lt_settings->pattern_for_eq; - -		dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - -		status = LINK_TRAINING_EQ_FAIL_EQ; - -		for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; -			retries_ch_eq++) { - -			dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - -			vendor_lttpr_write_data_vs[3] = 0; -			vendor_lttpr_write_data_pe[3] = 0; - -			for (lane = 0; lane < lane_count; lane++) { -				vendor_lttpr_write_data_vs[3] |= -						lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); -				vendor_lttpr_write_data_pe[3] |= -						lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); -			} - -			/* Vendor specific: Update VS and PE to DPRX requested value */ -			core_link_write_dpcd( -					link, -					vendor_lttpr_write_address, -					&vendor_lttpr_write_data_vs[0], -					sizeof(vendor_lttpr_write_data_vs)); -			core_link_write_dpcd( -					link, -					vendor_lttpr_write_address, -					&vendor_lttpr_write_data_pe[0], -					sizeof(vendor_lttpr_write_data_pe)); - -			/* 2. update DPCD*/ -			if (!retries_ch_eq) -				/* EPR #361076 - write as a 5-byte burst, -				 * but only for the 1-st iteration -				 */ - -				dpcd_set_lt_pattern_and_lane_settings( -					link, -					lt_settings, -					tr_pattern, 0); -			else -				dpcd_set_lane_settings(link, lt_settings, 0); - -			/* 3. wait for receiver to lock-on*/ -			wait_time_microsec = lt_settings->eq_pattern_time; - -			dp_wait_for_training_aux_rd_interval( -					link, -					wait_time_microsec); - -			/* 4. Read lane status and requested -			 * drive settings as set by the sink -			 */ -			dp_get_lane_status_and_lane_adjust( -				link, -				lt_settings, -				dpcd_lane_status, -				&dpcd_lane_status_updated, -				dpcd_lane_adjust, -				0); - -			/* 5. check CR done*/ -			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { -				status = LINK_TRAINING_EQ_FAIL_CR; -				break; -			} - -			/* 6. check CHEQ done*/ -			if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && -					dp_is_symbol_locked(lane_count, dpcd_lane_status) && -					dp_is_interlane_aligned(dpcd_lane_status_updated)) { -				status = LINK_TRAINING_SUCCESS; -				break; -			} - -			/* 7. 
update VS/PE/PC2 in lt_settings*/ -			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -		} -	} - -	return status; -} - - -enum link_training_result dc_link_dp_perform_link_training( -	struct dc_link *link, -	const struct link_resource *link_res, -	const struct dc_link_settings *link_settings, -	bool skip_video_pattern) -{ -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	struct link_training_settings lt_settings = {0}; -	enum dp_link_encoding encoding = -			dp_get_link_encoding_format(link_settings); - -	/* decide training settings */ -	dp_decide_training_settings( -			link, -			link_settings, -			&lt_settings); - -	override_training_settings( -			link, -			&link->preferred_training_settings, -			&lt_settings); - -	/* reset previous training states */ -	dpcd_exit_training_mode(link); - -	/* configure link prior to entering training mode */ -	dpcd_configure_lttpr_mode(link, &lt_settings); -	dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); -	dpcd_configure_channel_coding(link, &lt_settings); - -	/* enter training mode: -	 * Per DP specs starting from here, DPTX device shall not issue -	 * Non-LT AUX transactions inside training mode. -	 */ -	if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING) -		status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); -	else if (encoding == DP_8b_10b_ENCODING) -		status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings); -	else if (encoding == DP_128b_132b_ENCODING) -		status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings); -	else -		ASSERT(0); - -	/* exit training mode */ -	dpcd_exit_training_mode(link); - -	/* switch to video idle */ -	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) -		status = dp_transition_to_video_idle(link, -				link_res, -				&lt_settings, -				status); - -	/* dump debug data */ -	print_status_message(link, &lt_settings, status); -	if (status != LINK_TRAINING_SUCCESS) -		link->ctx->dc->debug_data.ltFailCount++; -	return status; -} - -bool perform_link_training_with_retries( -	const struct dc_link_settings *link_setting, -	bool skip_video_pattern, -	int attempts, -	struct pipe_ctx *pipe_ctx, -	enum signal_type signal, -	bool do_fallback) -{ -	int j; -	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct dc_link *link = stream->link; -	enum dp_panel_mode panel_mode = dp_get_panel_mode(link); -	enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; -	struct dc_link_settings cur_link_settings = *link_setting; -	struct dc_link_settings max_link_settings = *link_setting; -	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); -	int fail_count = 0; -	bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ -	bool is_link_bw_min = /* RBR x 1 */ -		(cur_link_settings.link_rate <= LINK_RATE_LOW) && -		(cur_link_settings.lane_count <= LANE_COUNT_ONE); - -	dp_trace_commit_lt_init(link); - -	if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) -		/* We need to do this before the link training to ensure the idle -		 * pattern in SST mode will be sent right after the link training -		 */ -		link_hwss->setup_stream_encoder(pipe_ctx); - -	dp_trace_set_lt_start_timestamp(link, false); -	j = 0; -	while (j < attempts && fail_count < (attempts * 10)) { - -		DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt 
%u of %d @ rate(%d) x lane(%d)\n", -			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, -			cur_link_settings.lane_count); - -		dp_enable_link_phy( -			link, -			&pipe_ctx->link_res, -			signal, -			pipe_ctx->clock_source->id, -			&cur_link_settings); - -		if (stream->sink_patches.dppowerup_delay > 0) { -			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - -			msleep(delay_dp_power_up_in_ms); -		} - -#ifdef CONFIG_DRM_AMD_DC_HDCP -		if (panel_mode == DP_PANEL_MODE_EDP) { -			struct cp_psp *cp_psp = &stream->ctx->cp_psp; - -			if (cp_psp && cp_psp->funcs.enable_assr) -				/* ASSR is bound to fail with unsigned PSP -				 * verstage used during devlopment phase. -				 * Report and continue with eDP panel mode to -				 * perform eDP link training with right settings -				 */ -				cp_psp->funcs.enable_assr(cp_psp->handle, link); -		} -#endif - -		dp_set_panel_mode(link, panel_mode); - -		if (link->aux_access_disabled) { -			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); -			return true; -		} else { -			/** @todo Consolidate USB4 DP and DPx.x training. */ -			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { -				status = dc_link_dpia_perform_link_training(link, -						&pipe_ctx->link_res, -						&cur_link_settings, -						skip_video_pattern); - -				/* Transmit idle pattern once training successful. */ -				if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { -					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); -					/* Update verified link settings to current one -					 * Because DPIA LT might fallback to lower link setting. -					 */ -					link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; -					link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; -				} -			} else { -				status = dc_link_dp_perform_link_training(link, -						&pipe_ctx->link_res, -						&cur_link_settings, -						skip_video_pattern); -			} - -			dp_trace_lt_total_count_increment(link, false); -			dp_trace_lt_result_update(link, status, false); -			dp_trace_set_lt_end_timestamp(link, false); -			if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) -				return true; -		} - -		fail_count++; -		dp_trace_lt_fail_count_update(link, fail_count, false); -		if (link->ep_type == DISPLAY_ENDPOINT_PHY) { -			/* latest link training still fail or link training is aborted -			 * skip delay and keep PHY on -			 */ -			if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) -				break; -		} - -		DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n", -			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, -			cur_link_settings.lane_count, status); - -		dp_disable_link_phy(link, &pipe_ctx->link_res, signal); - -		/* Abort link training if failure due to sink being unplugged. */ -		if (status == LINK_TRAINING_ABORT) { -			enum dc_connection_type type = dc_connection_none; - -			dc_link_detect_sink(link, &type); -			if (type == dc_connection_none) { -				DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); -				break; -			} -		} - -		/* Try to train again at original settings if: -		 * - not falling back between training attempts; -		 * - aborted previous attempt due to reasons other than sink unplug; -		 * - successfully trained but at a link rate lower than that required by stream; -		 * - reached minimum link bandwidth. 
-		 */ -		if (!do_fallback || (status == LINK_TRAINING_ABORT) || -				(status == LINK_TRAINING_SUCCESS && is_link_bw_low) || -				is_link_bw_min) { -			j++; -			cur_link_settings = *link_setting; -			delay_between_attempts += LINK_TRAINING_RETRY_DELAY; -			is_link_bw_low = false; -			is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && -				(cur_link_settings.lane_count <= LANE_COUNT_ONE); - -		} else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ -			uint32_t req_bw; -			uint32_t link_bw; - -			decide_fallback_link_setting(link, &max_link_settings, -					&cur_link_settings, status); -			/* Fail link training if reduced link bandwidth no longer meets -			 * stream requirements. -			 */ -			req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); -			link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); -			is_link_bw_low = (req_bw > link_bw); -			is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && -				(cur_link_settings.lane_count <= LANE_COUNT_ONE)); -			if (is_link_bw_low) -				DC_LOG_WARNING( -					"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", -					__func__, link->link_index, req_bw, link_bw); -		} - -		msleep(delay_between_attempts); -	} -	return false; -} - -static enum clock_source_id get_clock_source_id(struct dc_link *link) -{ -	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; -	struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; - -	if (dp_cs != NULL) { -		dp_cs_id = dp_cs->id; -	} else { -		/* -		 * dp clock source is not initialized for some reason. -		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used -		 */ -		ASSERT(dp_cs); -	} - -	return dp_cs_id; -} - -static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res, -		bool mst_enable) -{ -	if (mst_enable == false && -		link->type == dc_connection_mst_branch) { -		/* Disable MST on link. Use only local sink. */ -		dp_disable_link_phy_mst(link, link_res, link->connector_signal); - -		link->type = dc_connection_single; -		link->local_sink = link->remote_sinks[0]; -		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT; -		dc_sink_retain(link->local_sink); -		dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); -	} else if (mst_enable == true && -			link->type == dc_connection_single && -			link->remote_sinks[0] != NULL) { -		/* Re-enable MST on link. */ -		dp_disable_link_phy(link, link_res, link->connector_signal); -		dp_enable_mst_on_sink(link, true); - -		link->type = dc_connection_mst_branch; -		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST; -	} -} - -bool dc_link_dp_sync_lt_begin(struct dc_link *link) -{ -	/* Begin Sync LT. During this time, -	 * DPCD:600h must not be powered down. 
-	link->sync_lt_in_progress = true; - -	/*Clear any existing preferred settings.*/ -	memset(&link->preferred_training_settings, 0, -		sizeof(struct dc_link_training_overrides)); -	memset(&link->preferred_link_setting, 0, -		sizeof(struct dc_link_settings)); - -	return true; -} - -enum link_training_result dc_link_dp_sync_lt_attempt( -    struct dc_link *link, -    const struct link_resource *link_res, -    struct dc_link_settings *link_settings, -    struct dc_link_training_overrides *lt_overrides) -{ -	struct link_training_settings lt_settings = {0}; -	enum link_training_result lt_status = LINK_TRAINING_SUCCESS; -	enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT; -	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; -	bool fec_enable = false; - -	dp_decide_training_settings( -			link, -			link_settings, -			&lt_settings); -	override_training_settings( -			link, -			lt_overrides, -			&lt_settings); -	/* Setup MST Mode */ -	if (lt_overrides->mst_enable) -		set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable); - -	/* Disable link */ -	dp_disable_link_phy(link, link_res, link->connector_signal); - -	/* Enable link */ -	dp_cs_id = get_clock_source_id(link); -	dp_enable_link_phy(link, link_res, link->connector_signal, -		dp_cs_id, link_settings); - -	/* Set FEC enable */ -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { -		fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; -		dp_set_fec_ready(link, NULL, fec_enable); -	} - -	if (lt_overrides->alternate_scrambler_reset) { -		if (*lt_overrides->alternate_scrambler_reset) -			panel_mode = DP_PANEL_MODE_EDP; -		else -			panel_mode = DP_PANEL_MODE_DEFAULT; -	} else -		panel_mode = dp_get_panel_mode(link); - -	dp_set_panel_mode(link, panel_mode); - -	/* Attempt to train with given link training settings */ -	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) -		start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX); - -	/* Set link rate, lane count and spread. */ -	dpcd_set_link_settings(link, &lt_settings); - -	/* 2. perform link training (set link training done -	 *  to false is done as well) -	 */ -	lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX); -	if (lt_status == LINK_TRAINING_SUCCESS) { -		lt_status = perform_channel_equalization_sequence(link, -						link_res, -						&lt_settings, -						DPRX); -	} - -	/* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ -	/* 4. print status message*/ -	print_status_message(link, &lt_settings, lt_status); - -	return lt_status; -} - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) -{ -	/* If input parameter is set, shut down phy. 
-	 * Still shouldn't turn off dp_receiver (DPCD:600h) -	 */ -	if (link_down == true) { -		struct dc_link_settings link_settings = link->cur_link_settings; -		dp_disable_link_phy(link, NULL, link->connector_signal); -		if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) -			dp_set_fec_ready(link, NULL, false); -	} - -	link->sync_lt_in_progress = false; -	return true; -} - -static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) -{ -	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; - -	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) -		lttpr_max_link_rate = LINK_RATE_UHBR20; -	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) -		lttpr_max_link_rate = LINK_RATE_UHBR13_5; -	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) -		lttpr_max_link_rate = LINK_RATE_UHBR10; - -	return lttpr_max_link_rate; -} - -static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) -{ -	enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3; - -	if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) -		cable_max_link_rate = LINK_RATE_UHBR20; -	else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) -		cable_max_link_rate = LINK_RATE_UHBR13_5; -	else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) -		cable_max_link_rate = LINK_RATE_UHBR10; - -	return cable_max_link_rate; -} - -bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) -{ -	struct link_encoder *link_enc = NULL; - -	if (!max_link_enc_cap) { -		DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); -		return false; -	} - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	if (link_enc && link_enc->funcs->get_max_link_cap) { -		link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); -		return true; -	} - -	DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); -	max_link_enc_cap->lane_count = 1; -	max_link_enc_cap->link_rate = 6; -	return false; -} - - -struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) -{ -	struct dc_link_settings max_link_cap = {0}; -	enum dc_link_rate lttpr_max_link_rate; -	enum dc_link_rate cable_max_link_rate; -	struct link_encoder *link_enc = NULL; - - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	/* get max link encoder capability */ -	if (link_enc) -		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); - -	/* Lower link settings based on sink's link cap */ -	if (link->reported_link_cap.lane_count < max_link_cap.lane_count) -		max_link_cap.lane_count = -				link->reported_link_cap.lane_count; -	if (link->reported_link_cap.link_rate < max_link_cap.link_rate) -		max_link_cap.link_rate = -				link->reported_link_cap.link_rate; -	if (link->reported_link_cap.link_spread < -			max_link_cap.link_spread) -		max_link_cap.link_spread = -				link->reported_link_cap.link_spread; - -	/* Lower link settings based on cable attributes */ -	cable_max_link_rate = get_cable_max_link_rate(link); - -	if (!link->dc->debug.ignore_cable_id && -			cable_max_link_rate < max_link_cap.link_rate) -		max_link_cap.link_rate = cable_max_link_rate; - -	/* -	 * account for lttpr repeaters cap -	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
-	 */ -	if (dp_is_lttpr_present(link)) { -		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) -			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; -		lttpr_max_link_rate = get_lttpr_max_link_rate(link); - -		if (lttpr_max_link_rate < max_link_cap.link_rate) -			max_link_cap.link_rate = lttpr_max_link_rate; - -		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR,  max_lane count %d max_link rate %d \n", -						__func__, -						max_link_cap.lane_count, -						max_link_cap.link_rate); -	} - -	if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && -			link->dc->debug.disable_uhbr) -		max_link_cap.link_rate = LINK_RATE_HIGH3; - -	return max_link_cap; -} - -static enum dc_status read_hpd_rx_irq_data( -	struct dc_link *link, -	union hpd_irq_data *irq_data) -{ -	static enum dc_status retval; - -	/* The HW reads 16 bytes from 200h on HPD, -	 * but if we get an AUX_DEFER, the HW cannot retry -	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to -	 * fail, so we now explicitly read 6 bytes which is -	 * the req from the above mentioned test cases. -	 * -	 * For DP 1.4 we need to read those from 2002h range. -	 */ -	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) -		retval = core_link_read_dpcd( -			link, -			DP_SINK_COUNT, -			irq_data->raw, -			sizeof(union hpd_irq_data)); -	else { -		/* Read 14 bytes in a single read and then copy only the required fields. -		 * This is more efficient than doing it in two separate AUX reads. */ - -		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; - -		retval = core_link_read_dpcd( -			link, -			DP_SINK_COUNT_ESI, -			tmp, -			sizeof(tmp)); - -		if (retval != DC_OK) -			return retval; - -		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; -		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; -		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; -		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; -		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; -		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; -	} - -	return retval; -} - -bool hpd_rx_irq_check_link_loss_status( -	struct dc_link *link, -	union hpd_irq_data *hpd_irq_dpcd_data) -{ -	uint8_t irq_reg_rx_power_state = 0; -	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; -	union lane_status lane_status; -	uint32_t lane; -	bool sink_status_changed; -	bool return_code; - -	sink_status_changed = false; -	return_code = false; - -	if (link->cur_link_settings.lane_count == 0) -		return return_code; - -	/*1. 
Check that Link Status changed, before re-training.*/ - -	/*parse lane status*/ -	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { -		/* check status of lanes 0,1 -		 * changed DpcdAddress_Lane01Status (0x202) -		 */ -		lane_status.raw = get_nibble_at_index( -			&hpd_irq_dpcd_data->bytes.lane01_status.raw, -			lane); - -		if (!lane_status.bits.CHANNEL_EQ_DONE_0 || -			!lane_status.bits.CR_DONE_0 || -			!lane_status.bits.SYMBOL_LOCKED_0) { -			/* if one of the channel equalization, clock -			 * recovery or symbol lock is dropped -			 * consider it as (link has been -			 * dropped) dp sink status has changed -			 */ -			sink_status_changed = true; -			break; -		} -	} - -	/* Check interlane align.*/ -	if (sink_status_changed || -		!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { - -		DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); - -		return_code = true; - -		/*2. Check that we can handle interrupt: Not in FS DOS, -		 *  Not in "Display Timeout" state, Link is trained. -		 */ -		dpcd_result = core_link_read_dpcd(link, -			DP_SET_POWER, -			&irq_reg_rx_power_state, -			sizeof(irq_reg_rx_power_state)); - -		if (dpcd_result != DC_OK) { -			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", -				__func__); -		} else { -			if (irq_reg_rx_power_state != DP_SET_POWER_D0) -				return_code = false; -		} -	} - -	return return_code; -} - -static bool dp_verify_link_cap( -	struct dc_link *link, -	struct dc_link_settings *known_limit_link_setting, -	int *fail_count) -{ -	struct dc_link_settings cur_link_settings = {0}; -	struct dc_link_settings max_link_settings = *known_limit_link_setting; -	bool success = false; -	bool skip_video_pattern; -	enum clock_source_id dp_cs_id = get_clock_source_id(link); -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	union hpd_irq_data irq_data; -	struct link_resource link_res; - -	memset(&irq_data, 0, sizeof(irq_data)); -	cur_link_settings = max_link_settings; - -	/* Grant extended timeout request */ -	if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { -		uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; - -		core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); -	} - -	do { -		if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) -			continue; - -		skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; -		dp_enable_link_phy( -				link, -				&link_res, -				link->connector_signal, -				dp_cs_id, -				&cur_link_settings); - -		status = dc_link_dp_perform_link_training( -				link, -				&link_res, -				&cur_link_settings, -				skip_video_pattern); - -		if (status == LINK_TRAINING_SUCCESS) { -			success = true; -			udelay(1000); -			if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK && -					hpd_rx_irq_check_link_loss_status( -							link, -							&irq_data)) -				(*fail_count)++; - -		} else { -			(*fail_count)++; -		} -		dp_trace_lt_total_count_increment(link, true); -		dp_trace_lt_result_update(link, status, true); -		dp_disable_link_phy(link, &link_res, link->connector_signal); -	} while (!success && decide_fallback_link_setting(link, -			&max_link_settings, &cur_link_settings, status)); - -	link->verified_link_cap = success ? 
-			cur_link_settings : fail_safe_link_settings; -	return success; -} - -static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, -		struct dc_link_settings *link_settings) -{ -	/* Temporary Renoir-specific workaround PHY will sometimes be in bad -	 * state on hotplugging display from certain USB-C dongle, so add extra -	 * cycle of enabling and disabling the PHY before first link training. -	 */ -	struct link_resource link_res = {0}; -	enum clock_source_id dp_cs_id = get_clock_source_id(link); - -	dp_enable_link_phy(link, &link_res, link->connector_signal, -			dp_cs_id, link_settings); -	dp_disable_link_phy(link, &link_res, link->connector_signal); -} - -bool dp_verify_link_cap_with_retries( -	struct dc_link *link, -	struct dc_link_settings *known_limit_link_setting, -	int attempts) -{ -	int i = 0; -	bool success = false; -	int fail_count = 0; - -	dp_trace_detect_lt_init(link); - -	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && -			link->dc->debug.usbc_combo_phy_reset_wa) -		apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); - -	dp_trace_set_lt_start_timestamp(link, false); -	for (i = 0; i < attempts; i++) { -		enum dc_connection_type type = dc_connection_none; - -		memset(&link->verified_link_cap, 0, -				sizeof(struct dc_link_settings)); -		if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { -			link->verified_link_cap = fail_safe_link_settings; -			break; -		} else if (dp_verify_link_cap(link, known_limit_link_setting, -				&fail_count) && fail_count == 0) { -			success = true; -			break; -		} -		msleep(10); -	} - -	dp_trace_lt_fail_count_update(link, fail_count, true); -	dp_trace_set_lt_end_timestamp(link, true); - -	return success; -} - -/* in DP compliance test, DPR-120 may have - * a random value in its MAX_LINK_BW dpcd field. - * We map it to the maximum supported link rate that - * is smaller than MAX_LINK_BW in this case. 
- */ -static enum dc_link_rate get_link_rate_from_max_link_bw( -		 uint8_t max_link_bw) -{ -	enum dc_link_rate link_rate; - -	if (max_link_bw >= LINK_RATE_HIGH3) { -		link_rate = LINK_RATE_HIGH3; -	} else if (max_link_bw < LINK_RATE_HIGH3 -			&& max_link_bw >= LINK_RATE_HIGH2) { -		link_rate = LINK_RATE_HIGH2; -	} else if (max_link_bw < LINK_RATE_HIGH2 -			&& max_link_bw >= LINK_RATE_HIGH) { -		link_rate = LINK_RATE_HIGH; -	} else if (max_link_bw < LINK_RATE_HIGH -			&& max_link_bw >= LINK_RATE_LOW) { -		link_rate = LINK_RATE_LOW; -	} else { -		link_rate = LINK_RATE_UNKNOWN; -	} - -	return link_rate; -} - -static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) -{ -	return lane_count <= LANE_COUNT_ONE; -} - -static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) -{ -	return link_rate <= LINK_RATE_LOW; -} - -static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) -{ -	switch (lane_count) { -	case LANE_COUNT_FOUR: -		return LANE_COUNT_TWO; -	case LANE_COUNT_TWO: -		return LANE_COUNT_ONE; -	case LANE_COUNT_ONE: -		return LANE_COUNT_UNKNOWN; -	default: -		return LANE_COUNT_UNKNOWN; -	} -} - -static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) -{ -	switch (link_rate) { -	case LINK_RATE_UHBR20: -		return LINK_RATE_UHBR13_5; -	case LINK_RATE_UHBR13_5: -		return LINK_RATE_UHBR10; -	case LINK_RATE_UHBR10: -		return LINK_RATE_HIGH3; -	case LINK_RATE_HIGH3: -		return LINK_RATE_HIGH2; -	case LINK_RATE_HIGH2: -		return LINK_RATE_HIGH; -	case LINK_RATE_HIGH: -		return LINK_RATE_LOW; -	case LINK_RATE_LOW: -		return LINK_RATE_UNKNOWN; -	default: -		return LINK_RATE_UNKNOWN; -	} -} - -static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) -{ -	switch (lane_count) { -	case LANE_COUNT_ONE: -		return LANE_COUNT_TWO; -	case LANE_COUNT_TWO: -		return LANE_COUNT_FOUR; -	default: -		return LANE_COUNT_UNKNOWN; -	} -} - -static enum dc_link_rate increase_link_rate(struct dc_link *link, -		enum dc_link_rate link_rate) -{ -	switch (link_rate) { -	case LINK_RATE_LOW: -		return LINK_RATE_HIGH; -	case LINK_RATE_HIGH: -		return LINK_RATE_HIGH2; -	case LINK_RATE_HIGH2: -		return LINK_RATE_HIGH3; -	case LINK_RATE_HIGH3: -		return LINK_RATE_UHBR10; -	case LINK_RATE_UHBR10: -		/* upto DP2.x specs UHBR13.5 is the only link rate that could be -		 * not supported by DPRX when higher link rate is supported. -		 * so we treat it as a special case for code simplicity. When we -		 * have new specs with more link rates like this, we should -		 * consider a more generic solution to handle discrete link -		 * rate capabilities. -		 */ -		return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? 
-				LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; -	case LINK_RATE_UHBR13_5: -		return LINK_RATE_UHBR20; -	default: -		return LINK_RATE_UNKNOWN; -	} -} - -static bool decide_fallback_link_setting_max_bw_policy( -		struct dc_link *link, -		const struct dc_link_settings *max, -		struct dc_link_settings *cur, -		enum link_training_result training_result) -{ -	uint8_t cur_idx = 0, next_idx; -	bool found = false; - -	if (training_result == LINK_TRAINING_ABORT) -		return false; - -	while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) -		/* find current index */ -		if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && -				dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) -			break; -		else -			cur_idx++; - -	next_idx = cur_idx + 1; - -	while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) -		/* find next index */ -		if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || -				dp_lt_fallbacks[next_idx].link_rate > max->link_rate) -			next_idx++; -		else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && -				link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) -			/* upto DP2.x specs UHBR13.5 is the only link rate that -			 * could be not supported by DPRX when higher link rate -			 * is supported. so we treat it as a special case for -			 * code simplicity. When we have new specs with more -			 * link rates like this, we should consider a more -			 * generic solution to handle discrete link rate -			 * capabilities. -			 */ -			next_idx++; -		else -			break; - -	if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { -		cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; -		cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; -		found = true; -	} - -	return found; -} - -/* - * function: set link rate and lane count fallback based - * on current link setting and last link training result - * return value: - *			true - link setting could be set - *			false - has reached minimum setting - *					and no further fallback could be done - */ -static bool decide_fallback_link_setting( -		struct dc_link *link, -		struct dc_link_settings *max, -		struct dc_link_settings *cur, -		enum link_training_result training_result) -{ -	if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || -			link->dc->debug.force_dp2_lt_fallback_method) -		return decide_fallback_link_setting_max_bw_policy(link, max, cur, -				training_result); - -	switch (training_result) { -	case LINK_TRAINING_CR_FAIL_LANE0: -	case LINK_TRAINING_CR_FAIL_LANE1: -	case LINK_TRAINING_CR_FAIL_LANE23: -	case LINK_TRAINING_LQA_FAIL: -	{ -		if (!reached_minimum_link_rate(cur->link_rate)) { -			cur->link_rate = reduce_link_rate(cur->link_rate); -		} else if (!reached_minimum_lane_count(cur->lane_count)) { -			cur->link_rate = max->link_rate; -			if (training_result == LINK_TRAINING_CR_FAIL_LANE0) -				return false; -			else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) -				cur->lane_count = LANE_COUNT_ONE; -			else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) -				cur->lane_count = LANE_COUNT_TWO; -			else -				cur->lane_count = reduce_lane_count(cur->lane_count); -		} else { -			return false; -		} -		break; -	} -	case LINK_TRAINING_EQ_FAIL_EQ: -	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: -	{ -		if (!reached_minimum_lane_count(cur->lane_count)) { -			cur->lane_count = reduce_lane_count(cur->lane_count); -		} else if (!reached_minimum_link_rate(cur->link_rate)) { -			cur->link_rate = reduce_link_rate(cur->link_rate); -			/* Reduce max link rate to avoid potential infinite loop. 
-			 * Needed so that any subsequent CR_FAIL fallback can't -			 * re-set the link rate higher than the link rate from -			 * the latest EQ_FAIL fallback. -			 */ -			max->link_rate = cur->link_rate; -			cur->lane_count = max->lane_count; -		} else { -			return false; -		} -		break; -	} -	case LINK_TRAINING_EQ_FAIL_CR: -	{ -		if (!reached_minimum_link_rate(cur->link_rate)) { -			cur->link_rate = reduce_link_rate(cur->link_rate); -			/* Reduce max link rate to avoid potential infinite loop. -			 * Needed so that any subsequent CR_FAIL fallback can't -			 * re-set the link rate higher than the link rate from -			 * the latest EQ_FAIL fallback. -			 */ -			max->link_rate = cur->link_rate; -			cur->lane_count = max->lane_count; -		} else { -			return false; -		} -		break; -	} -	default: -		return false; -	} -	return true; -} - -bool dp_validate_mode_timing( -	struct dc_link *link, -	const struct dc_crtc_timing *timing) -{ -	uint32_t req_bw; -	uint32_t max_bw; - -	const struct dc_link_settings *link_setting; - -	/* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ -	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && -			!link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && -			dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) -		return false; - -	/*always DP fail safe mode*/ -	if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && -		timing->h_addressable == (uint32_t) 640 && -		timing->v_addressable == (uint32_t) 480) -		return true; - -	link_setting = dc_link_get_link_cap(link); - -	/* TODO: DYNAMIC_VALIDATION needs to be implemented */ -	/*if (flags.DYNAMIC_VALIDATION == 1 && -		link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN) -		link_setting = &link->verified_link_cap; -	*/ - -	req_bw = dc_bandwidth_in_kbps_from_timing(timing); -	max_bw = dc_link_bandwidth_kbps(link, link_setting); - -	if (req_bw <= max_bw) { -		/* remember the biggest mode here, during -		 * initial link training (to get -		 * verified_link_cap), LS sends event about -		 * cannot train at reported cap to upper -		 * layer and upper layer will re-enumerate modes. -		 * this is not necessary if the lower -		 * verified_link_cap is enough to drive -		 * all the modes */ - -		/* TODO: DYNAMIC_VALIDATION needs to be implemented */ -		/* if (flags.DYNAMIC_VALIDATION == 1) -			dpsst->max_req_bw_for_verified_linkcap = dal_max( -				dpsst->max_req_bw_for_verified_linkcap, req_bw); */ -		return true; -	} else -		return false; -} - -static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ -	struct dc_link_settings initial_link_setting = { -		LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; -	struct dc_link_settings current_link_setting = -			initial_link_setting; -	uint32_t link_bw; - -	if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) -		return false; - -	/* search for the minimum link setting that: -	 * 1. is supported according to the link training result -	 * 2. 
could support the b/w requested by the timing -	 */ -	while (current_link_setting.link_rate <= -			link->verified_link_cap.link_rate) { -		link_bw = dc_link_bandwidth_kbps( -				link, -				&current_link_setting); -		if (req_bw <= link_bw) { -			*link_setting = current_link_setting; -			return true; -		} - -		if (current_link_setting.lane_count < -				link->verified_link_cap.lane_count) { -			current_link_setting.lane_count = -					increase_lane_count( -							current_link_setting.lane_count); -		} else { -			current_link_setting.link_rate = -					increase_link_rate(link, -							current_link_setting.link_rate); -			current_link_setting.lane_count = -					initial_link_setting.lane_count; -		} -	} - -	return false; -} - -bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ -	struct dc_link_settings initial_link_setting; -	struct dc_link_settings current_link_setting; -	uint32_t link_bw; - -	/* -	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. -	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" -	 */ -	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || -			link->dpcd_caps.edp_supported_link_rates_count == 0) { -		*link_setting = link->verified_link_cap; -		return true; -	} - -	memset(&initial_link_setting, 0, sizeof(initial_link_setting)); -	initial_link_setting.lane_count = LANE_COUNT_ONE; -	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; -	initial_link_setting.link_spread = LINK_SPREAD_DISABLED; -	initial_link_setting.use_link_rate_set = true; -	initial_link_setting.link_rate_set = 0; -	current_link_setting = initial_link_setting; - -	/* search for the minimum link setting that: -	 * 1. is supported according to the link training result -	 * 2. could support the b/w requested by the timing -	 */ -	while (current_link_setting.link_rate <= -			link->verified_link_cap.link_rate) { -		link_bw = dc_link_bandwidth_kbps( -				link, -				&current_link_setting); -		if (req_bw <= link_bw) { -			*link_setting = current_link_setting; -			return true; -		} - -		if (current_link_setting.lane_count < -				link->verified_link_cap.lane_count) { -			current_link_setting.lane_count = -					increase_lane_count( -							current_link_setting.lane_count); -		} else { -			if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { -				current_link_setting.link_rate_set++; -				current_link_setting.link_rate = -					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; -				current_link_setting.lane_count = -									initial_link_setting.lane_count; -			} else -				break; -		} -	} -	return false; -} - -static bool decide_edp_link_settings_with_dsc(struct dc_link *link, -		struct dc_link_settings *link_setting, -		uint32_t req_bw, -		enum dc_link_rate max_link_rate) -{ -	struct dc_link_settings initial_link_setting; -	struct dc_link_settings current_link_setting; -	uint32_t link_bw; - -	unsigned int policy = 0; - -	policy = link->panel_config.dsc.force_dsc_edp_policy; -	if (max_link_rate == LINK_RATE_UNKNOWN) -		max_link_rate = link->verified_link_cap.link_rate; -	/* -	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
-	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" -	 */ -	if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || -			link->dpcd_caps.edp_supported_link_rates_count == 0)) { -		/* for DSC enabled case, we search for minimum lane count */ -		memset(&initial_link_setting, 0, sizeof(initial_link_setting)); -		initial_link_setting.lane_count = LANE_COUNT_ONE; -		initial_link_setting.link_rate = LINK_RATE_LOW; -		initial_link_setting.link_spread = LINK_SPREAD_DISABLED; -		initial_link_setting.use_link_rate_set = false; -		initial_link_setting.link_rate_set = 0; -		current_link_setting = initial_link_setting; -		if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) -			return false; - -		/* search for the minimum link setting that: -		 * 1. is supported according to the link training result -		 * 2. could support the b/w requested by the timing -		 */ -		while (current_link_setting.link_rate <= -				max_link_rate) { -			link_bw = dc_link_bandwidth_kbps( -					link, -					&current_link_setting); -			if (req_bw <= link_bw) { -				*link_setting = current_link_setting; -				return true; -			} -			if (policy) { -				/* minimize lane */ -				if (current_link_setting.link_rate < max_link_rate) { -					current_link_setting.link_rate = -							increase_link_rate(link, -									current_link_setting.link_rate); -				} else { -					if (current_link_setting.lane_count < -									link->verified_link_cap.lane_count) { -						current_link_setting.lane_count = -								increase_lane_count( -										current_link_setting.lane_count); -						current_link_setting.link_rate = initial_link_setting.link_rate; -					} else -						break; -				} -			} else { -				/* minimize link rate */ -				if (current_link_setting.lane_count < -						link->verified_link_cap.lane_count) { -					current_link_setting.lane_count = -							increase_lane_count( -									current_link_setting.lane_count); -				} else { -					current_link_setting.link_rate = -							increase_link_rate(link, -									current_link_setting.link_rate); -					current_link_setting.lane_count = -							initial_link_setting.lane_count; -				} -			} -		} -		return false; -	} - -	/* if optimize edp link is supported */ -	memset(&initial_link_setting, 0, sizeof(initial_link_setting)); -	initial_link_setting.lane_count = LANE_COUNT_ONE; -	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; -	initial_link_setting.link_spread = LINK_SPREAD_DISABLED; -	initial_link_setting.use_link_rate_set = true; -	initial_link_setting.link_rate_set = 0; -	current_link_setting = initial_link_setting; - -	/* search for the minimum link setting that: -	 * 1. is supported according to the link training result -	 * 2. 
could support the b/w requested by the timing -	 */ -	while (current_link_setting.link_rate <= -			max_link_rate) { -		link_bw = dc_link_bandwidth_kbps( -				link, -				&current_link_setting); -		if (req_bw <= link_bw) { -			*link_setting = current_link_setting; -			return true; -		} -		if (policy) { -			/* minimize lane */ -			if (current_link_setting.link_rate_set < -					link->dpcd_caps.edp_supported_link_rates_count -					&& current_link_setting.link_rate < max_link_rate) { -				current_link_setting.link_rate_set++; -				current_link_setting.link_rate = -					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; -			} else { -				if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { -					current_link_setting.lane_count = -							increase_lane_count( -									current_link_setting.lane_count); -					current_link_setting.link_rate_set = initial_link_setting.link_rate_set; -					current_link_setting.link_rate = -						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; -				} else -					break; -			} -		} else { -			/* minimize link rate */ -			if (current_link_setting.lane_count < -					link->verified_link_cap.lane_count) { -				current_link_setting.lane_count = -						increase_lane_count( -								current_link_setting.lane_count); -			} else { -				if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { -					current_link_setting.link_rate_set++; -					current_link_setting.link_rate = -						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; -					current_link_setting.lane_count = -						initial_link_setting.lane_count; -				} else -					break; -			} -		} -	} -	return false; -} - -static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) -{ -	*link_setting = link->verified_link_cap; -	return true; -} - -bool decide_link_settings(struct dc_stream_state *stream, -	struct dc_link_settings *link_setting) -{ -	struct dc_link *link = stream->link; -	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); - -	memset(link_setting, 0, sizeof(*link_setting)); - -	/* if preferred is specified through AMDDP, use it, if it's enough -	 * to drive the mode -	 */ -	if (link->preferred_link_setting.lane_count != -			LANE_COUNT_UNKNOWN && -			link->preferred_link_setting.link_rate != -					LINK_RATE_UNKNOWN) { -		*link_setting = link->preferred_link_setting; -		return true; -	} - -	/* MST doesn't perform link training for now -	 * TODO: add MST specific link training routine -	 */ -	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -		decide_mst_link_settings(link, link_setting); -	} else if (link->connector_signal == SIGNAL_TYPE_EDP) { -		/* enable edp link optimization for DSC eDP case */ -		if (stream->timing.flags.DSC) { -			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; - -			if (link->panel_config.dsc.force_dsc_edp_policy) { -				/* calculate link max link rate cap*/ -				struct dc_link_settings tmp_link_setting; -				struct dc_crtc_timing tmp_timing = stream->timing; -				uint32_t orig_req_bw; - -				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; -				tmp_timing.flags.DSC = 0; -				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); -				decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); -				max_link_rate = tmp_link_setting.link_rate; -			} -			decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate); -		} else { -			decide_edp_link_settings(link, link_setting, 
req_bw); -		} -	} else { -		decide_dp_link_settings(link, link_setting, req_bw); -	} - -	return link_setting->lane_count != LANE_COUNT_UNKNOWN && -			link_setting->link_rate != LINK_RATE_UNKNOWN; -} - -/*************************Short Pulse IRQ***************************/ -bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) -{ -	/* -	 * Don't handle RX IRQ unless one of following is met: -	 * 1) The link is established (cur_link_settings != unknown) -	 * 2) We know we're dealing with a branch device, SST or MST -	 */ - -	if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || -		is_dp_branch_device(link)) -		return true; - -	return false; -} - -static bool handle_hpd_irq_psr_sink(struct dc_link *link) -{ -	union dpcd_psr_configuration psr_configuration; - -	if (!link->psr_settings.psr_feature_enabled) -		return false; - -	dm_helpers_dp_read_dpcd( -		link->ctx, -		link, -		368,/*DpcdAddress_PSR_Enable_Cfg*/ -		&psr_configuration.raw, -		sizeof(psr_configuration.raw)); - -	if (psr_configuration.bits.ENABLE) { -		unsigned char dpcdbuf[3] = {0}; -		union psr_error_status psr_error_status; -		union psr_sink_psr_status psr_sink_psr_status; - -		dm_helpers_dp_read_dpcd( -			link->ctx, -			link, -			0x2006, /*DpcdAddress_PSR_Error_Status*/ -			(unsigned char *) dpcdbuf, -			sizeof(dpcdbuf)); - -		/*DPCD 2006h   ERROR STATUS*/ -		psr_error_status.raw = dpcdbuf[0]; -		/*DPCD 2008h   SINK PANEL SELF REFRESH STATUS*/ -		psr_sink_psr_status.raw = dpcdbuf[2]; - -		if (psr_error_status.bits.LINK_CRC_ERROR || -				psr_error_status.bits.RFB_STORAGE_ERROR || -				psr_error_status.bits.VSC_SDP_ERROR) { -			bool allow_active; - -			/* Acknowledge and clear error bits */ -			dm_helpers_dp_write_dpcd( -				link->ctx, -				link, -				8198,/*DpcdAddress_PSR_Error_Status*/ -				&psr_error_status.raw, -				sizeof(psr_error_status.raw)); - -			/* PSR error, disable and re-enable PSR */ -			if (link->psr_settings.psr_allow_active) { -				allow_active = false; -				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); -				allow_active = true; -				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); -			} - -			return true; -		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == -				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){ -			/* No error is detect, PSR is active. -			 * We should return with IRQ_HPD handled without -			 * checking for loss of sync since PSR would have -			 * powered down main link. 
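For reference, the decision made by handle_hpd_irq_psr_sink() can be reduced to the sketch below, which assumes the two status bytes from DPCD 2006h and 2008h have already been read. The bit positions and the "active, display from RFB" field value are restated from the DP PSR register layout and should be treated as assumptions; all sketch_* names are hypothetical, not DC helpers.

#define SKETCH_PSR_LINK_CRC_ERROR	(1 << 0)
#define SKETCH_PSR_RFB_STORAGE_ERROR	(1 << 1)
#define SKETCH_PSR_VSC_SDP_ERROR	(1 << 2)
#define SKETCH_PSR_SINK_ACTIVE_RFB	2	/* assumed "active, display from RFB" */

enum sketch_psr_action {
	SKETCH_PSR_NOT_HANDLED,	/* not a PSR event; continue normal IRQ handling */
	SKETCH_PSR_RESTART,	/* ack the error bits, disable then re-enable PSR */
	SKETCH_PSR_HANDLED,	/* PSR healthy and active; main link may be powered down */
};

static enum sketch_psr_action sketch_psr_irq_action(unsigned char error_status,
						    unsigned char sink_status)
{
	if (error_status & (SKETCH_PSR_LINK_CRC_ERROR |
			    SKETCH_PSR_RFB_STORAGE_ERROR |
			    SKETCH_PSR_VSC_SDP_ERROR))
		return SKETCH_PSR_RESTART;

	if ((sink_status & 0x7) == SKETCH_PSR_SINK_ACTIVE_RFB)
		return SKETCH_PSR_HANDLED;

	return SKETCH_PSR_NOT_HANDLED;
}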
-			 */ -			return true; -		} -	} -	return false; -} - -static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) -{ -	switch (test_rate) { -	case DP_TEST_LINK_RATE_RBR: -		return LINK_RATE_LOW; -	case DP_TEST_LINK_RATE_HBR: -		return LINK_RATE_HIGH; -	case DP_TEST_LINK_RATE_HBR2: -		return LINK_RATE_HIGH2; -	case DP_TEST_LINK_RATE_HBR3: -		return LINK_RATE_HIGH3; -	case DP_TEST_LINK_RATE_UHBR10: -		return LINK_RATE_UHBR10; -	case DP_TEST_LINK_RATE_UHBR20: -		return LINK_RATE_UHBR20; -	case DP_TEST_LINK_RATE_UHBR13_5: -		return LINK_RATE_UHBR13_5; -	default: -		return LINK_RATE_UNKNOWN; -	} -} - -static void dp_test_send_link_training(struct dc_link *link) -{ -	struct dc_link_settings link_settings = {0}; -	uint8_t test_rate = 0; - -	core_link_read_dpcd( -			link, -			DP_TEST_LANE_COUNT, -			(unsigned char *)(&link_settings.lane_count), -			1); -	core_link_read_dpcd( -			link, -			DP_TEST_LINK_RATE, -			&test_rate, -			1); -	link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); - -	/* Set preferred link settings */ -	link->verified_link_cap.lane_count = link_settings.lane_count; -	link->verified_link_cap.link_rate = link_settings.link_rate; - -	dp_retrain_link_dp_test(link, &link_settings, false); -} - -/* TODO Raven hbr2 compliance eye output is unstable - * (toggling on and off) with debugger break - * This caueses intermittent PHY automation failure - * Need to look into the root cause */ -static void dp_test_send_phy_test_pattern(struct dc_link *link) -{ -	union phy_test_pattern dpcd_test_pattern; -	union lane_adjust dpcd_lane_adjustment[2]; -	unsigned char dpcd_post_cursor_2_adjustment = 0; -	unsigned char test_pattern_buffer[ -			(DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - -			DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; -	unsigned int test_pattern_size = 0; -	enum dp_test_pattern test_pattern; -	union lane_adjust dpcd_lane_adjust; -	unsigned int lane; -	struct link_training_settings link_training_settings; - -	dpcd_test_pattern.raw = 0; -	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); -	memset(&link_training_settings, 0, sizeof(link_training_settings)); - -	/* get phy test pattern and pattern parameters from DP receiver */ -	core_link_read_dpcd( -			link, -			DP_PHY_TEST_PATTERN, -			&dpcd_test_pattern.raw, -			sizeof(dpcd_test_pattern)); -	core_link_read_dpcd( -			link, -			DP_ADJUST_REQUEST_LANE0_1, -			&dpcd_lane_adjustment[0].raw, -			sizeof(dpcd_lane_adjustment)); - -	/* prepare link training settings */ -	link_training_settings.link_settings = link->cur_link_settings; - -	link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings); - -	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && -			link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT) -		dp_fixed_vs_pe_read_lane_adjust( -				link, -				link_training_settings.dpcd_lane_settings); - -	/*get post cursor 2 parameters -	 * For DP 1.1a or eariler, this DPCD register's value is 0 -	 * For DP 1.2 or later: -	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1 -	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3 -	 */ -	core_link_read_dpcd( -			link, -			DP_ADJUST_REQUEST_POST_CURSOR2, -			&dpcd_post_cursor_2_adjustment, -			sizeof(dpcd_post_cursor_2_adjustment)); - -	/* translate request */ -	switch (dpcd_test_pattern.bits.PATTERN) { -	case PHY_TEST_PATTERN_D10_2: -		test_pattern = DP_TEST_PATTERN_D102; -		break; -	case PHY_TEST_PATTERN_SYMBOL_ERROR: -		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR; -		
break; -	case PHY_TEST_PATTERN_PRBS7: -		test_pattern = DP_TEST_PATTERN_PRBS7; -		break; -	case PHY_TEST_PATTERN_80BIT_CUSTOM: -		test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM; -		break; -	case PHY_TEST_PATTERN_CP2520_1: -		/* CP2520 pattern is unstable, temporarily use TPS4 instead */ -		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? -				DP_TEST_PATTERN_TRAINING_PATTERN4 : -				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; -		break; -	case PHY_TEST_PATTERN_CP2520_2: -		/* CP2520 pattern is unstable, temporarily use TPS4 instead */ -		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? -				DP_TEST_PATTERN_TRAINING_PATTERN4 : -				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; -		break; -	case PHY_TEST_PATTERN_CP2520_3: -		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; -		break; -	case PHY_TEST_PATTERN_128b_132b_TPS1: -		test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; -		break; -	case PHY_TEST_PATTERN_128b_132b_TPS2: -		test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; -		break; -	case PHY_TEST_PATTERN_PRBS9: -		test_pattern = DP_TEST_PATTERN_PRBS9; -		break; -	case PHY_TEST_PATTERN_PRBS11: -		test_pattern = DP_TEST_PATTERN_PRBS11; -		break; -	case PHY_TEST_PATTERN_PRBS15: -		test_pattern = DP_TEST_PATTERN_PRBS15; -		break; -	case PHY_TEST_PATTERN_PRBS23: -		test_pattern = DP_TEST_PATTERN_PRBS23; -		break; -	case PHY_TEST_PATTERN_PRBS31: -		test_pattern = DP_TEST_PATTERN_PRBS31; -		break; -	case PHY_TEST_PATTERN_264BIT_CUSTOM: -		test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; -		break; -	case PHY_TEST_PATTERN_SQUARE_PULSE: -		test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; -		break; -	default: -		test_pattern = DP_TEST_PATTERN_VIDEO_MODE; -	break; -	} - -	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { -		test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - -				DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; -		core_link_read_dpcd( -				link, -				DP_TEST_80BIT_CUSTOM_PATTERN_7_0, -				test_pattern_buffer, -				test_pattern_size); -	} - -	if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { -		test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) -		core_link_read_dpcd( -				link, -				DP_PHY_SQUARE_PATTERN, -				test_pattern_buffer, -				test_pattern_size); -	} - -	if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { -		test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- -				DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; -		core_link_read_dpcd( -				link, -				DP_TEST_264BIT_CUSTOM_PATTERN_7_0, -				test_pattern_buffer, -				test_pattern_size); -	} - -	for (lane = 0; lane < -		(unsigned int)(link->cur_link_settings.lane_count); -		lane++) { -		dpcd_lane_adjust.raw = -			get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); -		if (dp_get_link_encoding_format(&link->cur_link_settings) == -				DP_8b_10b_ENCODING) { -			link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = -				(enum dc_voltage_swing) -				(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); -			link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = -				(enum dc_pre_emphasis) -				(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); -			link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = -				(enum dc_post_cursor2) -				((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); -		} else if (dp_get_link_encoding_format(&link->cur_link_settings) == -				DP_128b_132b_ENCODING) { -			link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = -					dpcd_lane_adjust.tx_ffe.PRESET_VALUE; -		} -	} - -	dp_hw_to_dpcd_lane_settings(&link_training_settings, -			link_training_settings.hw_lane_settings, -			
link_training_settings.dpcd_lane_settings); -	/*Usage: Measure DP physical lane signal -	 * by DP SI test equipment automatically. -	 * PHY test pattern request is generated by equipment via HPD interrupt. -	 * HPD needs to be active all the time. HPD should be active -	 * all the time. Do not touch it. -	 * forward request to DS -	 */ -	dc_link_dp_set_test_pattern( -		link, -		test_pattern, -		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, -		&link_training_settings, -		test_pattern_buffer, -		test_pattern_size); -} - -static void dp_test_send_link_test_pattern(struct dc_link *link) -{ -	union link_test_pattern dpcd_test_pattern; -	union test_misc dpcd_test_params; -	enum dp_test_pattern test_pattern; -	enum dp_test_pattern_color_space test_pattern_color_space = -			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; -	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; -	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; -	struct pipe_ctx *pipe_ctx = NULL; -	int i; - -	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); -	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); - -	for (i = 0; i < MAX_PIPES; i++) { -		if (pipes[i].stream == NULL) -			continue; - -		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { -			pipe_ctx = &pipes[i]; -			break; -		} -	} - -	if (pipe_ctx == NULL) -		return; - -	/* get link test pattern and pattern parameters */ -	core_link_read_dpcd( -			link, -			DP_TEST_PATTERN, -			&dpcd_test_pattern.raw, -			sizeof(dpcd_test_pattern)); -	core_link_read_dpcd( -			link, -			DP_TEST_MISC0, -			&dpcd_test_params.raw, -			sizeof(dpcd_test_params)); - -	switch (dpcd_test_pattern.bits.PATTERN) { -	case LINK_TEST_PATTERN_COLOR_RAMP: -		test_pattern = DP_TEST_PATTERN_COLOR_RAMP; -	break; -	case LINK_TEST_PATTERN_VERTICAL_BARS: -		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; -	break; /* black and white */ -	case LINK_TEST_PATTERN_COLOR_SQUARES: -		test_pattern = (dpcd_test_params.bits.DYN_RANGE == -				TEST_DYN_RANGE_VESA ? -				DP_TEST_PATTERN_COLOR_SQUARES : -				DP_TEST_PATTERN_COLOR_SQUARES_CEA); -	break; -	default: -		test_pattern = DP_TEST_PATTERN_VIDEO_MODE; -	break; -	} - -	if (dpcd_test_params.bits.CLR_FORMAT == 0) -		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; -	else -		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
-				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : -				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; - -	switch (dpcd_test_params.bits.BPC) { -	case 0: // 6 bits -		requestColorDepth = COLOR_DEPTH_666; -		break; -	case 1: // 8 bits -		requestColorDepth = COLOR_DEPTH_888; -		break; -	case 2: // 10 bits -		requestColorDepth = COLOR_DEPTH_101010; -		break; -	case 3: // 12 bits -		requestColorDepth = COLOR_DEPTH_121212; -		break; -	default: -		break; -	} - -	switch (dpcd_test_params.bits.CLR_FORMAT) { -	case 0: -		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; -		break; -	case 1: -		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422; -		break; -	case 2: -		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444; -		break; -	default: -		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; -		break; -	} - - -	if (requestColorDepth != COLOR_DEPTH_UNDEFINED -			&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth) { -		DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n", -				__func__, -				pipe_ctx->stream->timing.display_color_depth, -				requestColorDepth); -		pipe_ctx->stream->timing.display_color_depth = requestColorDepth; -	} - -	dp_update_dsc_config(pipe_ctx); - -	dc_link_dp_set_test_pattern( -			link, -			test_pattern, -			test_pattern_color_space, -			NULL, -			NULL, -			0); -} - -static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) -{ -	union audio_test_mode            dpcd_test_mode = {0}; -	struct audio_test_pattern_type   dpcd_pattern_type = {0}; -	union audio_test_pattern_period  dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; -	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; - -	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; -	struct pipe_ctx *pipe_ctx = &pipes[0]; -	unsigned int channel_count; -	unsigned int channel = 0; -	unsigned int modes = 0; -	unsigned int sampling_rate_in_hz = 0; - -	// get audio test mode and test pattern parameters -	core_link_read_dpcd( -		link, -		DP_TEST_AUDIO_MODE, -		&dpcd_test_mode.raw, -		sizeof(dpcd_test_mode)); - -	core_link_read_dpcd( -		link, -		DP_TEST_AUDIO_PATTERN_TYPE, -		&dpcd_pattern_type.value, -		sizeof(dpcd_pattern_type)); - -	channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); - -	// read pattern periods for requested channels when sawTooth pattern is requested -	if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || -			dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { - -		test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
-				DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; -		// read period for each channel -		for (channel = 0; channel < channel_count; channel++) { -			core_link_read_dpcd( -							link, -							DP_TEST_AUDIO_PERIOD_CH1 + channel, -							&dpcd_pattern_period[channel].raw, -							sizeof(dpcd_pattern_period[channel])); -		} -	} - -	// translate sampling rate -	switch (dpcd_test_mode.bits.sampling_rate) { -	case AUDIO_SAMPLING_RATE_32KHZ: -		sampling_rate_in_hz = 32000; -		break; -	case AUDIO_SAMPLING_RATE_44_1KHZ: -		sampling_rate_in_hz = 44100; -		break; -	case AUDIO_SAMPLING_RATE_48KHZ: -		sampling_rate_in_hz = 48000; -		break; -	case AUDIO_SAMPLING_RATE_88_2KHZ: -		sampling_rate_in_hz = 88200; -		break; -	case AUDIO_SAMPLING_RATE_96KHZ: -		sampling_rate_in_hz = 96000; -		break; -	case AUDIO_SAMPLING_RATE_176_4KHZ: -		sampling_rate_in_hz = 176400; -		break; -	case AUDIO_SAMPLING_RATE_192KHZ: -		sampling_rate_in_hz = 192000; -		break; -	default: -		sampling_rate_in_hz = 0; -		break; -	} - -	link->audio_test_data.flags.test_requested = 1; -	link->audio_test_data.flags.disable_video = disable_video; -	link->audio_test_data.sampling_rate = sampling_rate_in_hz; -	link->audio_test_data.channel_count = channel_count; -	link->audio_test_data.pattern_type = test_pattern; - -	if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { -		for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { -			link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; -		} -	} -} - -void dc_link_dp_handle_automated_test(struct dc_link *link) -{ -	union test_request test_request; -	union test_response test_response; - -	memset(&test_request, 0, sizeof(test_request)); -	memset(&test_response, 0, sizeof(test_response)); - -	core_link_read_dpcd( -		link, -		DP_TEST_REQUEST, -		&test_request.raw, -		sizeof(union test_request)); -	if (test_request.bits.LINK_TRAINING) { -		/* ACK first to let DP RX test box monitor LT sequence */ -		test_response.bits.ACK = 1; -		core_link_write_dpcd( -			link, -			DP_TEST_RESPONSE, -			&test_response.raw, -			sizeof(test_response)); -		dp_test_send_link_training(link); -		/* no acknowledge request is needed again */ -		test_response.bits.ACK = 0; -	} -	if (test_request.bits.LINK_TEST_PATTRN) { -		dp_test_send_link_test_pattern(link); -		test_response.bits.ACK = 1; -	} - -	if (test_request.bits.AUDIO_TEST_PATTERN) { -		dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); -		test_response.bits.ACK = 1; -	} - -	if (test_request.bits.PHY_TEST_PATTERN) { -		dp_test_send_phy_test_pattern(link); -		test_response.bits.ACK = 1; -	} - -	/* send request acknowledgment */ -	if (test_response.bits.ACK) -		core_link_write_dpcd( -			link, -			DP_TEST_RESPONSE, -			&test_response.raw, -			sizeof(test_response)); -} - -void dc_link_dp_handle_link_loss(struct dc_link *link) -{ -	int i; -	struct pipe_ctx *pipe_ctx; - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) -			break; -	} - -	if (pipe_ctx == NULL || pipe_ctx->stream == NULL) -		return; - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -				pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) -			core_link_disable_stream(pipe_ctx); -	} - -	for (i = 0; i < MAX_PIPES; i++) { -		pipe_ctx = 
&link->dc->current_state->res_ctx.pipe_ctx[i]; -		if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -				pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) -			core_link_enable_stream(link->dc->current_state, pipe_ctx); -	} -} - -bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, -							bool defer_handling, bool *has_left_work) -{ -	union hpd_irq_data hpd_irq_dpcd_data = {0}; -	union device_service_irq device_service_clear = {0}; -	enum dc_status result; -	bool status = false; - -	if (out_link_loss) -		*out_link_loss = false; - -	if (has_left_work) -		*has_left_work = false; -	/* For use cases related to down stream connection status change, -	 * PSR and device auto test, refer to function handle_sst_hpd_irq -	 * in DAL2.1*/ - -	DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n", -		__func__, link->link_index); - - -	 /* All the "handle_hpd_irq_xxx()" methods -		 * should be called only after -		 * dal_dpsst_ls_read_hpd_irq_data -		 * Order of calls is important too -		 */ -	result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data); -	if (out_hpd_irq_dpcd_data) -		*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; - -	if (result != DC_OK) { -		DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n", -			__func__); -		return false; -	} - -	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { -		device_service_clear.bits.AUTOMATED_TEST = 1; -		core_link_write_dpcd( -			link, -			DP_DEVICE_SERVICE_IRQ_VECTOR, -			&device_service_clear.raw, -			sizeof(device_service_clear.raw)); -		device_service_clear.raw = 0; -		if (defer_handling && has_left_work) -			*has_left_work = true; -		else -			dc_link_dp_handle_automated_test(link); -		return false; -	} - -	if (!dc_link_dp_allow_hpd_rx_irq(link)) { -		DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", -			__func__, link->link_index); -		return false; -	} - -	if (handle_hpd_irq_psr_sink(link)) -		/* PSR-related error was detected and handled */ -		return true; - -	/* If PSR-related error handled, Main link may be off, -	 * so do not handle as a normal sink status change interrupt. -	 */ - -	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { -		if (defer_handling && has_left_work) -			*has_left_work = true; -		return true; -	} - -	/* check if we have MST msg and return since we poll for it */ -	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { -		if (defer_handling && has_left_work) -			*has_left_work = true; -		return false; -	} - -	/* For now we only handle 'Downstream port status' case. -	 * If we got sink count changed it means -	 * Downstream port status changed, -	 * then DM should call DC to do the detection. -	 * NOTE: Do not handle link loss on eDP since it is internal link*/ -	if ((link->connector_signal != SIGNAL_TYPE_EDP) && -		hpd_rx_irq_check_link_loss_status( -			link, -			&hpd_irq_dpcd_data)) { -		/* Connectivity log: link loss */ -		CONN_DATA_LINK_LOSS(link, -					hpd_irq_dpcd_data.raw, -					sizeof(hpd_irq_dpcd_data), -					"Status: "); - -		if (defer_handling && has_left_work) -			*has_left_work = true; -		else -			dc_link_dp_handle_link_loss(link); - -		status = false; -		if (out_link_loss) -			*out_link_loss = true; - -		dp_trace_link_loss_increment(link); -	} - -	if (link->type == dc_connection_sst_branch && -		hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT -			!= link->dpcd_sink_count) -		status = true; - -	/* reasons for HPD RX: -	 * 1. 
Link Loss - ie Re-train the Link -	 * 2. MST sideband message -	 * 3. Automated Test - ie. Internal Commit -	 * 4. CP (copy protection) - (not interesting for DM???) -	 * 5. DRR -	 * 6. Downstream Port status changed -	 * -ie. Detect - this the only one -	 * which is interesting for DM because -	 * it must call dc_link_detect. -	 */ -	return status; -} - -/*query dpcd for version and mst cap addresses*/ -bool is_mst_supported(struct dc_link *link) -{ -	bool mst          = false; -	enum dc_status st = DC_OK; -	union dpcd_rev rev; -	union mstm_cap cap; - -	if (link->preferred_training_settings.mst_enable && -		*link->preferred_training_settings.mst_enable == false) { -		return false; -	} - -	rev.raw  = 0; -	cap.raw  = 0; - -	st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, -			sizeof(rev)); - -	if (st == DC_OK && rev.raw >= DPCD_REV_12) { - -		st = core_link_read_dpcd(link, DP_MSTM_CAP, -				&cap.raw, sizeof(cap)); -		if (st == DC_OK && cap.bits.MST_CAP == 1) -			mst = true; -	} -	return mst; - -} - -bool is_dp_active_dongle(const struct dc_link *link) -{ -	return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && -				(link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); -} - -bool is_dp_branch_device(const struct dc_link *link) -{ -	return link->dpcd_caps.is_branch_dev; -} - -static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) -{ -	switch (bpc) { -	case DOWN_STREAM_MAX_8BPC: -		return 8; -	case DOWN_STREAM_MAX_10BPC: -		return 10; -	case DOWN_STREAM_MAX_12BPC: -		return 12; -	case DOWN_STREAM_MAX_16BPC: -		return 16; -	default: -		break; -	} - -	return -1; -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) -{ -	switch (bw) { -	case 0b001: -		return 9000000; -	case 0b010: -		return 18000000; -	case 0b011: -		return 24000000; -	case 0b100: -		return 32000000; -	case 0b101: -		return 40000000; -	case 0b110: -		return 48000000; -	} - -	return 0; -} - -/* - * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. 
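Since the encoded bandwidth values form a fixed ladder, the translation that dc_link_bw_kbps_from_raw_frl_link_rate_data() performs with a switch above can also be expressed as a small lookup table. The sketch below is illustrative only, with a hypothetical name; the kbps values are copied from the switch.

static unsigned int sketch_frl_bw_kbps(unsigned char raw)
{
	static const unsigned int frl_kbps[8] = {
		0,		/* 0b000: FRL not supported */
		9000000,	/* 0b001:  9 Gbps (3 lanes x 3 Gbps) */
		18000000,	/* 0b010: 18 Gbps (3 lanes x 6 Gbps) */
		24000000,	/* 0b011: 24 Gbps (4 lanes x 6 Gbps) */
		32000000,	/* 0b100: 32 Gbps (4 lanes x 8 Gbps) */
		40000000,	/* 0b101: 40 Gbps (4 lanes x 10 Gbps) */
		48000000,	/* 0b110: 48 Gbps (4 lanes x 12 Gbps) */
		0,		/* 0b111: reserved */
	};

	return frl_kbps[raw & 0x7];
}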
- */ -static uint32_t intersect_frl_link_bw_support( -	const uint32_t max_supported_frl_bw_in_kbps, -	const union hdmi_encoded_link_bw hdmi_encoded_link_bw) -{ -	uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; - -	// HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) -	if (hdmi_encoded_link_bw.bits.FRL_MODE) { -		if (hdmi_encoded_link_bw.bits.BW_48Gbps) -			supported_bw_in_kbps = 48000000; -		else if (hdmi_encoded_link_bw.bits.BW_40Gbps) -			supported_bw_in_kbps = 40000000; -		else if (hdmi_encoded_link_bw.bits.BW_32Gbps) -			supported_bw_in_kbps = 32000000; -		else if (hdmi_encoded_link_bw.bits.BW_24Gbps) -			supported_bw_in_kbps = 24000000; -		else if (hdmi_encoded_link_bw.bits.BW_18Gbps) -			supported_bw_in_kbps = 18000000; -		else if (hdmi_encoded_link_bw.bits.BW_9Gbps) -			supported_bw_in_kbps = 9000000; -	} - -	return supported_bw_in_kbps; -} -#endif - -static void read_dp_device_vendor_id(struct dc_link *link) -{ -	struct dp_device_vendor_id dp_id; - -	/* read IEEE branch device id */ -	core_link_read_dpcd( -		link, -		DP_BRANCH_OUI, -		(uint8_t *)&dp_id, -		sizeof(dp_id)); - -	link->dpcd_caps.branch_dev_id = -		(dp_id.ieee_oui[0] << 16) + -		(dp_id.ieee_oui[1] << 8) + -		dp_id.ieee_oui[2]; - -	memmove( -		link->dpcd_caps.branch_dev_name, -		dp_id.ieee_device_id, -		sizeof(dp_id.ieee_device_id)); -} - - - -static void get_active_converter_info( -	uint8_t data, struct dc_link *link) -{ -	union dp_downstream_port_present ds_port = { .byte = data }; -	memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); - -	/* decode converter info*/ -	if (!ds_port.fields.PORT_PRESENT) { -		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; -		ddc_service_set_dongle_type(link->ddc, -				link->dpcd_caps.dongle_type); -		link->dpcd_caps.is_branch_dev = false; -		return; -	} - -	/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ -	link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; - -	switch (ds_port.fields.PORT_TYPE) { -	case DOWNSTREAM_VGA: -		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; -		break; -	case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: -		/* At this point we don't know is it DVI or HDMI or DP++, -		 * assume DVI.*/ -		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; -		break; -	default: -		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; -		break; -	} - -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { -		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ -		union dwnstream_port_caps_byte0 *port_caps = -			(union dwnstream_port_caps_byte0 *)det_caps; -		if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, -				det_caps, sizeof(det_caps)) == DC_OK) { - -			switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { -			/*Handle DP case as DONGLE_NONE*/ -			case DOWN_STREAM_DETAILED_DP: -				link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; -				break; -			case DOWN_STREAM_DETAILED_VGA: -				link->dpcd_caps.dongle_type = -					DISPLAY_DONGLE_DP_VGA_CONVERTER; -				break; -			case DOWN_STREAM_DETAILED_DVI: -				link->dpcd_caps.dongle_type = -					DISPLAY_DONGLE_DP_DVI_CONVERTER; -				break; -			case DOWN_STREAM_DETAILED_HDMI: -			case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: -				/*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ -				link->dpcd_caps.dongle_type = -					DISPLAY_DONGLE_DP_HDMI_CONVERTER; - -				link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; -				if 
(ds_port.fields.DETAILED_CAPS) { - -					union dwnstream_port_caps_byte3_hdmi -						hdmi_caps = {.raw = det_caps[3] }; -					union dwnstream_port_caps_byte2 -						hdmi_color_caps = {.raw = det_caps[2] }; -					link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = -						det_caps[1] * 2500; - -					link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = -						hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; -					/*YCBCR capability only for HDMI case*/ -					if (port_caps->bits.DWN_STRM_PORTX_TYPE -							== DOWN_STREAM_DETAILED_HDMI) { -						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = -								hdmi_caps.bits.YCrCr422_PASS_THROUGH; -						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = -								hdmi_caps.bits.YCrCr420_PASS_THROUGH; -						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = -								hdmi_caps.bits.YCrCr422_CONVERSION; -						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = -								hdmi_caps.bits.YCrCr420_CONVERSION; -					} - -					link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = -						translate_dpcd_max_bpc( -							hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - -#if defined(CONFIG_DRM_AMD_DC_DCN) -					if (link->dc->caps.dp_hdmi21_pcon_support) { -						union hdmi_encoded_link_bw hdmi_encoded_link_bw; - -						link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = -								dc_link_bw_kbps_from_raw_frl_link_rate_data( -										hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); - -						// Intersect reported max link bw support with the supported link rate post FRL link training -						if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, -								&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { -							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( -									link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, -									hdmi_encoded_link_bw); -						} - -						if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) -							link->dpcd_caps.dongle_caps.extendedCapValid = true; -					} -#endif - -					if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) -						link->dpcd_caps.dongle_caps.extendedCapValid = true; -				} - -				break; -			} -		} -	} - -	ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); - -	{ -		struct dp_sink_hw_fw_revision dp_hw_fw_revision; - -		core_link_read_dpcd( -			link, -			DP_BRANCH_REVISION_START, -			(uint8_t *)&dp_hw_fw_revision, -			sizeof(dp_hw_fw_revision)); - -		link->dpcd_caps.branch_hw_revision = -			dp_hw_fw_revision.ieee_hw_rev; - -		memmove( -			link->dpcd_caps.branch_fw_revision, -			dp_hw_fw_revision.ieee_fw_rev, -			sizeof(dp_hw_fw_revision.ieee_fw_rev)); -	} -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && -			link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { -		union dp_dfp_cap_ext dfp_cap_ext; -		memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); -		core_link_read_dpcd( -				link, -				DP_DFP_CAPABILITY_EXTENSION_SUPPORT, -				dfp_cap_ext.raw, -				sizeof(dfp_cap_ext.raw)); -		link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; -		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = -				dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + -				(dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); -		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = -				dfp_cap_ext.fields.max_video_h_active_width[0] + -				(dfp_cap_ext.fields.max_video_h_active_width[1] << 8); -		
link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = -				dfp_cap_ext.fields.max_video_v_active_height[0] + -				(dfp_cap_ext.fields.max_video_v_active_height[1] << 8); -		link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = -				dfp_cap_ext.fields.encoding_format_caps; -		link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = -				dfp_cap_ext.fields.rgb_color_depth_caps; -		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = -				dfp_cap_ext.fields.ycbcr444_color_depth_caps; -		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = -				dfp_cap_ext.fields.ycbcr422_color_depth_caps; -		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = -				dfp_cap_ext.fields.ycbcr420_color_depth_caps; -		DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); -		DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); -		DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); -		DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); -		DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); -	} -} - -static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, -		int length) -{ -	int retry = 0; - -	if (!link->dpcd_caps.dpcd_rev.raw) { -		do { -			dp_receiver_power_ctrl(link, true); -			core_link_read_dpcd(link, DP_DPCD_REV, -							dpcd_data, length); -			link->dpcd_caps.dpcd_rev.raw = dpcd_data[ -				DP_DPCD_REV - -				DP_DPCD_REV]; -		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); -	} - -	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { -		switch (link->dpcd_caps.branch_dev_id) { -		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down -		 * all internal circuits including AUX communication preventing -		 * reading DPCD table and EDID (spec violation). -		 * Encoder will skip DP RX power down on disable_output to -		 * keep receiver powered all the time.*/ -		case DP_BRANCH_DEVICE_ID_0010FA: -		case DP_BRANCH_DEVICE_ID_0080E1: -		case DP_BRANCH_DEVICE_ID_00E04C: -			link->wa_flags.dp_keep_receiver_powered = true; -			break; - -		/* TODO: May need work around for other dongles. */ -		default: -			link->wa_flags.dp_keep_receiver_powered = false; -			break; -		} -	} else -		link->wa_flags.dp_keep_receiver_powered = false; -} - -/* Read additional sink caps defined in source specific DPCD area - * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) - */ -static bool dpcd_read_sink_ext_caps(struct dc_link *link) -{ -	uint8_t dpcd_data; - -	if (!link) -		return false; - -	if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) -		return false; - -	link->dpcd_sink_ext_caps.raw = dpcd_data; -	return true; -} - -bool dp_retrieve_lttpr_cap(struct dc_link *link) -{ -	uint8_t lttpr_dpcd_data[8]; -	enum dc_status status = DC_ERROR_UNEXPECTED; -	bool is_lttpr_present = false; - -	/* Logic to determine LTTPR support*/ -	bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; - -	if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) -		return false; - -	/* By reading LTTPR capability, RX assumes that we will enable -	 * LTTPR extended aux timeout if LTTPR is present. 
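The DFP capability-extension fields parsed a few lines above (max pixel rate, max H/V active size) are 16-bit little-endian quantities split across two DPCD bytes. A tiny helper, sketched below with a hypothetical name, captures the repeated "b[0] + (b[1] << 8)" pattern.

static unsigned int sketch_dpcd_le16(const unsigned char *b)
{
	return (unsigned int)b[0] | ((unsigned int)b[1] << 8);
}

/*
 * Usage, mirroring the assignments above:
 *   dfp.max_pixel_rate_in_mps    = sketch_dpcd_le16(raw.max_pixel_rate_in_mps);
 *   dfp.max_video_h_active_width = sketch_dpcd_le16(raw.max_video_h_active_width);
 */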
-	 */ -	status = core_link_read_dpcd(link, -			DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, -			lttpr_dpcd_data, -			sizeof(lttpr_dpcd_data)); - -	link->dpcd_caps.lttpr_caps.revision.raw = -			lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.max_link_rate = -			lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.phy_repeater_cnt = -			lttpr_dpcd_data[DP_PHY_REPEATER_CNT - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.max_lane_count = -			lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.mode = -			lttpr_dpcd_data[DP_PHY_REPEATER_MODE - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.max_ext_timeout = -			lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; -	link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = -			lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = -			lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - -							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -	/* If this chip cap is set, at least one retimer must exist in the chain -	 * Override count to 1 if we receive a known bad count (0 or an invalid value) -	 */ -	if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && -			(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { -		ASSERT(0); -		link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; -		DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); -	} - -	/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
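The repeater count cached above is one-hot encoded in DPCD: per the LTTPR field definition, 0x80 means one repeater, 0x40 two, down to 0x01 for eight, which is also why the workaround above forces 0x80 when a retimer must exist. The sketch below restates that decode; dp_convert_to_count() elsewhere in the driver performs the equivalent mapping, and the sketch_* name here is hypothetical.

static int sketch_lttpr_count(unsigned char phy_repeater_cnt)
{
	switch (phy_repeater_cnt) {
	case 0x80: return 1;
	case 0x40: return 2;
	case 0x20: return 3;
	case 0x10: return 4;
	case 0x08: return 5;
	case 0x04: return 6;
	case 0x02: return 7;
	case 0x01: return 8;
	default:   return 0;	/* invalid or no repeaters */
	}
}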
*/ -	is_lttpr_present = dp_is_lttpr_present(link); - -	if (is_lttpr_present) -		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - -	DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); -	return is_lttpr_present; -} - -bool dp_is_lttpr_present(struct dc_link *link) -{ -	return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && -			link->dpcd_caps.lttpr_caps.max_lane_count > 0 && -			link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && -			link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); -} - -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting) -{ -	enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting); - -	if (encoding == DP_8b_10b_ENCODING) -		return dp_decide_8b_10b_lttpr_mode(link); -	else if (encoding == DP_128b_132b_ENCODING) -		return dp_decide_128b_132b_lttpr_mode(link); - -	ASSERT(0); -	return LTTPR_MODE_NON_LTTPR; -} - -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) -{ -	if (!dp_is_lttpr_present(link)) -		return; - -	if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { -		*override = LTTPR_MODE_TRANSPARENT; -	} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { -		*override = LTTPR_MODE_NON_TRANSPARENT; -	} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { -		*override = LTTPR_MODE_NON_LTTPR; -	} -	DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); -} - -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) -{ -	bool is_lttpr_present = dp_is_lttpr_present(link); -	bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; -	bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; - -	if (!is_lttpr_present) -		return LTTPR_MODE_NON_LTTPR; - -	if (vbios_lttpr_aware) { -		if (vbios_lttpr_force_non_transparent) { -			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); -			return LTTPR_MODE_NON_TRANSPARENT; -		} else { -			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); -			return LTTPR_MODE_TRANSPARENT; -		} -	} - -	if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && -			link->dc->caps.extended_aux_timeout_support) { -		DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); -		return LTTPR_MODE_NON_TRANSPARENT; -	} - -	DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); -	return LTTPR_MODE_NON_LTTPR; -} - -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) -{ -	enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; - -	if (dp_is_lttpr_present(link)) -		mode = LTTPR_MODE_NON_TRANSPARENT; - -	DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); -	return mode; -} - -static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) -{ -	union dmub_rb_cmd cmd; - -	if (!link->ctx->dmub_srv || -			link->ep_type != DISPLAY_ENDPOINT_PHY || -			link->link_enc->features.flags.bits.DP_IS_USB_C == 0) -		return false; - -	memset(&cmd, 0, sizeof(cmd)); -	cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; -	cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); -	cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( -			link->dc, link->link_enc->transmitter); -	if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && -			cmd.cable_id.header.ret_status 
== 1) { -		cable_id->raw = cmd.cable_id.data.output_raw; -		DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); -	} -	return cmd.cable_id.header.ret_status == 1; -} - -static union dp_cable_id intersect_cable_id( -		union dp_cable_id *a, union dp_cable_id *b) -{ -	union dp_cable_id out; - -	out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, -			b->bits.UHBR10_20_CAPABILITY); -	out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, -			b->bits.UHBR13_5_CAPABILITY); -	out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); - -	return out; -} - -static void retrieve_cable_id(struct dc_link *link) -{ -	union dp_cable_id usbc_cable_id; - -	link->dpcd_caps.cable_id.raw = 0; -	core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, -			&link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); - -	if (get_usbc_cable_id(link, &usbc_cable_id)) -		link->dpcd_caps.cable_id = intersect_cable_id( -				&link->dpcd_caps.cable_id, &usbc_cable_id); -} - -/* DPRX may take some time to respond to AUX messages after HPD asserted. - * If AUX read unsuccessful, try to wake unresponsive DPRX by toggling DPCD SET_POWER (0x600). - */ -static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms) -{ -	enum dc_status status = DC_ERROR_UNEXPECTED; -	uint8_t dpcd_data = 0; -	uint64_t start_ts = 0; -	uint64_t current_ts = 0; -	uint64_t time_taken_ms = 0; -	enum dc_connection_type type = dc_connection_none; -	bool lttpr_present; -	bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; - -	lttpr_present = dp_is_lttpr_present(link) || -			(!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support); -	DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0); - -	/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to -	 * be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read. -	 */ -	if (lttpr_present) -		status = core_link_read_dpcd( -				link, -				DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, -				&dpcd_data, -				sizeof(dpcd_data)); -	else -		status = core_link_read_dpcd( -				link, -				DP_SET_POWER, -				&dpcd_data, -				sizeof(dpcd_data)); - -	if (status != DC_OK) { -		DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.", -				__func__, -				timeout_ms); -		start_ts = dm_get_timestamp(link->ctx); - -		do { -			if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) -				break; - -			dpcd_data = DP_SET_POWER_D3; -			status = core_link_write_dpcd( -					link, -					DP_SET_POWER, -					&dpcd_data, -					sizeof(dpcd_data)); - -			dpcd_data = DP_SET_POWER_D0; -			status = core_link_write_dpcd( -					link, -					DP_SET_POWER, -					&dpcd_data, -					sizeof(dpcd_data)); - -			current_ts = dm_get_timestamp(link->ctx); -			time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000); -		} while (status != DC_OK && time_taken_ms < timeout_ms); - -		DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s", -				__func__, -				(status == DC_OK) ? "succeeded" : "failed", -				time_taken_ms, -				(type == dc_connection_none) ? ". Unplugged." 
: "."); -	} - -	return status; -} - -static bool retrieve_link_cap(struct dc_link *link) -{ -	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, -	 * which means size 16 will be good for both of those DPCD register block reads -	 */ -	uint8_t dpcd_data[16]; -	/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. -	 */ -	uint8_t dpcd_dprx_data = '\0'; -	uint8_t dpcd_power_state = '\0'; - -	struct dp_device_vendor_id sink_id; -	union down_stream_port_count down_strm_port_count; -	union edp_configuration_cap edp_config_cap; -	union dp_downstream_port_present ds_port = { 0 }; -	enum dc_status status = DC_ERROR_UNEXPECTED; -	uint32_t read_dpcd_retry_cnt = 3; -	uint32_t aux_channel_retry_cnt = 0; -	int i; -	struct dp_sink_hw_fw_revision dp_hw_fw_revision; -	const uint32_t post_oui_delay = 30; // 30ms -	bool is_lttpr_present = false; - -	memset(dpcd_data, '\0', sizeof(dpcd_data)); -	memset(&down_strm_port_count, -		'\0', sizeof(union down_stream_port_count)); -	memset(&edp_config_cap, '\0', -		sizeof(union edp_configuration_cap)); - -	/* if extended timeout is supported in hardware, -	 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer -	 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. -	 */ -	dc_link_aux_try_to_configure_timeout(link->ddc, -			LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); - -	/* Try to ensure AUX channel active before proceeding. */ -	if (link->dc->debug.aux_wake_wa.bits.enable_wa) { -		uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms; - -		if (link->dc->debug.aux_wake_wa.bits.use_default_timeout) -			timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS; -		status = wa_try_to_wake_dprx(link, timeout_ms); -	} - -	while (status != DC_OK && aux_channel_retry_cnt < 10) { -		status = core_link_read_dpcd(link, DP_SET_POWER, -				&dpcd_power_state, sizeof(dpcd_power_state)); - -		/* Delay 1 ms if AUX CH is in power down state. Based on spec -		 * section 2.3.1.2, if AUX CH may be powered down due to -		 * write to DPCD 600h = 2. Sink AUX CH is monitoring differential -		 * signal and may need up to 1 ms before being able to reply. -		 */ -		if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { -			udelay(1000); -			aux_channel_retry_cnt++; -		} -	} - -	/* If aux channel is not active, return false and trigger another detect*/ -	if (status != DC_OK) { -		dpcd_power_state = DP_SET_POWER_D0; -		status = core_link_write_dpcd( -				link, -				DP_SET_POWER, -				&dpcd_power_state, -				sizeof(dpcd_power_state)); - -		dpcd_power_state = DP_SET_POWER_D3; -		status = core_link_write_dpcd( -				link, -				DP_SET_POWER, -				&dpcd_power_state, -				sizeof(dpcd_power_state)); -		return false; -	} - -	is_lttpr_present = dp_retrieve_lttpr_cap(link); - -	if (is_lttpr_present) -		configure_lttpr_mode_transparent(link); - -	/* Read DP tunneling information. 
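The wake workaround above boils down to "read, and if the receiver does not answer, toggle SET_POWER D3/D0 until it does or a timeout expires". The sketch below condenses that flow under stated assumptions: the callback types and the now_ms() source are hypothetical, whereas the real code goes through core_link_read/write_dpcd() and the dm timestamp helpers, and additionally bails out if the sink is unplugged.

typedef bool (*sketch_aux_read_t)(void *ctx);          /* true on AUX ACK */
typedef void (*sketch_set_power_t)(void *ctx, int d3); /* write DPCD 600h */

static bool sketch_wake_dprx(void *ctx, sketch_aux_read_t try_read,
			     sketch_set_power_t set_power,
			     unsigned long (*now_ms)(void),
			     unsigned long timeout_ms)
{
	unsigned long start = now_ms();

	if (try_read(ctx))
		return true;		/* DPRX already responsive */

	do {
		set_power(ctx, 1);	/* D3: kick the sink's power state */
		set_power(ctx, 0);	/* D0: request it back up */
		if (try_read(ctx))
			return true;
	} while (now_ms() - start < timeout_ms);

	return false;			/* caller reports detection failure */
}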
*/ -	status = dpcd_get_tunneling_device_data(link); - -	dpcd_set_source_specific_data(link); -	/* Sink may need to configure internals based on vendor, so allow some -	 * time before proceeding with possibly vendor specific transactions -	 */ -	msleep(post_oui_delay); - -	for (i = 0; i < read_dpcd_retry_cnt; i++) { -		status = core_link_read_dpcd( -				link, -				DP_DPCD_REV, -				dpcd_data, -				sizeof(dpcd_data)); -		if (status == DC_OK) -			break; -	} - -	if (status != DC_OK) { -		dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); -		return false; -	} - -	if (!is_lttpr_present) -		dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); - -	{ -		union training_aux_rd_interval aux_rd_interval; - -		aux_rd_interval.raw = -			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; - -		link->dpcd_caps.ext_receiver_cap_field_present = -				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; - -		if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { -			uint8_t ext_cap_data[16]; - -			memset(ext_cap_data, '\0', sizeof(ext_cap_data)); -			for (i = 0; i < read_dpcd_retry_cnt; i++) { -				status = core_link_read_dpcd( -				link, -				DP_DP13_DPCD_REV, -				ext_cap_data, -				sizeof(ext_cap_data)); -				if (status == DC_OK) { -					memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); -					break; -				} -			} -			if (status != DC_OK) -				dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); -		} -	} - -	link->dpcd_caps.dpcd_rev.raw = -			dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - -	if (link->dpcd_caps.ext_receiver_cap_field_present) { -		for (i = 0; i < read_dpcd_retry_cnt; i++) { -			status = core_link_read_dpcd( -					link, -					DP_DPRX_FEATURE_ENUMERATION_LIST, -					&dpcd_dprx_data, -					sizeof(dpcd_dprx_data)); -			if (status == DC_OK) -				break; -		} - -		link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; - -		if (status != DC_OK) -			dm_error("%s: Read DPRX caps data failed.\n", __func__); -	} - -	else { -		link->dpcd_caps.dprx_feature.raw = 0; -	} - - -	/* Error condition checking... -	 * It is impossible for Sink to report Max Lane Count = 0. -	 * It is possible for Sink to report Max Link Rate = 0, if it is -	 * an eDP device that is reporting specialized link rates in the -	 * SUPPORTED_LINK_RATE table. -	 */ -	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) -		return false; - -	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - -				 DP_DPCD_REV]; - -	read_dp_device_vendor_id(link); - -	/* TODO - decouple raw mst capability from policy decision */ -	link->dpcd_caps.is_mst_capable = is_mst_supported(link); - -	get_active_converter_info(ds_port.byte, link); - -	dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); - -	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - -				 DP_DPCD_REV]; - -	link->dpcd_caps.allow_invalid_MSA_timing_param = -		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - -	link->dpcd_caps.max_ln_count.raw = dpcd_data[ -		DP_MAX_LANE_COUNT - DP_DPCD_REV]; - -	link->dpcd_caps.max_down_spread.raw = dpcd_data[ -		DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - -	link->reported_link_cap.lane_count = -		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; -	link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( -			dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); -	link->reported_link_cap.link_spread = -		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
-		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - -	edp_config_cap.raw = dpcd_data[ -		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; -	link->dpcd_caps.panel_mode_edp = -		edp_config_cap.bits.ALT_SCRAMBLER_RESET; -	link->dpcd_caps.dpcd_display_control_capable = -		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; -	link->dpcd_caps.channel_coding_cap.raw = -			dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; -	link->test_pattern_enabled = false; -	link->compliance_test_state.raw = 0; - -	/* read sink count */ -	core_link_read_dpcd(link, -			DP_SINK_COUNT, -			&link->dpcd_caps.sink_count.raw, -			sizeof(link->dpcd_caps.sink_count.raw)); - -	/* read sink ieee oui */ -	core_link_read_dpcd(link, -			DP_SINK_OUI, -			(uint8_t *)(&sink_id), -			sizeof(sink_id)); - -	link->dpcd_caps.sink_dev_id = -			(sink_id.ieee_oui[0] << 16) + -			(sink_id.ieee_oui[1] << 8) + -			(sink_id.ieee_oui[2]); - -	memmove( -		link->dpcd_caps.sink_dev_id_str, -		sink_id.ieee_device_id, -		sizeof(sink_id.ieee_device_id)); - -	/* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ -	{ -		uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; - -		if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && -		    !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, -			    sizeof(str_mbp_2017))) { -			link->reported_link_cap.link_rate = 0x0c; -		} -	} - -	core_link_read_dpcd( -		link, -		DP_SINK_HW_REVISION_START, -		(uint8_t *)&dp_hw_fw_revision, -		sizeof(dp_hw_fw_revision)); - -	link->dpcd_caps.sink_hw_revision = -		dp_hw_fw_revision.ieee_hw_rev; - -	memmove( -		link->dpcd_caps.sink_fw_revision, -		dp_hw_fw_revision.ieee_fw_rev, -		sizeof(dp_hw_fw_revision.ieee_fw_rev)); - -	/* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ -	{ -		uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; -		uint8_t fwrev_mbp_2018[] = { 7, 4 }; -		uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; - -		/* We also check for the firmware revision as 16,1 models have an -		 * identical device id and are incorrectly quirked otherwise. 
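Both Apple panel quirks follow the same pattern: match the 24-bit sink OUI, the 6-byte device identification string and, for the 2018 panels, the firmware revision before overriding the reported link rate. The sketch below factors that match into a table-friendly form; the struct and helper names are hypothetical, and it assumes the kernel's bool and memcmp() as used elsewhere in this file.

struct sketch_sink_quirk {
	unsigned int oui;		/* 24-bit IEEE OUI */
	unsigned char dev_id[6];	/* device identification string */
	const unsigned char *fw_rev;	/* optional, NULL = don't care */
	unsigned int fw_rev_len;
};

static unsigned int sketch_oui(const unsigned char ieee_oui[3])
{
	return (ieee_oui[0] << 16) | (ieee_oui[1] << 8) | ieee_oui[2];
}

static bool sketch_quirk_matches(const struct sketch_sink_quirk *q,
				 const unsigned char ieee_oui[3],
				 const unsigned char dev_id[6],
				 const unsigned char *fw_rev)
{
	if (sketch_oui(ieee_oui) != q->oui)
		return false;
	if (memcmp(dev_id, q->dev_id, sizeof(q->dev_id)) != 0)
		return false;
	if (q->fw_rev && memcmp(fw_rev, q->fw_rev, q->fw_rev_len) != 0)
		return false;
	return true;
}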
-		 */ -		if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && -		    !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, -			     sizeof(str_mbp_2018)) && -		    (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, -			     sizeof(fwrev_mbp_2018)) || -		    !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, -			     sizeof(fwrev_mbp_2018_vega)))) { -			link->reported_link_cap.link_rate = LINK_RATE_RBR2; -		} -	} - -	memset(&link->dpcd_caps.dsc_caps, '\0', -			sizeof(link->dpcd_caps.dsc_caps)); -	memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); -	/* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { -		status = core_link_read_dpcd( -				link, -				DP_FEC_CAPABILITY, -				&link->dpcd_caps.fec_cap.raw, -				sizeof(link->dpcd_caps.fec_cap.raw)); -		status = core_link_read_dpcd( -				link, -				DP_DSC_SUPPORT, -				link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, -				sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); -		if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { -			status = core_link_read_dpcd( -					link, -					DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, -					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, -					sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); -			DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); -			DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", -					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); -			DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", -					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); -			DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", -					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); -		} - -		/* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode -		 * only if required. -		 */ -		if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && -				link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && -				link->dpcd_caps.is_branch_dev && -				link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && -				link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && -				(link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || -				link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { -			/* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. -			 * Clear FEC and DSC capabilities as a work around if that is not the case. 
-			 */ -			link->wa_flags.dpia_forced_tbt3_mode = true; -			memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); -			memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); -			DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); -		} else -			link->wa_flags.dpia_forced_tbt3_mode = false; -	} - -	if (!dpcd_read_sink_ext_caps(link)) -		link->dpcd_sink_ext_caps.raw = 0; - -	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { -		DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); - -		core_link_read_dpcd(link, -				DP_128b_132b_SUPPORTED_LINK_RATES, -				&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, -				sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); -		if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) -			link->reported_link_cap.link_rate = LINK_RATE_UHBR20; -		else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) -			link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; -		else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) -			link->reported_link_cap.link_rate = LINK_RATE_UHBR10; -		else -			dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); -		DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); -		DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", -				link->reported_link_cap.link_rate / 100, -				link->reported_link_cap.link_rate % 100); - -		core_link_read_dpcd(link, -				DP_SINK_VIDEO_FALLBACK_FORMATS, -				&link->dpcd_caps.fallback_formats.raw, -				sizeof(link->dpcd_caps.fallback_formats.raw)); -		DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); -		if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) -			DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); -		if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) -			DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); -		if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) -			DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); -		if (link->dpcd_caps.fallback_formats.raw == 0) { -			DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); -			link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; -		} - -		core_link_read_dpcd(link, -				DP_FEC_CAPABILITY_1, -				&link->dpcd_caps.fec_cap1.raw, -				sizeof(link->dpcd_caps.fec_cap1.raw)); -		DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); -		if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) -			DC_LOG_DP2("\tFEC aggregated error counters are supported"); -	} - -	retrieve_cable_id(link); -	dpcd_write_cable_id_to_dprx(link); - -	/* Connectivity log: detection */ -	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); - -	return true; -} - -bool dp_overwrite_extended_receiver_cap(struct dc_link *link) -{ -	uint8_t dpcd_data[16]; -	uint32_t read_dpcd_retry_cnt = 3; -	enum dc_status status = DC_ERROR_UNEXPECTED; -	union dp_downstream_port_present ds_port = { 0 }; -	union down_stream_port_count down_strm_port_count; -	union edp_configuration_cap edp_config_cap; - -	int i; - -	for (i = 0; i < read_dpcd_retry_cnt; i++) { -		status = core_link_read_dpcd( -				link, -				DP_DPCD_REV, -				dpcd_data, -				sizeof(dpcd_data)); -		if (status == DC_OK) -			break; -	} - -	link->dpcd_caps.dpcd_rev.raw = -		
dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - -	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) -		return false; - -	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - -			DP_DPCD_REV]; - -	get_active_converter_info(ds_port.byte, link); - -	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - -			DP_DPCD_REV]; - -	link->dpcd_caps.allow_invalid_MSA_timing_param = -		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - -	link->dpcd_caps.max_ln_count.raw = dpcd_data[ -		DP_MAX_LANE_COUNT - DP_DPCD_REV]; - -	link->dpcd_caps.max_down_spread.raw = dpcd_data[ -		DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - -	link->reported_link_cap.lane_count = -		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; -	link->reported_link_cap.link_rate = dpcd_data[ -		DP_MAX_LINK_RATE - DP_DPCD_REV]; -	link->reported_link_cap.link_spread = -		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? -		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - -	edp_config_cap.raw = dpcd_data[ -		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; -	link->dpcd_caps.panel_mode_edp = -		edp_config_cap.bits.ALT_SCRAMBLER_RESET; -	link->dpcd_caps.dpcd_display_control_capable = -		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - -	return true; -} - -bool detect_dp_sink_caps(struct dc_link *link) -{ -	return retrieve_link_cap(link); -} - -static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) -{ -	enum dc_link_rate link_rate; -	// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. -	switch (link_rate_in_khz) { -	case 1620000: -		link_rate = LINK_RATE_LOW;		// Rate_1 (RBR)		- 1.62 Gbps/Lane -		break; -	case 2160000: -		link_rate = LINK_RATE_RATE_2;	// Rate_2			- 2.16 Gbps/Lane -		break; -	case 2430000: -		link_rate = LINK_RATE_RATE_3;	// Rate_3			- 2.43 Gbps/Lane -		break; -	case 2700000: -		link_rate = LINK_RATE_HIGH;		// Rate_4 (HBR)		- 2.70 Gbps/Lane -		break; -	case 3240000: -		link_rate = LINK_RATE_RBR2;		// Rate_5 (RBR2)	- 3.24 Gbps/Lane -		break; -	case 4320000: -		link_rate = LINK_RATE_RATE_6;	// Rate_6			- 4.32 Gbps/Lane -		break; -	case 5400000: -		link_rate = LINK_RATE_HIGH2;	// Rate_7 (HBR2)	- 5.40 Gbps/Lane -		break; -	case 8100000: -		link_rate = LINK_RATE_HIGH3;	// Rate_8 (HBR3)	- 8.10 Gbps/Lane -		break; -	default: -		link_rate = LINK_RATE_UNKNOWN; -		break; -	} -	return link_rate; -} - -void detect_edp_sink_caps(struct dc_link *link) -{ -	uint8_t supported_link_rates[16]; -	uint32_t entry; -	uint32_t link_rate_in_khz; -	enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; -	uint8_t backlight_adj_cap; -	uint8_t general_edp_cap; - -	retrieve_link_cap(link); -	link->dpcd_caps.edp_supported_link_rates_count = 0; -	memset(supported_link_rates, 0, sizeof(supported_link_rates)); - -	/* -	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. -	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" -	 */ -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && -			(link->panel_config.ilr.optimize_edp_link_rate || -			link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { -		// Read DPCD 00010h - 0001Fh 16 bytes at one shot -		core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, -							supported_link_rates, sizeof(supported_link_rates)); - -		for (entry = 0; entry < 16; entry += 2) { -			// DPCD register reports per-lane link rate = 16-bit link rate capability -			// value X 200 kHz. Need multiplier to find link rate in kHz. 
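/*
 * For illustration, a minimal standalone sketch of the arithmetic the loop
 * below performs on each SUPPORTED_LINK_RATES entry: two little-endian bytes
 * give the per-lane rate in 200 kHz units, e.g. bytes 0xBC, 0x34 form
 * 0x34BC = 13500, and 13500 * 200 kHz = 2,700,000 kHz (HBR).
 * edp_link_rate_entry_to_khz() is only a local name for this sketch.
 */
#include <stdint.h>

static uint32_t edp_link_rate_entry_to_khz(uint8_t low, uint8_t high)
{
	return ((uint32_t)high * 0x100 + (uint32_t)low) * 200;
}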
-			link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + -										supported_link_rates[entry]) * 200; - -			if (link_rate_in_khz != 0) { -				link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); -				link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; -				link->dpcd_caps.edp_supported_link_rates_count++; - -				if (link->reported_link_cap.link_rate < link_rate) -					link->reported_link_cap.link_rate = link_rate; -			} -		} -	} -	core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, -						&backlight_adj_cap, sizeof(backlight_adj_cap)); - -	link->dpcd_caps.dynamic_backlight_capable_edp = -				(backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; - -	core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, -						&general_edp_cap, sizeof(general_edp_cap)); - -	link->dpcd_caps.set_power_state_capable_edp = -				(general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; - -	dc_link_set_default_brightness_aux(link); - -	core_link_read_dpcd(link, DP_EDP_DPCD_REV, -		&link->dpcd_caps.edp_rev, -		sizeof(link->dpcd_caps.edp_rev)); -	/* -	 * PSR is only valid for eDP v1.3 or higher. -	 */ -	if (link->dpcd_caps.edp_rev >= DP_EDP_13) { -		core_link_read_dpcd(link, DP_PSR_SUPPORT, -			&link->dpcd_caps.psr_info.psr_version, -			sizeof(link->dpcd_caps.psr_info.psr_version)); -		if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) -			core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, -						&link->dpcd_caps.psr_info.force_psrsu_cap, -						sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); -		core_link_read_dpcd(link, DP_PSR_CAPS, -			&link->dpcd_caps.psr_info.psr_dpcd_caps.raw, -			sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); -		if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { -			core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, -				&link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, -				sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); -		} -	} - -	/* -	 * ALPM is only valid for eDP v1.4 or higher. 
-	 */ -	if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) -		core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, -			&link->dpcd_caps.alpm_caps.raw, -			sizeof(link->dpcd_caps.alpm_caps.raw)); -} - -void dc_link_dp_enable_hpd(const struct dc_link *link) -{ -	struct link_encoder *encoder = link->link_enc; - -	if (encoder != NULL && encoder->funcs->enable_hpd != NULL) -		encoder->funcs->enable_hpd(encoder); -} - -void dc_link_dp_disable_hpd(const struct dc_link *link) -{ -	struct link_encoder *encoder = link->link_enc; - -	if (encoder != NULL && encoder->funcs->enable_hpd != NULL) -		encoder->funcs->disable_hpd(encoder); -} - -static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) -{ -	if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && -			test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || -			test_pattern == DP_TEST_PATTERN_VIDEO_MODE) -		return true; -	else -		return false; -} - -static void set_crtc_test_pattern(struct dc_link *link, -				struct pipe_ctx *pipe_ctx, -				enum dp_test_pattern test_pattern, -				enum dp_test_pattern_color_space test_pattern_color_space) -{ -	enum controller_dp_test_pattern controller_test_pattern; -	enum dc_color_depth color_depth = pipe_ctx-> -		stream->timing.display_color_depth; -	struct bit_depth_reduction_params params; -	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; -	int width = pipe_ctx->stream->timing.h_addressable + -		pipe_ctx->stream->timing.h_border_left + -		pipe_ctx->stream->timing.h_border_right; -	int height = pipe_ctx->stream->timing.v_addressable + -		pipe_ctx->stream->timing.v_border_bottom + -		pipe_ctx->stream->timing.v_border_top; - -	memset(¶ms, 0, sizeof(params)); - -	switch (test_pattern) { -	case DP_TEST_PATTERN_COLOR_SQUARES: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; -	break; -	case DP_TEST_PATTERN_COLOR_SQUARES_CEA: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA; -	break; -	case DP_TEST_PATTERN_VERTICAL_BARS: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_VERTICALBARS; -	break; -	case DP_TEST_PATTERN_HORIZONTAL_BARS: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS; -	break; -	case DP_TEST_PATTERN_COLOR_RAMP: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_COLORRAMP; -	break; -	default: -		controller_test_pattern = -				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; -	break; -	} - -	switch (test_pattern) { -	case DP_TEST_PATTERN_COLOR_SQUARES: -	case DP_TEST_PATTERN_COLOR_SQUARES_CEA: -	case DP_TEST_PATTERN_VERTICAL_BARS: -	case DP_TEST_PATTERN_HORIZONTAL_BARS: -	case DP_TEST_PATTERN_COLOR_RAMP: -	{ -		/* disable bit depth reduction */ -		pipe_ctx->stream->bit_depth_params = params; -		opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); -		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) -			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, -				controller_test_pattern, color_depth); -		else if (link->dc->hwss.set_disp_pattern_generator) { -			struct pipe_ctx *odm_pipe; -			enum controller_dp_color_space controller_color_space; -			int opp_cnt = 1; -			int offset = 0; -			int dpg_width = width; - -			switch (test_pattern_color_space) { -			case DP_TEST_PATTERN_COLOR_SPACE_RGB: -				controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; -				break; -			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: -				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; -				break; -			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: -				controller_color_space = 
CONTROLLER_DP_COLOR_SPACE_YCBCR709; -				break; -			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: -			default: -				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; -				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); -				ASSERT(0); -				break; -			} - -			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) -				opp_cnt++; -			dpg_width = width / opp_cnt; -			offset = dpg_width; - -			link->dc->hwss.set_disp_pattern_generator(link->dc, -					pipe_ctx, -					controller_test_pattern, -					controller_color_space, -					color_depth, -					NULL, -					dpg_width, -					height, -					0); - -			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { -				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - -				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); -				link->dc->hwss.set_disp_pattern_generator(link->dc, -						odm_pipe, -						controller_test_pattern, -						controller_color_space, -						color_depth, -						NULL, -						dpg_width, -						height, -						offset); -				offset += offset; -			} -		} -	} -	break; -	case DP_TEST_PATTERN_VIDEO_MODE: -	{ -		/* restore bitdepth reduction */ -		resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); -		pipe_ctx->stream->bit_depth_params = params; -		opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); -		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) -			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, -				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, -				color_depth); -		else if (link->dc->hwss.set_disp_pattern_generator) { -			struct pipe_ctx *odm_pipe; -			int opp_cnt = 1; -			int dpg_width; - -			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) -				opp_cnt++; - -			dpg_width = width / opp_cnt; -			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { -				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - -				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); -				link->dc->hwss.set_disp_pattern_generator(link->dc, -						odm_pipe, -						CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, -						CONTROLLER_DP_COLOR_SPACE_UDEFINED, -						color_depth, -						NULL, -						dpg_width, -						height, -						0); -			} -			link->dc->hwss.set_disp_pattern_generator(link->dc, -					pipe_ctx, -					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, -					CONTROLLER_DP_COLOR_SPACE_UDEFINED, -					color_depth, -					NULL, -					dpg_width, -					height, -					0); -		} -	} -	break; - -	default: -	break; -	} -} - -bool dc_link_dp_set_test_pattern( -	struct dc_link *link, -	enum dp_test_pattern test_pattern, -	enum dp_test_pattern_color_space test_pattern_color_space, -	const struct link_training_settings *p_link_settings, -	const unsigned char *p_custom_pattern, -	unsigned int cust_pattern_size) -{ -	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; -	struct pipe_ctx *pipe_ctx = NULL; -	unsigned int lane; -	unsigned int i; -	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0}; -	union dpcd_training_pattern training_pattern; -	enum dpcd_phy_test_patterns pattern; - -	memset(&training_pattern, 0, sizeof(training_pattern)); - -	for (i = 0; i < MAX_PIPES; i++) { -		if (pipes[i].stream == NULL) -			continue; - -		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { -			pipe_ctx = &pipes[i]; -			break; -		} -	} - -	if (pipe_ctx == NULL) -		return false; - -	
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ -	if (link->test_pattern_enabled && test_pattern == -			DP_TEST_PATTERN_VIDEO_MODE) { -		/* Set CRTC Test Pattern */ -		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); -		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, -				(uint8_t *)p_custom_pattern, -				(uint32_t)cust_pattern_size); - -		/* Unblank Stream */ -		link->dc->hwss.unblank_stream( -			pipe_ctx, -			&link->verified_link_cap); -		/* TODO:m_pHwss->MuteAudioEndpoint -		 * (pPathMode->pDisplayPath, false); -		 */ - -		/* Reset Test Pattern state */ -		link->test_pattern_enabled = false; - -		return true; -	} - -	/* Check for PHY Test Patterns */ -	if (is_dp_phy_pattern(test_pattern)) { -		/* Set DPCD Lane Settings before running test pattern */ -		if (p_link_settings != NULL) { -			if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && -					p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { -				dp_fixed_vs_pe_set_retimer_lane_settings( -						link, -						p_link_settings->dpcd_lane_settings, -						p_link_settings->link_settings.lane_count); -			} else { -				dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); -			} -			dpcd_set_lane_settings(link, p_link_settings, DPRX); -		} - -		/* Blank stream if running test pattern */ -		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { -			/*TODO: -			 * m_pHwss-> -			 * MuteAudioEndpoint(pPathMode->pDisplayPath, true); -			 */ -			/* Blank stream */ -			pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); -		} - -		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, -				(uint8_t *)p_custom_pattern, -				(uint32_t)cust_pattern_size); - -		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { -			/* Set Test Pattern state */ -			link->test_pattern_enabled = true; -			if (p_link_settings != NULL) -				dpcd_set_link_settings(link, -						p_link_settings); -		} - -		switch (test_pattern) { -		case DP_TEST_PATTERN_VIDEO_MODE: -			pattern = PHY_TEST_PATTERN_NONE; -			break; -		case DP_TEST_PATTERN_D102: -			pattern = PHY_TEST_PATTERN_D10_2; -			break; -		case DP_TEST_PATTERN_SYMBOL_ERROR: -			pattern = PHY_TEST_PATTERN_SYMBOL_ERROR; -			break; -		case DP_TEST_PATTERN_PRBS7: -			pattern = PHY_TEST_PATTERN_PRBS7; -			break; -		case DP_TEST_PATTERN_80BIT_CUSTOM: -			pattern = PHY_TEST_PATTERN_80BIT_CUSTOM; -			break; -		case DP_TEST_PATTERN_CP2520_1: -			pattern = PHY_TEST_PATTERN_CP2520_1; -			break; -		case DP_TEST_PATTERN_CP2520_2: -			pattern = PHY_TEST_PATTERN_CP2520_2; -			break; -		case DP_TEST_PATTERN_CP2520_3: -			pattern = PHY_TEST_PATTERN_CP2520_3; -			break; -		case DP_TEST_PATTERN_128b_132b_TPS1: -			pattern = PHY_TEST_PATTERN_128b_132b_TPS1; -			break; -		case DP_TEST_PATTERN_128b_132b_TPS2: -			pattern = PHY_TEST_PATTERN_128b_132b_TPS2; -			break; -		case DP_TEST_PATTERN_PRBS9: -			pattern = PHY_TEST_PATTERN_PRBS9; -			break; -		case DP_TEST_PATTERN_PRBS11: -			pattern = PHY_TEST_PATTERN_PRBS11; -			break; -		case DP_TEST_PATTERN_PRBS15: -			pattern = PHY_TEST_PATTERN_PRBS15; -			break; -		case DP_TEST_PATTERN_PRBS23: -			pattern = PHY_TEST_PATTERN_PRBS23; -			break; -		case DP_TEST_PATTERN_PRBS31: -			pattern = PHY_TEST_PATTERN_PRBS31; -			break; -		case DP_TEST_PATTERN_264BIT_CUSTOM: -			pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; -			break; -		case DP_TEST_PATTERN_SQUARE_PULSE: -			pattern = PHY_TEST_PATTERN_SQUARE_PULSE; -			break; -		default: -			return false; -		} - -		if 
(test_pattern == DP_TEST_PATTERN_VIDEO_MODE -		/*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/) -			return false; - -		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { -#if defined(CONFIG_DRM_AMD_DC_DCN) -			if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) -				core_link_write_dpcd(link, -						DP_LINK_SQUARE_PATTERN, -						p_custom_pattern, -						1); - -#endif -			/* tell receiver that we are sending qualification -			 * pattern DP 1.2 or later - DP receiver's link quality -			 * pattern is set using DPCD LINK_QUAL_LANEx_SET -			 * register (0x10B~0x10E)\ -			 */ -			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) -				link_qual_pattern[lane] = -						(unsigned char)(pattern); - -			core_link_write_dpcd(link, -					DP_LINK_QUAL_LANE0_SET, -					link_qual_pattern, -					sizeof(link_qual_pattern)); -		} else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 || -			   link->dpcd_caps.dpcd_rev.raw == 0) { -			/* tell receiver that we are sending qualification -			 * pattern DP 1.1a or earlier - DP receiver's link -			 * quality pattern is set using -			 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET -			 * register (0x102). We will use v_1.3 when we are -			 * setting test pattern for DP 1.1. -			 */ -			core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET, -					    &training_pattern.raw, -					    sizeof(training_pattern)); -			training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern; -			core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET, -					     &training_pattern.raw, -					     sizeof(training_pattern)); -		} -	} else { -		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; - -		switch (test_pattern_color_space) { -		case DP_TEST_PATTERN_COLOR_SPACE_RGB: -			color_space = COLOR_SPACE_SRGB; -			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) -				color_space = COLOR_SPACE_SRGB_LIMITED; -			break; - -		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: -			color_space = COLOR_SPACE_YCBCR601; -			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) -				color_space = COLOR_SPACE_YCBCR601_LIMITED; -			break; -		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: -			color_space = COLOR_SPACE_YCBCR709; -			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) -				color_space = COLOR_SPACE_YCBCR709_LIMITED; -			break; -		default: -			break; -		} - -		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { -			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { -				union dmub_hw_lock_flags hw_locks = { 0 }; -				struct dmub_hw_lock_inst_flags inst_flags = { 0 }; - -				hw_locks.bits.lock_dig = 1; -				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; - -				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, -							true, -							&hw_locks, -							&inst_flags); -			} else -				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( -						pipe_ctx->stream_res.tg); -		} - -		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); -		/* update MSA to requested color space */ -		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, -				&pipe_ctx->stream->timing, -				color_space, -				pipe_ctx->stream->use_vsc_sdp_for_colorimetry, -				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); - -		if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { -			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) -				pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range -			else -				pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); -			
resource_build_info_frame(pipe_ctx); -			link->dc->hwss.update_info_frame(pipe_ctx); -		} - -		/* CRTC Patterns */ -		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); -		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); -		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, -				CRTC_STATE_VACTIVE); -		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, -				CRTC_STATE_VBLANK); -		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, -				CRTC_STATE_VACTIVE); - -		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { -			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { -				union dmub_hw_lock_flags hw_locks = { 0 }; -				struct dmub_hw_lock_inst_flags inst_flags = { 0 }; - -				hw_locks.bits.lock_dig = 1; -				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; - -				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, -							false, -							&hw_locks, -							&inst_flags); -			} else -				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( -						pipe_ctx->stream_res.tg); -		} - -		/* Set Test Pattern state */ -		link->test_pattern_enabled = true; -	} - -	return true; -} - -void dp_enable_mst_on_sink(struct dc_link *link, bool enable) -{ -	unsigned char mstmCntl; - -	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); -	if (enable) -		mstmCntl |= DP_MST_EN; -	else -		mstmCntl &= (~DP_MST_EN); - -	core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); -} - -void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) -{ -	union dpcd_edp_config edp_config_set; -	bool panel_mode_edp = false; - -	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - -	if (panel_mode != DP_PANEL_MODE_DEFAULT) { - -		switch (panel_mode) { -		case DP_PANEL_MODE_EDP: -		case DP_PANEL_MODE_SPECIAL: -			panel_mode_edp = true; -			break; - -		default: -				break; -		} - -		/*set edp panel mode in receiver*/ -		core_link_read_dpcd( -			link, -			DP_EDP_CONFIGURATION_SET, -			&edp_config_set.raw, -			sizeof(edp_config_set.raw)); - -		if (edp_config_set.bits.PANEL_MODE_EDP -			!= panel_mode_edp) { -			enum dc_status result; - -			edp_config_set.bits.PANEL_MODE_EDP = -			panel_mode_edp; -			result = core_link_write_dpcd( -				link, -				DP_EDP_CONFIGURATION_SET, -				&edp_config_set.raw, -				sizeof(edp_config_set.raw)); - -			ASSERT(result == DC_OK); -		} -	} -	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " -		 "eDP panel mode enabled: %d \n", -		 link->link_index, -		 link->dpcd_caps.panel_mode_edp, -		 panel_mode_edp); -} - -enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) -{ -	/* We need to explicitly check that connector -	 * is not DP. Some Travis_VGA get reported -	 * by video bios as DP. -	 */ -	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) { - -		switch (link->dpcd_caps.branch_dev_id) { -		case DP_BRANCH_DEVICE_ID_0022B9: -			/* alternate scrambler reset is required for Travis -			 * for the case when external chip does not -			 * provide sink device id, alternate scrambler -			 * scheme will  be overriden later by querying -			 * Encoder features -			 */ -			if (strncmp( -				link->dpcd_caps.branch_dev_name, -				DP_VGA_LVDS_CONVERTER_ID_2, -				sizeof( -				link->dpcd_caps. 
-				branch_dev_name)) == 0) { -					return DP_PANEL_MODE_SPECIAL; -			} -			break; -		case DP_BRANCH_DEVICE_ID_00001A: -			/* alternate scrambler reset is required for Travis -			 * for the case when external chip does not provide -			 * sink device id, alternate scrambler scheme will -			 * be overriden later by querying Encoder feature -			 */ -			if (strncmp(link->dpcd_caps.branch_dev_name, -				DP_VGA_LVDS_CONVERTER_ID_3, -				sizeof( -				link->dpcd_caps. -				branch_dev_name)) == 0) { -					return DP_PANEL_MODE_SPECIAL; -			} -			break; -		default: -			break; -		} -	} - -	if (link->dpcd_caps.panel_mode_edp && -		(link->connector_signal == SIGNAL_TYPE_EDP || -		 (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && -		  link->is_internal_display))) { -		return DP_PANEL_MODE_EDP; -	} - -	return DP_PANEL_MODE_DEFAULT; -} - -enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready) -{ -	/* FEC has to be "set ready" before the link training. -	 * The policy is to always train with FEC -	 * if the sink supports it and leave it enabled on link. -	 * If FEC is not supported, disable it. -	 */ -	struct link_encoder *link_enc = NULL; -	enum dc_status status = DC_OK; -	uint8_t fec_config = 0; - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	if (!dc_link_should_enable_fec(link)) -		return status; - -	if (link_enc->funcs->fec_set_ready && -			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { -		if (ready) { -			fec_config = 1; -			status = core_link_write_dpcd(link, -					DP_FEC_CONFIGURATION, -					&fec_config, -					sizeof(fec_config)); -			if (status == DC_OK) { -				link_enc->funcs->fec_set_ready(link_enc, true); -				link->fec_state = dc_link_fec_ready; -			} else { -				link_enc->funcs->fec_set_ready(link_enc, false); -				link->fec_state = dc_link_fec_not_ready; -				dm_error("dpcd write failed to set fec_ready"); -			} -		} else if (link->fec_state == dc_link_fec_ready) { -			fec_config = 0; -			status = core_link_write_dpcd(link, -					DP_FEC_CONFIGURATION, -					&fec_config, -					sizeof(fec_config)); -			link_enc->funcs->fec_set_ready(link_enc, false); -			link->fec_state = dc_link_fec_not_ready; -		} -	} - -	return status; -} - -void dp_set_fec_enable(struct dc_link *link, bool enable) -{ -	struct link_encoder *link_enc = NULL; - -	link_enc = link_enc_cfg_get_link_enc(link); -	ASSERT(link_enc); - -	if (!dc_link_should_enable_fec(link)) -		return; - -	if (link_enc->funcs->fec_set_enable && -			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { -		if (link->fec_state == dc_link_fec_ready && enable) { -			/* Accord to DP spec, FEC enable sequence can first -			 * be transmitted anytime after 1000 LL codes have -			 * been transmitted on the link after link training -			 * completion. Using 1 lane RBR should have the maximum -			 * time for transmitting 1000 LL codes which is 6.173 us. -			 * So use 7 microseconds delay instead. 
-			 */ -			udelay(7); -			link_enc->funcs->fec_set_enable(link_enc, true); -			link->fec_state = dc_link_fec_enabled; -		} else if (link->fec_state == dc_link_fec_enabled && !enable) { -			link_enc->funcs->fec_set_enable(link_enc, false); -			link->fec_state = dc_link_fec_ready; -		} -	} -} - -void dpcd_set_source_specific_data(struct dc_link *link) -{ -	if (!link->dc->vendor_signature.is_valid) { -		enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED; -		struct dpcd_amd_signature amd_signature = {0}; -		struct dpcd_amd_device_id amd_device_id = {0}; - -		amd_device_id.device_id_byte1 = -				(uint8_t)(link->ctx->asic_id.chip_id); -		amd_device_id.device_id_byte2 = -				(uint8_t)(link->ctx->asic_id.chip_id >> 8); -		amd_device_id.dce_version = -				(uint8_t)(link->ctx->dce_version); -		amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? -		amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? - -		core_link_read_dpcd(link, DP_SOURCE_OUI, -				(uint8_t *)(&amd_signature), -				sizeof(amd_signature)); - -		if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && -			(amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && -			(amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { - -			amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; -			amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; -			amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; - -			core_link_write_dpcd(link, DP_SOURCE_OUI, -				(uint8_t *)(&amd_signature), -				sizeof(amd_signature)); -		} - -		core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, -				(uint8_t *)(&amd_device_id), -				sizeof(amd_device_id)); - -		if (link->ctx->dce_version >= DCN_VERSION_2_0 && -			link->dc->caps.min_horizontal_blanking_period != 0) { - -			uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; - -			if (link->preferred_link_setting.dpcd_source_device_specific_field_support) { -				result_write_min_hblank = core_link_write_dpcd(link, -					DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), -					sizeof(hblank_size)); - -				if (result_write_min_hblank == DC_ERROR_UNEXPECTED) -					link->preferred_link_setting.dpcd_source_device_specific_field_support = false; -			} else { -				DC_LOG_DC("Sink device does not support 00340h DPCD write. 
Skipping on purpose.\n"); -			} -		} - -		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, -							WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, -							"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", -							result_write_min_hblank, -							link->link_index, -							link->ctx->dce_version, -							DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, -							link->dc->caps.min_horizontal_blanking_period, -							link->dpcd_caps.branch_dev_id, -							link->dpcd_caps.branch_dev_name[0], -							link->dpcd_caps.branch_dev_name[1], -							link->dpcd_caps.branch_dev_name[2], -							link->dpcd_caps.branch_dev_name[3], -							link->dpcd_caps.branch_dev_name[4], -							link->dpcd_caps.branch_dev_name[5]); -	} else { -		core_link_write_dpcd(link, DP_SOURCE_OUI, -				link->dc->vendor_signature.data.raw, -				sizeof(link->dc->vendor_signature.data.raw)); -	} -} - -void dpcd_write_cable_id_to_dprx(struct dc_link *link) -{ -	if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || -			link->dpcd_caps.cable_id.raw == 0 || -			link->dprx_states.cable_id_written) -		return; - -	core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, -			&link->dpcd_caps.cable_id.raw, -			sizeof(link->dpcd_caps.cable_id.raw)); - -	link->dprx_states.cable_id_written = 1; -} - -bool dc_link_set_backlight_level_nits(struct dc_link *link, -		bool isHDR, -		uint32_t backlight_millinits, -		uint32_t transition_time_in_ms) -{ -	struct dpcd_source_backlight_set dpcd_backlight_set; -	uint8_t backlight_control = isHDR ? 1 : 0; - -	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && -			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) -		return false; - -	// OLEDs have no PWM, they can only use AUX -	if (link->dpcd_sink_ext_caps.bits.oled == 1) -		backlight_control = 1; - -	*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; -	*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - - -	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, -			(uint8_t *)(&dpcd_backlight_set), -			sizeof(dpcd_backlight_set)) != DC_OK) -		return false; - -	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, -			&backlight_control, 1) != DC_OK) -		return false; - -	return true; -} - -bool dc_link_get_backlight_level_nits(struct dc_link *link, -		uint32_t *backlight_millinits_avg, -		uint32_t *backlight_millinits_peak) -{ -	union dpcd_source_backlight_get dpcd_backlight_get; - -	memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); - -	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && -			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) -		return false; - -	if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, -			dpcd_backlight_get.raw, -			sizeof(union dpcd_source_backlight_get)) != DC_OK) -		return false; - -	*backlight_millinits_avg = -		dpcd_backlight_get.bytes.backlight_millinits_avg; -	*backlight_millinits_peak = -		dpcd_backlight_get.bytes.backlight_millinits_peak; - -	/* On non-supported panels dpcd_read usually succeeds with 0 returned */ -	if (*backlight_millinits_avg == 0 || -			*backlight_millinits_avg > *backlight_millinits_peak) -		return false; - -	return true; -} - -bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable) -{ -	uint8_t backlight_enable = enable ? 
1 : 0; - -	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && -		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) -		return false; - -	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, -		&backlight_enable, 1) != DC_OK) -		return false; - -	return true; -} - -// we read default from 0x320 because we expect BIOS wrote it there -// regular get_backlight_nit reads from panel set at 0x326 -bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) -{ -	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && -		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) -		return false; - -	if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, -		(uint8_t *) backlight_millinits, -		sizeof(uint32_t)) != DC_OK) -		return false; - -	return true; -} - -bool dc_link_set_default_brightness_aux(struct dc_link *link) -{ -	uint32_t default_backlight; - -	if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { -		if (!dc_link_read_default_bl_aux(link, &default_backlight)) -			default_backlight = 150000; -		// if < 5 nits or > 5000, it might be wrong readback -		if (default_backlight < 5000 || default_backlight > 5000000) -			default_backlight = 150000; // - -		return dc_link_set_backlight_level_nits(link, true, -				default_backlight, 0); -	} -	return false; -} - -bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) -{ -	struct dc_link_settings link_setting; -	uint8_t link_bw_set; -	uint8_t link_rate_set; -	uint32_t req_bw; -	union lane_count_set lane_count_set = {0}; - -	ASSERT(link || crtc_timing); // invalid input - -	if (link->dpcd_caps.edp_supported_link_rates_count == 0 || -			!link->panel_config.ilr.optimize_edp_link_rate) -		return false; - - -	// Read DPCD 00100h to find if standard link rates are set -	core_link_read_dpcd(link, DP_LINK_BW_SET, -				&link_bw_set, sizeof(link_bw_set)); - -	if (link_bw_set) { -		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); -		return true; -	} - -	// Read DPCD 00115h to find the edp link rate set used -	core_link_read_dpcd(link, DP_LINK_RATE_SET, -			    &link_rate_set, sizeof(link_rate_set)); - -	// Read DPCD 00101h to find out the number of lanes currently set -	core_link_read_dpcd(link, DP_LANE_COUNT_SET, -				&lane_count_set.raw, sizeof(lane_count_set)); - -	req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); - -	if (!crtc_timing->flags.DSC) -		decide_edp_link_settings(link, &link_setting, req_bw); -	else -		decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); - -	if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || -			lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { -		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); -		return true; -	} - -	DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); -	return false; -} - -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings) -{ -	if ((link_settings->link_rate >= LINK_RATE_LOW) && -			(link_settings->link_rate <= LINK_RATE_HIGH3)) -		return DP_8b_10b_ENCODING; -	else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && -			(link_settings->link_rate <= LINK_RATE_UHBR20)) -		return DP_128b_132b_ENCODING; -	return DP_UNKNOWN_ENCODING; -} - -enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) -{ -	struct dc_link_settings 
link_settings = {0}; - -	if (!dc_is_dp_signal(link->connector_signal)) -		return DP_UNKNOWN_ENCODING; - -	if (link->preferred_link_setting.lane_count != -			LANE_COUNT_UNKNOWN && -			link->preferred_link_setting.link_rate != -					LINK_RATE_UNKNOWN) { -		link_settings = link->preferred_link_setting; -	} else { -		decide_mst_link_settings(link, &link_settings); -	} - -	return dp_get_link_encoding_format(&link_settings); -} - -// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) -static void get_lane_status( -	struct dc_link *link, -	uint32_t lane_count, -	union lane_status *status, -	union lane_align_status_updated *status_updated) -{ -	unsigned int lane; -	uint8_t dpcd_buf[3] = {0}; - -	if (status == NULL || status_updated == NULL) { -		return; -	} - -	core_link_read_dpcd( -			link, -			DP_LANE0_1_STATUS, -			dpcd_buf, -			sizeof(dpcd_buf)); - -	for (lane = 0; lane < lane_count; lane++) { -		status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); -	} - -	status_updated->raw = dpcd_buf[2]; -} - -bool dpcd_write_128b_132b_sst_payload_allocation_table( -		const struct dc_stream_state *stream, -		struct dc_link *link, -		struct link_mst_stream_allocation_table *proposed_table, -		bool allocate) -{ -	const uint8_t vc_id = 1; /// VC ID always 1 for SST -	const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST -	bool result = false; -	uint8_t req_slot_count = 0; -	struct fixed31_32 avg_time_slots_per_mtp = { 0 }; -	union payload_table_update_status update_status = { 0 }; -	const uint32_t max_retries = 30; -	uint32_t retries = 0; - -	if (allocate)	{ -		avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); -		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); -		/// Validation should filter out modes that exceed link BW -		ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); -		if (req_slot_count > MAX_MTP_SLOT_COUNT) -			return false; -	} else { -		/// Leave req_slot_count = 0 if allocate is false. 
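/*
 * For illustration, an integer sketch of the slot-count math used above,
 * assuming the usual 64 time slots per MTP (MAX_MTP_SLOT_COUNT): the stream
 * needs ceil(64 * stream_kbps / link_kbps) slots, so a 10,000,000 kbps stream
 * on a 40,000,000 kbps link takes 16 of the 64 slots. The fixed-point version
 * lives in calculate_sst_avg_time_slots_per_mtp() further down; this helper
 * name exists only for the sketch and assumes link_kbps != 0.
 */
static unsigned int sst_req_slot_count(unsigned long long stream_kbps,
				       unsigned long long link_kbps)
{
	/* ceil(64 * stream_kbps / link_kbps) */
	return (unsigned int)((64ULL * stream_kbps + link_kbps - 1) / link_kbps);
}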
-	} - -	proposed_table->stream_count = 1; /// Always 1 stream for SST -	proposed_table->stream_allocations[0].slot_count = req_slot_count; -	proposed_table->stream_allocations[0].vcp_id = vc_id; - -	if (link->aux_access_disabled) -		return true; - -	/// Write DPCD 2C0 = 1 to start updating -	update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; -	core_link_write_dpcd( -			link, -			DP_PAYLOAD_TABLE_UPDATE_STATUS, -			&update_status.raw, -			1); - -	/// Program the changes in DPCD 1C0 - 1C2 -	ASSERT(vc_id == 1); -	core_link_write_dpcd( -			link, -			DP_PAYLOAD_ALLOCATE_SET, -			&vc_id, -			1); - -	ASSERT(start_time_slot == 0); -	core_link_write_dpcd( -			link, -			DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, -			&start_time_slot, -			1); - -	core_link_write_dpcd( -			link, -			DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, -			&req_slot_count, -			1); - -	/// Poll till DPCD 2C0 read 1 -	/// Try for at least 150ms (30 retries, with 5ms delay after each attempt) - -	while (retries < max_retries) { -		if (core_link_read_dpcd( -				link, -				DP_PAYLOAD_TABLE_UPDATE_STATUS, -				&update_status.raw, -				1) == DC_OK) { -			if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { -				DC_LOG_DP2("SST Update Payload: downstream payload table updated."); -				result = true; -				break; -			} -		} else { -			union dpcd_rev dpcdRev; - -			if (core_link_read_dpcd( -					link, -					DP_DPCD_REV, -					&dpcdRev.raw, -					1) != DC_OK) { -				DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " -						"of sink while polling payload table " -						"updated status bit."); -				break; -			} -		} -		retries++; -		msleep(5); -	} - -	if (!result && retries == max_retries) { -		DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " -				"continue on. Something is wrong with the branch."); -		// TODO - DP2.0 Payload: Read and log the payload table from downstream branch -	} - -	return result; -} - -bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) -{ -	/* -	 * wait for ACT handled -	 */ -	int i; -	const int act_retries = 30; -	enum act_return_status result = ACT_FAILED; -	union payload_table_update_status update_status = {0}; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; -	union lane_align_status_updated lane_status_updated; - -	if (link->aux_access_disabled) -		return true; -	for (i = 0; i < act_retries; i++) { -		get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); - -		if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || -				!dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || -				!dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || -				!dp_is_interlane_aligned(lane_status_updated)) { -			DC_LOG_ERROR("SST Update Payload: Link loss occurred while " -					"polling for ACT handled."); -			result = ACT_LINK_LOST; -			break; -		} -		core_link_read_dpcd( -				link, -				DP_PAYLOAD_TABLE_UPDATE_STATUS, -				&update_status.raw, -				1); - -		if (update_status.bits.ACT_HANDLED == 1) { -			DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); -			result = ACT_SUCCESS; -			break; -		} - -		msleep(5); -	} - -	if (result == ACT_FAILED) { -		DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " -				"continue on. 
Something is wrong with the branch."); -	} - -	return (result == ACT_SUCCESS); -} - -struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( -		const struct dc_stream_state *stream, -		const struct dc_link *link) -{ -	struct fixed31_32 link_bw_effective = -			dc_fixpt_from_int( -					dc_link_bandwidth_kbps(link, &link->cur_link_settings)); -	struct fixed31_32 timeslot_bw_effective = -			dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); -	struct fixed31_32 timing_bw = -			dc_fixpt_from_int( -					dc_bandwidth_in_kbps_from_timing(&stream->timing)); -	struct fixed31_32 avg_time_slots_per_mtp = -			dc_fixpt_div(timing_bw, timeslot_bw_effective); - -	return avg_time_slots_per_mtp; -} - -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) -{ -	/* If this assert is hit then we have a link encoder dynamic management issue */ -	ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); -	return (pipe_ctx->stream_res.hpo_dp_stream_enc && -			pipe_ctx->link_res.hpo_dp_link_enc && -			dc_is_dp_signal(pipe_ctx->stream->signal)); -} - -void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) -{ -	if (link->connector_signal != SIGNAL_TYPE_EDP) -		return; - -	link->dc->hwss.edp_power_control(link, true); -	if (wait_for_hpd) -		link->dc->hwss.edp_wait_for_hpd_ready(link, true); -	if (link->dc->hwss.edp_backlight_control) -		link->dc->hwss.edp_backlight_control(link, true); -} - -void dc_link_clear_dprx_states(struct dc_link *link) -{ -	memset(&link->dprx_states, 0, sizeof(link->dprx_states)); -} - -void dp_receiver_power_ctrl(struct dc_link *link, bool on) -{ -	uint8_t state; - -	state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3; - -	if (link->sync_lt_in_progress) -		return; - -	core_link_write_dpcd(link, DP_SET_POWER, &state, -						 sizeof(state)); - -} - -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) -{ -	if (link != NULL && link->dc->debug.enable_driver_sequence_debug) -		core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, -					&dp_test_mode, sizeof(dp_test_mode)); -} - - -static uint8_t convert_to_count(uint8_t lttpr_repeater_count) -{ -	switch (lttpr_repeater_count) { -	case 0x80: // 1 lttpr repeater -		return 1; -	case 0x40: // 2 lttpr repeaters -		return 2; -	case 0x20: // 3 lttpr repeaters -		return 3; -	case 0x10: // 4 lttpr repeaters -		return 4; -	case 0x08: // 5 lttpr repeaters -		return 5; -	case 0x04: // 6 lttpr repeaters -		return 6; -	case 0x02: // 7 lttpr repeaters -		return 7; -	case 0x01: // 8 lttpr repeaters -		return 8; -	default: -		break; -	} -	return 0; // invalid value -} - -static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) -{ -	return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); -} - -void dp_enable_link_phy( -	struct dc_link *link, -	const struct link_resource *link_res, -	enum signal_type signal, -	enum clock_source_id clock_source, -	const struct dc_link_settings *link_settings) -{ -	link->cur_link_settings = *link_settings; -	link->dc->hwss.enable_dp_link_output(link, link_res, signal, -			clock_source, link_settings); -	dp_receiver_power_ctrl(link, true); -} - -void edp_add_delay_for_T9(struct dc_link *link) -{ -	if (link && link->panel_config.pps.extra_delay_backlight_off > 0) -		udelay(link->panel_config.pps.extra_delay_backlight_off * 1000); -} - -bool edp_receiver_ready_T9(struct dc_link *link) -{ -	unsigned int tries = 0; -	unsigned char sinkstatus = 0; -	unsigned char edpRev = 0; -	enum dc_status result = DC_OK; - -	
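/*
 * For illustration, the worst-case wait the T9 poll loop below allows:
 * up to 50 reads of DP_SINK_STATUS spaced 100 us apart, i.e. roughly
 * 50 * 100 us = 5 ms on top of the AUX transaction time itself.
 * max_t9_poll_us() is only a local name for this sketch.
 */
static unsigned int max_t9_poll_us(unsigned int tries, unsigned int interval_us)
{
	return tries * interval_us; /* 50 * 100 = 5000 us */
}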
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - -	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ -	if (result == DC_OK && edpRev >= DP_EDP_12) { -		do { -			sinkstatus = 1; -			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); -			if (sinkstatus == 0) -				break; -			if (result != DC_OK) -				break; -			udelay(100); //MAx T9 -		} while (++tries < 50); -	} - -	return result; -} -bool edp_receiver_ready_T7(struct dc_link *link) -{ -	unsigned char sinkstatus = 0; -	unsigned char edpRev = 0; -	enum dc_status result = DC_OK; - -	/* use absolute time stamp to constrain max T7*/ -	unsigned long long enter_timestamp = 0; -	unsigned long long finish_timestamp = 0; -	unsigned long long time_taken_in_ns = 0; - -	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - -	if (result == DC_OK && edpRev >= DP_EDP_12) { -		/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ -		enter_timestamp = dm_get_timestamp(link->ctx); -		do { -			sinkstatus = 0; -			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); -			if (sinkstatus == 1) -				break; -			if (result != DC_OK) -				break; -			udelay(25); -			finish_timestamp = dm_get_timestamp(link->ctx); -			time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); -		} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms -	} - -	if (link && link->panel_config.pps.extra_t7_ms > 0) -		udelay(link->panel_config.pps.extra_t7_ms * 1000); - -	return result; -} - -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, -		enum signal_type signal) -{ -	struct dc  *dc = link->ctx->dc; - -	if (!link->wa_flags.dp_keep_receiver_powered) -		dp_receiver_power_ctrl(link, false); - -	dc->hwss.disable_link_output(link, link_res, signal); -	/* Clear current link setting.*/ -	memset(&link->cur_link_settings, 0, -			sizeof(link->cur_link_settings)); - -	if (dc->clk_mgr->funcs->notify_link_rate_change) -		dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -} - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, -		enum signal_type signal) -{ -	/* MST disable link only when no stream use the link */ -	if (link->mst_stream_alloc_table.stream_count > 0) -		return; - -	dp_disable_link_phy(link, link_res, signal); - -	/* set the sink to SST mode after disabling the link */ -	dp_enable_mst_on_sink(link, false); -} - -bool dp_set_hw_training_pattern( -	struct dc_link *link, -	const struct link_resource *link_res, -	enum dc_dp_training_pattern pattern, -	uint32_t offset) -{ -	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; - -	switch (pattern) { -	case DP_TRAINING_PATTERN_SEQUENCE_1: -		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_2: -		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_3: -		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; -		break; -	case DP_TRAINING_PATTERN_SEQUENCE_4: -		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; -		break; -	case DP_128b_132b_TPS1: -		test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; -		break; -	case DP_128b_132b_TPS2: -		test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; -		break; -	default: -		break; -	} - -	dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); - -	return true; -} - -void dp_set_hw_lane_settings( -	struct 
dc_link *link, -	const struct link_resource *link_res, -	const struct link_training_settings *link_settings, -	uint32_t offset) -{ -	const struct link_hwss *link_hwss = get_link_hwss(link, link_res); - -	if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset)) -		return; - -	if (link_hwss->ext.set_dp_lane_settings) -		link_hwss->ext.set_dp_lane_settings(link, link_res, -				&link_settings->link_settings, -				link_settings->hw_lane_settings); - -	memmove(link->cur_lane_setting, -			link_settings->hw_lane_settings, -			sizeof(link->cur_lane_setting)); -} - -void dp_set_hw_test_pattern( -	struct dc_link *link, -	const struct link_resource *link_res, -	enum dp_test_pattern test_pattern, -	uint8_t *custom_pattern, -	uint32_t custom_pattern_size) -{ -	const struct link_hwss *link_hwss = get_link_hwss(link, link_res); -	struct encoder_set_dp_phy_pattern_param pattern_param = {0}; - -	pattern_param.dp_phy_pattern = test_pattern; -	pattern_param.custom_pattern = custom_pattern; -	pattern_param.custom_pattern_size = custom_pattern_size; -	pattern_param.dp_panel_mode = dp_get_panel_mode(link); - -	if (link_hwss->ext.set_dp_link_test_pattern) -		link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); -} - -void dp_retrain_link_dp_test(struct dc_link *link, -			struct dc_link_settings *link_setting, -			bool skip_video_pattern) -{ -	struct pipe_ctx *pipes = -			&link->dc->current_state->res_ctx.pipe_ctx[0]; -	unsigned int i; - - -	for (i = 0; i < MAX_PIPES; i++) { -		if (pipes[i].stream != NULL && -			!pipes[i].top_pipe && !pipes[i].prev_odm_pipe && -			pipes[i].stream->link != NULL && -			pipes[i].stream_res.stream_enc != NULL && -			pipes[i].stream->link == link) { -			udelay(100); - -			pipes[i].stream_res.stream_enc->funcs->dp_blank(link, -					pipes[i].stream_res.stream_enc); - -			/* disable any test pattern that might be active */ -			dp_set_hw_test_pattern(link, &pipes[i].link_res, -					DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - -			dp_receiver_power_ctrl(link, false); - -			link->dc->hwss.disable_stream(&pipes[i]); -			if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) -				(&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); - -			if (link->link_enc) -				link->link_enc->funcs->disable_output( -						link->link_enc, -						SIGNAL_TYPE_DISPLAY_PORT); - -			/* Clear current link setting. 
*/ -			memset(&link->cur_link_settings, 0, -				sizeof(link->cur_link_settings)); - -			perform_link_training_with_retries( -					link_setting, -					skip_video_pattern, -					LINK_TRAINING_ATTEMPTS, -					&pipes[i], -					SIGNAL_TYPE_DISPLAY_PORT, -					false); - -			link->dc->hwss.enable_stream(&pipes[i]); - -			link->dc->hwss.unblank_stream(&pipes[i], -					link_setting); - -			if (pipes[i].stream_res.audio) { -				/* notify audio driver for -				 * audio modes of monitor */ -				pipes[i].stream_res.audio->funcs->az_enable( -						pipes[i].stream_res.audio); - -				/* un-mute audio */ -				/* TODO: audio should be per stream rather than -				 * per link */ -				pipes[i].stream_res.stream_enc->funcs-> -				audio_mute_control( -					pipes[i].stream_res.stream_enc, false); -			} -		} -	} -} - -#undef DC_LOGGER -#define DC_LOGGER \ -	dsc->ctx->logger -static void dsc_optc_config_log(struct display_stream_compressor *dsc, -		struct dsc_optc_config *config) -{ -	uint32_t precision = 1 << 28; -	uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; -	uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; -	uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; - -	/* 7 fractional digits decimal precision for bytes per pixel is enough because DSC -	 * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is -	 * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal -	 */ -	ll_bytes_per_pix_fraq *= 10000000; -	ll_bytes_per_pix_fraq /= precision; - -	DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", -			config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); -	DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); -	DC_LOG_DSC("\tslice_width %d", config->slice_width); -} - -bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) -{ -	struct dc *dc = pipe_ctx->stream->ctx->dc; -	struct dc_stream_state *stream = pipe_ctx->stream; -	bool result = false; - -	if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) -		result = true; -	else -		result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); -	return result; -} - -/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, - * i.e. after dp_enable_dsc_on_rx() had been called - */ -void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) -{ -	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; -	struct dc *dc = pipe_ctx->stream->ctx->dc; -	struct dc_stream_state *stream = pipe_ctx->stream; -	struct pipe_ctx *odm_pipe; -	int opp_cnt = 1; - -	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) -		opp_cnt++; - -	if (enable) { -		struct dsc_config dsc_cfg; -		struct dsc_optc_config dsc_optc_cfg; -		enum optc_dsc_mode optc_dsc_mode; - -		/* Enable DSC hw block */ -		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; -		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; -		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; -		dsc_cfg.color_depth = stream->timing.display_color_depth; -		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; -		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; -		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); -		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; - -		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); -		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); -		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { -			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; - -			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); -			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); -		} -		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; -		dsc_cfg.pic_width *= opp_cnt; - -		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; - -		/* Enable DSC in encoder */ -		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) -				&& !is_dp_128b_132b_signal(pipe_ctx)) { -			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); -			dsc_optc_config_log(dsc, &dsc_optc_cfg); -			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, -									optc_dsc_mode, -									dsc_optc_cfg.bytes_per_pixel, -									dsc_optc_cfg.slice_width); - -			/* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ -		} - -		/* Enable DSC in OPTC */ -		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); -		dsc_optc_config_log(dsc, &dsc_optc_cfg); -		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, -							optc_dsc_mode, -							dsc_optc_cfg.bytes_per_pixel, -							dsc_optc_cfg.slice_width); -	} else { -		/* disable DSC in OPTC */ -		pipe_ctx->stream_res.tg->funcs->set_dsc_config( -				pipe_ctx->stream_res.tg, -				OPTC_DSC_DISABLED, 0, 0); - -		/* disable DSC in stream encoder */ -		if (dc_is_dp_signal(stream->signal)) { -			if (is_dp_128b_132b_signal(pipe_ctx)) -				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( -										pipe_ctx->stream_res.hpo_dp_stream_enc, -										false, -										NULL, -										true); -			else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( -						pipe_ctx->stream_res.stream_enc, -						OPTC_DSC_DISABLED, 0, 0); -				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( -							pipe_ctx->stream_res.stream_enc, false, NULL, true); -			} -		} - -		/* disable DSC block */ -		pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); -		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) -			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); -	} -} - -bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) -{ -	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; -	bool result = false; - -	if (!pipe_ctx->stream->timing.flags.DSC) -		goto out; -	if (!dsc) -		goto out; - -	if (enable) { -		{ -			dp_set_dsc_on_stream(pipe_ctx, true); -			result = true; -		} -	} else { -		dp_set_dsc_on_rx(pipe_ctx, false); -		dp_set_dsc_on_stream(pipe_ctx, false); -		result = true; -	} -out: -	return result; -} - -/* - * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; - * hence PPS info packet update need to use frame update instead of immediate update. - * Added parameter immediate_update for this purpose. 
- * The decision to use frame update is hard-coded in function dp_update_dsc_config(), - * which is the only place where a "false" would be passed in for param immediate_update. - * - * immediate_update is only applicable when DSC is enabled. - */ -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) -{ -	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; -	struct dc_stream_state *stream = pipe_ctx->stream; - -	if (!pipe_ctx->stream->timing.flags.DSC || !dsc) -		return false; - -	if (enable) { -		struct dsc_config dsc_cfg; -		uint8_t dsc_packed_pps[128]; - -		memset(&dsc_cfg, 0, sizeof(dsc_cfg)); -		memset(dsc_packed_pps, 0, 128); - -		/* Enable DSC hw block */ -		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; -		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; -		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; -		dsc_cfg.color_depth = stream->timing.display_color_depth; -		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; -		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - -		DC_LOG_DSC(" "); -		dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); -		memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); -		if (dc_is_dp_signal(stream->signal)) { -			DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); -			if (is_dp_128b_132b_signal(pipe_ctx)) -				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( -										pipe_ctx->stream_res.hpo_dp_stream_enc, -										true, -										&dsc_packed_pps[0], -										immediate_update); -			else -				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( -						pipe_ctx->stream_res.stream_enc, -						true, -						&dsc_packed_pps[0], -						immediate_update); -		} -	} else { -		/* disable DSC PPS in stream encoder */ -		memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); -		if (dc_is_dp_signal(stream->signal)) { -			if (is_dp_128b_132b_signal(pipe_ctx)) -				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( -										pipe_ctx->stream_res.hpo_dp_stream_enc, -										false, -										NULL, -										true); -			else -				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( -						pipe_ctx->stream_res.stream_enc, false, NULL, true); -		} -	} - -	return true; -} - - -bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) -{ -	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - -	if (!pipe_ctx->stream->timing.flags.DSC) -		return false; -	if (!dsc) -		return false; - -	dp_set_dsc_on_stream(pipe_ctx, true); -	dp_set_dsc_pps_sdp(pipe_ctx, true, false); -	return true; -} - -#undef DC_LOGGER -#define DC_LOGGER \ -	link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 614f022d1cff..74e465ba158d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -24,7 +24,7 @@  #include "link_enc_cfg.h"  #include "resource.h" -#include "dc_link_dp.h" +#include "link.h"  #define DC_LOGGER dc->ctx->logger @@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)  					/* DIGs do not support DP2.0 streams with 128b/132b encoding. 
*/  					struct dc_link_settings link_settings = {0}; -					decide_link_settings(stream, &link_settings); +					link_decide_link_settings(stream, &link_settings);  					if ((link_settings.link_rate >= LINK_RATE_LOW) &&  							link_settings.link_rate <= LINK_RATE_HIGH3) {  						is_dig_stream = true; @@ -305,15 +305,17 @@ void link_enc_cfg_link_encs_assign(  	for (i = 0; i < stream_count; i++) {  		struct dc_stream_state *stream = streams[i]; +		/* skip it if the link is mappable endpoint. */ +		if (stream->link->is_dig_mapping_flexible) +			continue; +  		/* Skip stream if not supported by DIG link encoder. */  		if (!is_dig_link_enc_stream(stream))  			continue;  		/* Physical endpoints have a fixed mapping to DIG link encoders. */ -		if (!stream->link->is_dig_mapping_flexible) { -			eng_id = stream->link->eng_id; -			add_link_enc_assignment(state, stream, eng_id); -		} +		eng_id = stream->link->eng_id; +		add_link_enc_assignment(state, stream, eng_id);  	}  	/* (b) Retain previous assignments for mappable endpoints if encoders still available. */ @@ -325,11 +327,12 @@ void link_enc_cfg_link_encs_assign(  		for (i = 0; i < stream_count; i++) {  			struct dc_stream_state *stream = state->streams[i]; -			/* Skip stream if not supported by DIG link encoder. */ -			if (!is_dig_link_enc_stream(stream)) +			/* Skip it if the link is NOT mappable endpoint. */ +			if (!stream->link->is_dig_mapping_flexible)  				continue; -			if (!stream->link->is_dig_mapping_flexible) +			/* Skip stream if not supported by DIG link encoder. */ +			if (!is_dig_link_enc_stream(stream))  				continue;  			for (j = 0; j < prev_state->stream_count; j++) { @@ -338,6 +341,7 @@ void link_enc_cfg_link_encs_assign(  				if (stream == prev_stream && stream->link == prev_stream->link &&  						prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {  					eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; +  					if (is_avail_link_enc(state, eng_id, stream))  						add_link_enc_assignment(state, stream, eng_id);  				} @@ -350,6 +354,15 @@ void link_enc_cfg_link_encs_assign(  	for (i = 0; i < stream_count; i++) {  		struct dc_stream_state *stream = streams[i]; +		struct link_encoder *link_enc = NULL; + +		/* Skip it if the link is NOT mappable endpoint. */ +		if (!stream->link->is_dig_mapping_flexible) +			continue; + +		/* Skip if encoder assignment retained in step (b) above. */ +		if (stream->link_enc) +			continue;  		/* Skip stream if not supported by DIG link encoder. */  		if (!is_dig_link_enc_stream(stream)) { @@ -358,24 +371,18 @@ void link_enc_cfg_link_encs_assign(  		}  		/* Mappable endpoints have a flexible mapping to DIG link encoders. */ -		if (stream->link->is_dig_mapping_flexible) { -			struct link_encoder *link_enc = NULL; -			/* Skip if encoder assignment retained in step (b) above. */ -			if (stream->link_enc) -				continue; +		/* For MST, multiple streams will share the same link / display +		 * endpoint. These streams should use the same link encoder +		 * assigned to that endpoint. +		 */ +		link_enc = get_link_enc_used_by_link(state, stream->link); +		if (link_enc == NULL) +			eng_id = find_first_avail_link_enc(stream->ctx, state); +		else +			eng_id =  link_enc->preferred_engine; -			/* For MST, multiple streams will share the same link / display -			 * endpoint. These streams should use the same link encoder -			 * assigned to that endpoint. 
-			 */ -			link_enc = get_link_enc_used_by_link(state, stream->link); -			if (link_enc == NULL) -				eng_id = find_first_avail_link_enc(stream->ctx, state); -			else -				eng_id =  link_enc->preferred_engine; -			add_link_enc_assignment(state, stream, eng_id); -		} +		add_link_enc_assignment(state, stream, eng_id);  	}  	link_enc_cfg_validate(dc, state); @@ -420,10 +427,6 @@ void link_enc_cfg_link_enc_unassign(  {  	enum engine_id eng_id = ENGINE_ID_UNKNOWN; -	/* Only DIG link encoders. */ -	if (!is_dig_link_enc_stream(stream)) -		return; -  	if (stream->link_enc)  		eng_id = stream->link_enc->preferred_engine; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c new file mode 100644 index 000000000000..a951e10416ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -0,0 +1,103 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file provides single entrance to link functionality declared in dc + * public headers. The file is intended to be used as a thin translation layer + * that directly calls link internal functions without adding new functional + * behavior. + * + * When exporting a new link related dc function, add function declaration in + * dc.h with detail interface documentation, then add function implementation + * in this file which calls link functions. 
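Purely as an illustration of the file policy described above (not part of this patch): a hypothetical new export would pair a declaration in dc.h with a one-line forwarding wrapper in this file. The names dc_link_get_foo() and link_get_foo() are invented for illustration only.

    /* hypothetical declaration that would go in dc.h:
     *     bool dc_link_get_foo(const struct dc_link *link);
     */
    bool dc_link_get_foo(const struct dc_link *link)
    {
            /* thin wrapper: no new behavior, just forward to the link component */
            return link_get_foo(link);
    }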
+ */ +#include "link.h" + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ +	return link_detect(link, reason); +} + +bool dc_link_detect_connection_type(struct dc_link *link, +		enum dc_connection_type *type) +{ +	return link_detect_connection_type(link, type); +} + +const struct dc_link_status *dc_link_get_status(const struct dc_link *link) +{ +	return link_get_status(link); +} +#ifdef CONFIG_DRM_AMD_DC_HDCP + +/* return true if the connected receiver supports the hdcp version */ +bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal) +{ +	return link_is_hdcp14(link, signal); +} + +bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal) +{ +	return link_is_hdcp22(link, signal); +} +#endif + +void dc_link_clear_dprx_states(struct dc_link *link) +{ +	link_clear_dprx_states(link); +} + +bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link) +{ +	return link_reset_cur_dp_mst_topology(link); +} + +uint32_t dc_link_bandwidth_kbps( +	const struct dc_link *link, +	const struct dc_link_settings *link_settings) +{ +	return dp_link_bandwidth_kbps(link, link_settings); +} + +uint32_t dc_bandwidth_in_kbps_from_timing( +	const struct dc_crtc_timing *timing) +{ +	return link_timing_bandwidth_kbps(timing); +} + +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) +{ +	link_get_cur_res_map(dc, map); +} + +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) +{ +	link_restore_res_map(dc, map); +} + +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ +	return link_update_dsc_config(pipe_ctx); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index fd8db482e56f..d9f2ef242b0f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -40,11 +40,11 @@  #include "virtual/virtual_stream_encoder.h"  #include "dpcd_defs.h"  #include "link_enc_cfg.h" -#include "dc_link_dp.h" +#include "link.h"  #include "virtual/virtual_link_hwss.h" -#include "link/link_hwss_dio.h" -#include "link/link_hwss_dpia.h" -#include "link/link_hwss_hpo_dp.h" +#include "link/hwss/link_hwss_dio.h" +#include "link/hwss/link_hwss_dpia.h" +#include "link/hwss/link_hwss_hpo_dp.h"  #if defined(CONFIG_DRM_AMD_DC_SI)  #include "dce60/dce60_resource.h" @@ -1768,6 +1768,17 @@ bool dc_remove_plane_from_context(  	return true;  } +/** + * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. + * + * @dc: Current dc state. + * @stream: Target stream, which we want to remove the attached plans. + * @context: New context. + * + * Return: + * Return true if DC was able to remove all planes from the target + * stream, otherwise, return false. 
+ */  bool dc_rem_all_planes_for_stream(  		const struct dc *dc,  		struct dc_stream_state *stream, @@ -2202,7 +2213,7 @@ enum dc_status dc_remove_stream_from_ctx(  			del_pipe->stream_res.stream_enc,  			false); -	if (is_dp_128b_132b_signal(del_pipe)) { +	if (link_is_dp_128b_132b_signal(del_pipe)) {  		update_hpo_dp_stream_engine_usage(  			&new_ctx->res_ctx, dc->res_pool,  			del_pipe->stream_res.hpo_dp_stream_enc, @@ -2502,9 +2513,9 @@ enum dc_status resource_map_pool_resources(  	 * and link settings  	 */  	if (dc_is_dp_signal(stream->signal)) { -		if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) +		if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))  			return DC_FAIL_DP_LINK_BANDWIDTH; -		if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { +		if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {  			pipe_ctx->stream_res.hpo_dp_stream_enc =  					find_first_free_match_hpo_dp_stream_enc_for_link(  							&context->res_ctx, pool, stream); @@ -2562,9 +2573,12 @@ enum dc_status resource_map_pool_resources(  /**   * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state - * Is a shallow copy.  Increments refcounts on existing streams and planes. + *   * @dc: copy out of dc->current_state   * @dst_ctx: copy into this + * + * This function makes a shallow copy of the current DC state and increments + * refcounts on existing streams and planes.   */  void dc_resource_state_copy_construct_current(  		const struct dc *dc, @@ -2593,15 +2607,241 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)  	return dc->res_pool->res_cap->num_dsc > 0;  } +static bool planes_changed_for_existing_stream(struct dc_state *context, +					       struct dc_stream_state *stream, +					       const struct dc_validation_set set[], +					       int set_count) +{ +	int i, j; +	struct dc_stream_status *stream_status = NULL; + +	for (i = 0; i < context->stream_count; i++) { +		if (context->streams[i] == stream) { +			stream_status = &context->stream_status[i]; +			break; +		} +	} + +	if (!stream_status) +		ASSERT(0); + +	for (i = 0; i < set_count; i++) +		if (set[i].stream == stream) +			break; + +	if (i == set_count) +		ASSERT(0); + +	if (set[i].plane_count != stream_status->plane_count) +		return true; + +	for (j = 0; j < set[i].plane_count; j++) +		if (set[i].plane_states[j] != stream_status->plane_states[j]) +			return true; + +	return false; +}  /** - * dc_validate_global_state() - Determine if HW can support a given state - * Checks HW resource availability and bandwidth requirement. + * dc_validate_with_context - Validate and update the potential new stream in the context object + * + * @dc: Used to get the current state status + * @set: An array of dc_validation_set with all the current streams reference + * @set_count: Total of streams + * @context: New context + * @fast_validate: Enable or disable fast validation + * + * This function updates the potential new stream in the context object. It + * creates multiple lists for the add, remove, and unchanged streams. In + * particular, if the unchanged streams have a plane that changed, it is + * necessary to remove all planes from the unchanged streams. In summary, this + * function is responsible for validating the new context. + * + * Return: + * In case of success, return DC_OK (1), otherwise, return a DC error. 
+ */ +enum dc_status dc_validate_with_context(struct dc *dc, +					const struct dc_validation_set set[], +					int set_count, +					struct dc_state *context, +					bool fast_validate) +{ +	struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; +	struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; +	struct dc_stream_state *add_streams[MAX_PIPES] = { 0 }; +	int old_stream_count = context->stream_count; +	enum dc_status res = DC_ERROR_UNEXPECTED; +	int unchanged_streams_count = 0; +	int del_streams_count = 0; +	int add_streams_count = 0; +	bool found = false; +	int i, j, k; + +	DC_LOGGER_INIT(dc->ctx->logger); + +	/* First build a list of streams to be remove from current context */ +	for (i = 0; i < old_stream_count; i++) { +		struct dc_stream_state *stream = context->streams[i]; + +		for (j = 0; j < set_count; j++) { +			if (stream == set[j].stream) { +				found = true; +				break; +			} +		} + +		if (!found) +			del_streams[del_streams_count++] = stream; + +		found = false; +	} + +	/* Second, build a list of new streams */ +	for (i = 0; i < set_count; i++) { +		struct dc_stream_state *stream = set[i].stream; + +		for (j = 0; j < old_stream_count; j++) { +			if (stream == context->streams[j]) { +				found = true; +				break; +			} +		} + +		if (!found) +			add_streams[add_streams_count++] = stream; + +		found = false; +	} + +	/* Build a list of unchanged streams which is necessary for handling +	 * planes change such as added, removed, and updated. +	 */ +	for (i = 0; i < set_count; i++) { +		/* Check if stream is part of the delete list */ +		for (j = 0; j < del_streams_count; j++) { +			if (set[i].stream == del_streams[j]) { +				found = true; +				break; +			} +		} + +		if (!found) { +			/* Check if stream is part of the add list */ +			for (j = 0; j < add_streams_count; j++) { +				if (set[i].stream == add_streams[j]) { +					found = true; +					break; +				} +			} +		} + +		if (!found) +			unchanged_streams[unchanged_streams_count++] = set[i].stream; + +		found = false; +	} + +	/* Remove all planes for unchanged streams if planes changed */ +	for (i = 0; i < unchanged_streams_count; i++) { +		if (planes_changed_for_existing_stream(context, +						       unchanged_streams[i], +						       set, +						       set_count)) { +			if (!dc_rem_all_planes_for_stream(dc, +							  unchanged_streams[i], +							  context)) { +				res = DC_FAIL_DETACH_SURFACES; +				goto fail; +			} +		} +	} + +	/* Remove all planes for removed streams and then remove the streams */ +	for (i = 0; i < del_streams_count; i++) { +		/* Need to cpy the dwb data from the old stream in order to efc to work */ +		if (del_streams[i]->num_wb_info > 0) { +			for (j = 0; j < add_streams_count; j++) { +				if (del_streams[i]->sink == add_streams[j]->sink) { +					add_streams[j]->num_wb_info = del_streams[i]->num_wb_info; +					for (k = 0; k < del_streams[i]->num_wb_info; k++) +						add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k]; +				} +			} +		} + +		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { +			res = DC_FAIL_DETACH_SURFACES; +			goto fail; +		} + +		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); +		if (res != DC_OK) +			goto fail; +	} + +	/* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx +	 * matches. This may change in the future if seamless_boot_stream can be +	 * multiple. 
+	 */ +	for (i = 0; i < add_streams_count; i++) { +		mark_seamless_boot_stream(dc, add_streams[i]); +		if (add_streams[i]->apply_seamless_boot_optimization && i != 0) { +			struct dc_stream_state *temp = add_streams[0]; + +			add_streams[0] = add_streams[i]; +			add_streams[i] = temp; +			break; +		} +	} + +	/* Add new streams and then add all planes for the new stream */ +	for (i = 0; i < add_streams_count; i++) { +		calculate_phy_pix_clks(add_streams[i]); +		res = dc_add_stream_to_ctx(dc, context, add_streams[i]); +		if (res != DC_OK) +			goto fail; + +		if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) { +			res = DC_FAIL_ATTACH_SURFACES; +			goto fail; +		} +	} + +	/* Add all planes for unchanged streams if planes changed */ +	for (i = 0; i < unchanged_streams_count; i++) { +		if (planes_changed_for_existing_stream(context, +						       unchanged_streams[i], +						       set, +						       set_count)) { +			if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) { +				res = DC_FAIL_ATTACH_SURFACES; +				goto fail; +			} +		} +	} + +	res = dc_validate_global_state(dc, context, fast_validate); + +fail: +	if (res != DC_OK) +		DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n", +			       __func__, +			       res); + +	return res; +} + +/** + * dc_validate_global_state() - Determine if hardware can support a given state + *   * @dc: dc struct for this driver   * @new_ctx: state to be validated   * @fast_validate: set to true if only yes/no to support matters   * - * Return: DC_OK if the result can be programmed.  Otherwise, an error code. + * Checks hardware resource availability and bandwidth requirement. + * + * Return: + * DC_OK if the result can be programmed. Otherwise, an error code.   
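A rough usage sketch (illustrative only, not from the patch) of how a display manager might drive the two validation entry points above: build a dc_validation_set per stream, copy the current state, and let dc_validate_with_context() handle the add/remove/unchanged bookkeeping before it finishes in dc_validate_global_state(). The stream and plane arguments are assumed to be provided by the caller.

    static void example_validate_single_stream(struct dc *dc,
                                               struct dc_stream_state *stream,
                                               struct dc_plane_state *plane_state)
    {
            struct dc_validation_set set[MAX_STREAMS] = { 0 };
            struct dc_state *context;
            enum dc_status status;

            context = dc_create_state(dc);
            if (!context)
                    return;
            dc_resource_state_copy_construct_current(dc, context);

            set[0].stream = stream;
            set[0].plane_states[0] = plane_state;
            set[0].plane_count = 1;

            status = dc_validate_with_context(dc, set, 1, context, false);
            if (status != DC_OK)
                    DC_LOG_WARNING("validation failed: %d\n", status);

            dc_release_state(context);
    }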
*/  enum dc_status dc_validate_global_state(  		struct dc *dc, @@ -2789,6 +3029,12 @@ static void set_avi_info_frame(  		hdmi_info.bits.C0_C1   = COLORIMETRY_EXTENDED;  	} +	if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && +			stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { +		hdmi_info.bits.EC0_EC2 = 0; +		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; +	} +  	/* TODO: un-hardcode aspect ratio */  	aspect = stream->timing.aspect_ratio; @@ -3023,6 +3269,50 @@ static void set_hfvs_info_packet(  	*info_packet = stream->hfvsif_infopacket;  } +static void adaptive_sync_override_dp_info_packets_sdp_line_num( +		const struct dc_crtc_timing *timing, +		struct enc_sdp_line_num *sdp_line_num, +		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) +{ +	uint32_t asic_blank_start = 0; +	uint32_t asic_blank_end   = 0; +	uint32_t v_update = 0; + +	const struct dc_crtc_timing *tg = timing; + +	/* blank_start = frame end - front porch */ +	asic_blank_start = tg->v_total - tg->v_front_porch; + +	/* blank_end = blank_start - active */ +	asic_blank_end = (asic_blank_start - tg->v_border_bottom - +						tg->v_addressable - tg->v_border_top); + +	if (pipe_dlg_param->vstartup_start > asic_blank_end) { +		v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end)); +		sdp_line_num->adaptive_sync_line_num_valid = true; +		sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1); +	} else { +		sdp_line_num->adaptive_sync_line_num_valid = false; +		sdp_line_num->adaptive_sync_line_num = 0; +	} +} + +static void set_adaptive_sync_info_packet( +		struct dc_info_packet *info_packet, +		const struct dc_stream_state *stream, +		struct encoder_info_frame *info_frame, +		struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param) +{ +	if (!stream->adaptive_sync_infopacket.valid) +		return; + +	adaptive_sync_override_dp_info_packets_sdp_line_num( +			&stream->timing, +			&info_frame->sdp_line_num, +			pipe_dlg_param); + +	*info_packet = stream->adaptive_sync_infopacket; +}  static void set_vtem_info_packet(  		struct dc_info_packet *info_packet, @@ -3115,6 +3405,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)  	info->vsc.valid = false;  	info->hfvsif.valid = false;  	info->vtem.valid = false; +	info->adaptive_sync.valid = false;  	signal = pipe_ctx->stream->signal;  	/* HDMi and DP have different info packets*/ @@ -3135,6 +3426,10 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)  		set_spd_info_packet(&info->spd, pipe_ctx->stream);  		set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream); +		set_adaptive_sync_info_packet(&info->adaptive_sync, +										pipe_ctx->stream, +										info, +										&pipe_ctx->pipe_dlg_param);  	}  	patch_gamut_packet_checksum(&info->gamut); @@ -3390,7 +3685,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)  	/* TODO: validate audio ASIC caps, encoder */  	if (res == DC_OK) -		res = dc_link_validate_mode_timing(stream, +		res = link_validate_mode_timing(stream,  		      link,  		      &stream->timing); @@ -3517,7 +3812,7 @@ bool get_temp_dp_link_res(struct dc_link *link,  	memset(link_res, 0, sizeof(*link_res)); -	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { +	if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {  		link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,  				dc->res_pool, link);  		if (!link_res->hpo_dp_link_enc) @@ -3574,9 +3869,20 @@ void 
check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,  		pipe_ctx_check = &context->res_ctx.pipe_ctx[i];  		if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && -			IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) +		    IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { +			struct pipe_ctx *first_pipe = pipe_ctx_check; + +			while (first_pipe->prev_odm_pipe) +				first_pipe = first_pipe->prev_odm_pipe; +			/* When ODM combine is enabled, this case is expected. If the disabled pipe +			 * is part of the ODM tree, then we should not print an error. +			 * */ +			if (first_pipe->pipe_idx == disabled_master_pipe_idx) +				continue; +  			DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", -				i, disabled_master_pipe_idx); +				   i, disabled_master_pipe_idx); +		}  	}  } @@ -3734,4 +4040,43 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(  	}  	return true; -}
\ No newline at end of file +} + +enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, +		struct dc_state *context, +		struct pipe_ctx *pipe_ctx) +{ +	if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { +		if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) { +			pipe_ctx->stream_res.hpo_dp_stream_enc = +					find_first_free_match_hpo_dp_stream_enc_for_link( +							&context->res_ctx, dc->res_pool, pipe_ctx->stream); + +			if (!pipe_ctx->stream_res.hpo_dp_stream_enc) +				return DC_NO_STREAM_ENC_RESOURCE; + +			update_hpo_dp_stream_engine_usage( +					&context->res_ctx, dc->res_pool, +					pipe_ctx->stream_res.hpo_dp_stream_enc, +					true); +		} + +		if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) { +			if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream)) +				return DC_NO_LINK_ENC_RESOURCE; +		} +	} else { +		if (pipe_ctx->stream_res.hpo_dp_stream_enc) { +			update_hpo_dp_stream_engine_usage( +					&context->res_ctx, dc->res_pool, +					pipe_ctx->stream_res.hpo_dp_stream_enc, +					false); +			pipe_ctx->stream_res.hpo_dp_stream_enc = NULL; +		} +		if (pipe_ctx->link_res.hpo_dp_link_enc) +			remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream); +	} + +	return DC_OK; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 4b372aa52801..6c06587dd88c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -65,6 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification  	/* For HPD/HPD RX, convert dpia port index into link index */  	if (notify->type == DMUB_NOTIFICATION_HPD ||  	    notify->type == DMUB_NOTIFICATION_HPD_IRQ || +		notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||  	    notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {  		notify->link_index =  			get_link_index_from_dpia_port_index(dc, notify->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 38d71b5c1f2d..72b261ad9587 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -332,9 +332,21 @@ bool dc_stream_set_cursor_attributes(  	dc = stream->ctx->dc; -	if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) -		if (stream->mall_stream_config.type == SUBVP_MAIN) +	/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4. +	 * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case: +	 * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz +	 * 2. 
For multi display cases, if resolution is >= 4K and refresh rate < 120hz +	 * +	 * [< 120hz is a requirement for SubVP configs] +	 */ +	if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { +		if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && +				((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)  			return false; +		else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 && +				((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) +			return false; +	}  	stream->cursor_attributes = *attributes; @@ -396,7 +408,7 @@ bool dc_stream_set_cursor_position(  	struct dc_stream_state *stream,  	const struct dc_cursor_position *position)  { -	struct dc  *dc = stream->ctx->dc; +	struct dc *dc;  	bool reset_idle_optimizations = false;  	if (NULL == stream) { @@ -469,6 +481,7 @@ bool dc_stream_add_writeback(struct dc *dc,  	}  	if (!isDrc) { +		ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES);  		stream->writeback_info[stream->num_wb_info++] = *wb_info;  	} @@ -514,6 +527,11 @@ bool dc_stream_remove_writeback(struct dc *dc,  		return false;  	} +	if (stream->num_wb_info > MAX_DWB_PIPES) { +		dm_error("DC: num_wb_info is invalid!\n"); +		return false; +	} +  //	stream->writeback_info[dwb_pipe_inst].wb_enabled = false;  	for (i = 0; i < stream->num_wb_info; i++) {  		/*dynamic update*/ @@ -528,7 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,  		if (stream->writeback_info[i].wb_enabled) {  			if (j < i)  				/* trim the array */ -				stream->writeback_info[j] = stream->writeback_info[i]; +				memcpy(&stream->writeback_info[j], &stream->writeback_info[i], +						sizeof(struct dc_writeback_info));  			j++;  		}  	} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0598465fd1a1..1fde43378689 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,18 +47,15 @@ struct aux_payload;  struct set_config_cmd_payload;  struct dmub_notification; -#define DC_VER "3.2.207" +#define DC_VER "3.2.223"  #define MAX_SURFACES 3  #define MAX_PLANES 6  #define MAX_STREAMS 6 -#define MAX_SINKS_PER_LINK 4  #define MIN_VIEWPORT_SIZE 12  #define MAX_NUM_EDP 2 -/******************************************************************************* - * Display Core Interfaces - ******************************************************************************/ +/* Display Core Interfaces */  struct dc_versions {  	const char *dc_ver;  	struct dmcu_version dmcu_version; @@ -263,11 +260,13 @@ struct dc_caps {  	uint32_t cache_line_size;  	uint32_t cache_num_ways;  	uint16_t subvp_fw_processing_delay_us; +	uint8_t subvp_drr_max_vblank_margin_us;  	uint16_t subvp_prefetch_end_to_mall_start_us;  	uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height  	uint16_t subvp_pstate_allow_width_us;  	uint16_t subvp_vertical_int_margin_us;  	bool seamless_odm; +	uint8_t subvp_drr_vblank_start_margin_us;  };  struct dc_bug_wa { @@ -395,6 +394,7 @@ struct dc_config {  	bool disable_dmcu;  	bool enable_4to1MPC;  	bool enable_windowed_mpo_odm; +	bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520  	uint32_t allow_edp_hotplug_detection;  	bool clamp_min_dcfclk;  	uint64_t vblank_alignment_dto_params; @@ -408,7 +408,8 @@ struct dc_config {  	bool use_default_clock_table;  	bool force_bios_enable_lttpr;  	
uint8_t force_bios_fixed_vs; - +	int sdpif_request_limit_words_per_umc; +	bool disable_subvp_drr;  };  enum visual_confirm { @@ -457,15 +458,15 @@ enum pipe_split_policy {  	MPC_SPLIT_DYNAMIC = 0,  	/** -	 * @MPC_SPLIT_DYNAMIC: Avoid pipe split, which means that DC will not +	 * @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not  	 * try any sort of split optimization.  	 */  	MPC_SPLIT_AVOID = 1,  	/** -	 * @MPC_SPLIT_DYNAMIC: With this option, DC will only try to optimize -	 * the pipe utilization when using a single display; if the user -	 * connects to a second display, DC will avoid pipe split. +	 * @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to +	 * optimize the pipe utilization when using a single display; if the +	 * user connects to a second display, DC will avoid pipe split.  	 */  	MPC_SPLIT_AVOID_MULT_DISP = 2,  }; @@ -491,12 +492,17 @@ enum dcn_pwr_state {  enum dcn_zstate_support_state {  	DCN_ZSTATE_SUPPORT_UNKNOWN,  	DCN_ZSTATE_SUPPORT_ALLOW, +	DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY, +	DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,  	DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,  	DCN_ZSTATE_SUPPORT_DISALLOW,  }; -/* - * For any clocks that may differ per pipe - * only the max is stored in this structure + +/** + * struct dc_clocks - DC pipe clocks + * + * For any clocks that may differ per pipe only the max is stored in this + * structure   */  struct dc_clocks {  	int dispclk_khz; @@ -523,6 +529,16 @@ struct dc_clocks {  	bool prev_p_state_change_support;  	bool fclk_prev_p_state_change_support;  	int num_ways; + +	/* +	 * @fw_based_mclk_switching +	 * +	 * DC has a mechanism that leverage the variable refresh rate to switch +	 * memory clock in cases that we have a large latency to achieve the +	 * memory clock change and a short vblank window. DC has some +	 * requirements to enable this feature, and this field describes if the +	 * system support or not such a feature. 
+	 */  	bool fw_based_mclk_switching;  	bool fw_based_mclk_switching_shut_down;  	int prev_num_ways; @@ -764,7 +780,6 @@ struct dc_debug_options {  	bool disable_mem_low_power;  	bool pstate_enabled;  	bool disable_dmcu; -	bool disable_psr;  	bool force_abm_enable;  	bool disable_stereo_support;  	bool vsr_support; @@ -828,6 +843,7 @@ struct dc_debug_options {  	int crb_alloc_policy_min_disp_count;  	bool disable_z10;  	bool enable_z9_disable_interface; +	bool psr_skip_crtc_disable;  	union dpia_debug_options dpia_debug;  	bool disable_fixed_vs_aux_timeout_wa;  	bool force_disable_subvp; @@ -836,6 +852,7 @@ struct dc_debug_options {  	unsigned int force_subvp_num_ways;  	unsigned int force_mall_ss_num_ways;  	bool alloc_extra_way_for_cursor; +	uint32_t subvp_extra_lines;  	bool force_usr_allow;  	/* uses value at boot and disables switch */  	bool disable_dtb_ref_clk_switch; @@ -854,6 +871,9 @@ struct dc_debug_options {  	enum lttpr_mode lttpr_mode_override;  	unsigned int dsc_delay_factor_wa_x1000;  	unsigned int min_prefetch_in_strobe_ns; +	bool disable_unbounded_requesting; +	bool dig_fifo_off_in_blank; +	bool temp_mst_deallocation_sequence;  };  struct gpu_info_soc_bounding_box_v1_0; @@ -990,9 +1010,7 @@ void dc_init_callbacks(struct dc *dc,  void dc_deinit_callbacks(struct dc *dc);  void dc_destroy(struct dc **dc); -/******************************************************************************* - * Surface Interfaces - ******************************************************************************/ +/* Surface Interfaces */  enum {  	TRANSFER_FUNC_POINTS = 1025 @@ -1271,12 +1289,23 @@ void dc_post_update_surfaces_to_stream(  #include "dc_stream.h" -/* - * Structure to store surface/stream associations for validation +/** + * struct dc_validation_set - Struct to store surface/stream associations for validation   */  struct dc_validation_set { +	/** +	 * @stream: Stream state properties +	 */  	struct dc_stream_state *stream; + +	/** +	 * @plane_state: Surface state +	 */  	struct dc_plane_state *plane_states[MAX_SURFACES]; + +	/** +	 * @plane_count: Total of active planes +	 */  	uint8_t plane_count;  }; @@ -1288,6 +1317,12 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla  void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); +enum dc_status dc_validate_with_context(struct dc *dc, +					const struct dc_validation_set set[], +					int set_count, +					struct dc_state *context, +					bool fast_validate); +  bool dc_set_generic_gpio_for_stereo(bool enable,  		struct gpio_service *gpio_service); @@ -1323,15 +1358,12 @@ void dc_resource_state_destruct(struct dc_state *context);  bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); -/* - * TODO update to make it about validation sets - * Set up streams and links associated to drive sinks - * The streams parameter is an absolute set of all active streams. - * - * After this call: - *   Phy, Encoder, Timing Generator are programmed and enabled. - *   New streams are enabled with blank stream; no memory read. - */ +enum dc_status dc_commit_streams(struct dc *dc, +				 struct dc_stream_state *streams[], +				 uint8_t stream_count); + +/* TODO: When the transition to the new commit sequence is done, remove this + * function in favor of dc_commit_streams. 
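Another illustrative fragment (assumed usage, not part of the patch): committing through the new dc_commit_streams() interface instead of dc_commit_state(). The single stream gathered here stands in for whatever set of streams the DM currently wants enabled.

    static enum dc_status example_commit_enabled_streams(struct dc *dc,
                                                         struct dc_stream_state *stream)
    {
            struct dc_stream_state *streams[MAX_STREAMS];
            uint8_t stream_count = 0;

            /* a real DM would walk its CRTC/stream state here */
            streams[stream_count++] = stream;

            return dc_commit_streams(dc, streams, stream_count);
    }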
*/  bool dc_commit_state(struct dc *dc, struct dc_state *context);  struct dc_state *dc_create_state(struct dc *dc); @@ -1339,113 +1371,129 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);  void dc_retain_state(struct dc_state *context);  void dc_release_state(struct dc_state *context); -/******************************************************************************* - * Link Interfaces - ******************************************************************************/ - -struct dpcd_caps { -	union dpcd_rev dpcd_rev; -	union max_lane_count max_ln_count; -	union max_down_spread max_down_spread; -	union dprx_feature dprx_feature; - -	/* valid only for eDP v1.4 or higher*/ -	uint8_t edp_supported_link_rates_count; -	enum dc_link_rate edp_supported_link_rates[8]; - -	/* dongle type (DP converter, CV smart dongle) */ -	enum display_dongle_type dongle_type; -	bool is_dongle_type_one; -	/* branch device or sink device */ -	bool is_branch_dev; -	/* Dongle's downstream count. */ -	union sink_count sink_count; -	bool is_mst_capable; -	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, -	indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ -	struct dc_dongle_caps dongle_caps; - -	uint32_t sink_dev_id; -	int8_t sink_dev_id_str[6]; -	int8_t sink_hw_revision; -	int8_t sink_fw_revision[2]; - -	uint32_t branch_dev_id; -	int8_t branch_dev_name[6]; -	int8_t branch_hw_revision; -	int8_t branch_fw_revision[2]; - -	bool allow_invalid_MSA_timing_param; -	bool panel_mode_edp; -	bool dpcd_display_control_capable; -	bool ext_receiver_cap_field_present; -	bool set_power_state_capable_edp; -	bool dynamic_backlight_capable_edp; -	union dpcd_fec_capability fec_cap; -	struct dpcd_dsc_capabilities dsc_caps; -	struct dc_lttpr_caps lttpr_caps; -	struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; - -	union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; -	union dp_main_line_channel_coding_cap channel_coding_cap; -	union dp_sink_video_fallback_formats fallback_formats; -	union dp_fec_capability1 fec_cap1; -	union dp_cable_id cable_id; -	uint8_t edp_rev; -	union edp_alpm_caps alpm_caps; -	struct edp_psr_info psr_info; -}; - -union dpcd_sink_ext_caps { -	struct { -		/* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode -		 * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode. -		 */ -		uint8_t sdr_aux_backlight_control : 1; -		uint8_t hdr_aux_backlight_control : 1; -		uint8_t reserved_1 : 2; -		uint8_t oled : 1; -		uint8_t reserved : 3; -	} bits; -	uint8_t raw; -}; +struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, +		struct dc_stream_state *stream, +		int mpcc_inst); -#if defined(CONFIG_DRM_AMD_DC_HDCP) -union hdcp_rx_caps { -	struct { -		uint8_t version; -		uint8_t reserved; -		struct { -			uint8_t repeater	: 1; -			uint8_t hdcp_capable	: 1; -			uint8_t reserved	: 6; -		} byte0; -	} fields; -	uint8_t raw[3]; -}; -union hdcp_bcaps { -	struct { -		uint8_t HDCP_CAPABLE:1; -		uint8_t REPEATER:1; -		uint8_t RESERVED:6; -	} bits; -	uint8_t raw; -}; +uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); -struct hdcp_caps { -	union hdcp_rx_caps rx_caps; -	union hdcp_bcaps bcaps; -}; +/* Link Interfaces */ +/* TODO: remove this after resolving external dependencies */ +#include "dc_link.h" + +/* The function initiates detection handshake over the given link. It first + * determines if there are display connections over the link. If so it initiates + * detection protocols supported by the connected receiver device. 
The function + * contains protocol specific handshake sequences which are sometimes mandatory + * to establish a proper connection between TX and RX. So it is always + * recommended to call this function as the first link operation upon HPD event + * or power up event. Upon completion, the function will update link structure + * in place based on latest RX capabilities. The function may also cause dpms + * to be reset to off for all currently enabled streams to the link. It is DM's + * responsibility to serialize detection and DPMS updates. + * + * @reason - Indicate which event triggers this detection. dc may customize + * detection flow depending on the triggering events. + * return false - if detection is not fully completed. This could happen when + * there is an unrecoverable error during detection or detection is partially + * completed (detection has been delegated to dm mst manager ie. + * link->connection_type == dc_connection_mst_branch when returning false). + * return true - detection is completed, link has been fully updated with latest + * detection result. + */ +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason); + +/* determine if there is a sink connected to the link + * + * @type - dc_connection_single if connected, dc_connection_none otherwise. + * return - false if an unexpected error occurs, true otherwise. + * + * NOTE: This function doesn't detect downstream sink connections i.e + * dc_connection_mst_branch, dc_connection_sst_branch. In this case, it will + * return dc_connection_single if the branch device is connected despite of + * downstream sink's connection status. + */ +bool dc_link_detect_connection_type(struct dc_link *link, +		enum dc_connection_type *type); + +/* Getter for cached link status from given link */ +const struct dc_link_status *dc_link_get_status(const struct dc_link *link); + +#ifdef CONFIG_DRM_AMD_DC_HDCP +/* return true if the connected receiver supports the hdcp version */ +bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal); +bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);  #endif -#include "dc_link.h" +/* The function clears recorded DP RX states in the link. DM should call this + * function when it is resuming from S3 power state to previously connected links. + * + * TODO - in the future we should consider to expand link resume interface to + * support clearing previous rx states. So we don't have to rely on dm to call + * this interface explicitly. + */ +void dc_link_clear_dprx_states(struct dc_link *link); -uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +/* Destruct the mst topology of the link and reset the allocated payload table + * + * NOTE: this should only be called if DM chooses not to call dc_link_detect but + * still wants to reset MST topology on an unplug event */ +bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link); -/******************************************************************************* - * Sink Interfaces - A sink corresponds to a display output device - ******************************************************************************/ +/* The function calculates effective DP link bandwidth when a given link is + * using the given link settings. + * + * return - total effective link bandwidth in kbps. 
+ */ +uint32_t dc_link_bandwidth_kbps( +	const struct dc_link *link, +	const struct dc_link_settings *link_setting); + +/* The function returns minimum bandwidth required to drive a given timing + * return - minimum required timing bandwidth in kbps. + */ +uint32_t dc_bandwidth_in_kbps_from_timing( +	const struct dc_crtc_timing *timing); + +/* The function takes a snapshot of current link resource allocation state + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to capture a snapshot of current link resource allocation mapping + * and store it in its persistent storage. + * + * Some of the link resource is using first come first serve policy. + * The allocation mapping depends on original hotplug order. This information + * is lost after driver is loaded next time. The snapshot is used in order to + * restore link resource to its previous state so user will get consistent + * link capability allocation across reboot. + * + */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); + +/* This function restores link resource allocation state from a snapshot + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to call this function after initial link detection on boot and + * before first commit streams to restore link resource allocation state + * from previous boot session. + * + * Some of the link resource is using first come first serve policy. + * The allocation mapping depends on original hotplug order. This information + * is lost after driver is loaded next time. The snapshot is used in order to + * restore link resource to its previous state so user will get consistent + * link capability allocation across reboot. + * + */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); + +/* TODO: this is not meant to be exposed to DM. 
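Sketch of the intended call sequence for the snapshot/restore pair above (illustrative only; adev and the dm_write_persistent_map()/dm_read_persistent_map() storage helpers are hypothetical, not real API):

    uint32_t res_map = 0;

    /* before shutdown: capture the current link resource allocation */
    dc_get_cur_link_res_map(dc, &res_map);
    dm_write_persistent_map(adev, res_map);         /* hypothetical DM storage */

    /* next boot: after initial link detection, before the first commit */
    if (dm_read_persistent_map(adev, &res_map))     /* hypothetical DM storage */
            dc_restore_link_res_map(dc, &res_map);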
Should switch to stream update + * interface i.e stream_update->dsc_config + */ +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx); +/* Sink Interfaces - A sink corresponds to a display output device */  struct dc_container_id {  	// 128bit GUID in binary form @@ -1476,6 +1524,11 @@ struct dc_sink_fec_caps {  	bool is_topology_fec_supported;  }; +struct scdc_caps { +	union hdmi_scdc_manufacturer_OUI_data manufacturer_OUI; +	union hdmi_scdc_device_id_data device_id; +}; +  /*   * The sink structure contains EDID and other display device properties   */ @@ -1489,6 +1542,7 @@ struct dc_sink {  	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];  	bool converter_disable_audio; +	struct scdc_caps scdc_caps;  	struct dc_sink_dsc_caps dsc_caps;  	struct dc_sink_fec_caps fec_caps; @@ -1528,9 +1582,7 @@ struct dc_cursor {  }; -/******************************************************************************* - * Interrupt interfaces - ******************************************************************************/ +/* Interrupt interfaces */  enum dc_irq_source dc_interrupt_to_irq_source(  		struct dc *dc,  		uint32_t src_id, @@ -1542,9 +1594,7 @@ enum dc_irq_source dc_get_hpd_irq_source_at_index(  void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable); -/******************************************************************************* - * Power Interfaces - ******************************************************************************/ +/* Power Interfaces */  void dc_set_power_state(  		struct dc *dc, @@ -1617,14 +1667,10 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,  void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,  				uint32_t hpd_int_enable); -/******************************************************************************* - * DSC Interfaces - ******************************************************************************/ +/* DSC Interfaces */  #include "dc_dsc.h" -/******************************************************************************* - * Disable acc mode Interfaces - ******************************************************************************/ +/* Disable acc mode Interfaces */  void dc_disable_accelerated_mode(struct dc *dc);  #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 260ac4458870..be9aa1a71847 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -140,7 +140,8 @@ struct dc_vbios_funcs {  	enum bp_result (*enable_lvtma_control)(  		struct dc_bios *bios,  		uint8_t uc_pwr_on, -		uint8_t panel_instance); +		uint8_t panel_instance, +		uint8_t bypass_panel_control_wait);  	enum bp_result (*get_soc_bb_info)(  		struct dc_bios *dcb, diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 7769bd099a5a..428e3a9ab65a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -77,6 +77,32 @@ struct aux_reply_transaction_data {  	uint8_t *data;  }; +struct aux_payload { +	/* set following flag to read/write I2C data, +	 * reset it to read/write DPCD data */ +	bool i2c_over_aux; +	/* set following flag to write data, +	 * reset it to read data */ +	bool write; +	bool mot; +	bool write_status_update; + +	uint32_t address; +	uint32_t length; +	uint8_t *data; +	/* +	 * used to return the reply type of the transaction +	 * ignored if NULL +	
 */ +	uint8_t *reply; +	/* expressed in milliseconds +	 * zero means "use default value" +	 */ +	uint32_t defer_delay; + +}; +#define DEFAULT_AUX_MAX_DATA_SIZE 16 +  struct i2c_payload {  	bool write;  	uint8_t address; @@ -90,6 +116,8 @@ enum i2c_command_engine {  	I2C_COMMAND_ENGINE_HW  }; +#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW +  struct i2c_command {  	struct i2c_payload *payloads;  	uint8_t number_of_payloads; @@ -150,6 +178,9 @@ enum display_dongle_type {  	DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,  }; +#define DC_MAX_EDID_BUFFER_SIZE 2048 +#define DC_EDID_BLOCK_SIZE 128 +  struct ddc_service {  	struct ddc *ddc_pin;  	struct ddc_flags flags; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 0541e87e4f38..c2092775ca88 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -423,25 +423,20 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi  #ifdef CONFIG_DRM_AMD_DC_DCN  /** - * *********************************************************************************************** - * populate_subvp_cmd_drr_info: Helper to populate DRR pipe info for the DMCUB subvp command + * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command   * - * Populate the DMCUB SubVP command with DRR pipe info. All the information required for calculating - * the SubVP + DRR microschedule is populated here. + * @dc: [in] current dc state + * @subvp_pipe: [in] pipe_ctx for the SubVP pipe + * @vblank_pipe: [in] pipe_ctx for the DRR pipe + * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info + * + * Populate the DMCUB SubVP command with DRR pipe info. All the information + * required for calculating the SubVP + DRR microschedule is populated here.   *   * High level algorithm:   * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe   * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule   * 3. Populate the drr_info with the min and max supported vtotal values - * - * @param [in] dc: current dc state - * @param [in] subvp_pipe: pipe_ctx for the SubVP pipe - * @param [in] vblank_pipe: pipe_ctx for the DRR pipe - * @param [in] pipe_data: Pipe data which stores the VBLANK/DRR info - * - * @return: void - * - * ***********************************************************************************************   */  static void populate_subvp_cmd_drr_info(struct dc *dc,  		struct pipe_ctx *subvp_pipe, @@ -482,33 +477,38 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,  			(((uint64_t)main_timing->pix_clk_100hz * 100)));  	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),  			(((uint64_t)drr_timing->pix_clk_100hz * 100))); -	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us; -	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us; +	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - +			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us; +	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;  	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? 
max_drr_vblank_us : max_drr_mallregion_us;  	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),  			(((uint64_t)drr_timing->h_total * 1000000))); +	/* When calculating the max vtotal supported for SubVP + DRR cases, add +	 * margin due to possible rounding errors (being off by 1 line in the +	 * FW calculation can incorrectly push the P-State switch to wait 1 frame +	 * longer). +	 */ +	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us; +  	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;  	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported; +	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;  }  /** - * *********************************************************************************************** - * populate_subvp_cmd_vblank_pipe_info: Helper to populate VBLANK pipe info for the DMUB subvp command - * - * Populate the DMCUB SubVP command with VBLANK pipe info. All the information required to calculate - * the microschedule for SubVP + VBLANK case is stored in the pipe_data (subvp_data and vblank_data). - * Also check if the VBLANK pipe is a DRR display -- if it is make a call to populate drr_info. - * - * @param [in] dc: current dc state - * @param [in] context: new dc state - * @param [in] cmd: DMUB cmd to be populated with SubVP info - * @param [in] vblank_pipe: pipe_ctx for the VBLANK pipe - * @param [in] cmd_pipe_index: index for the pipe array in DMCUB SubVP cmd + * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command   * - * @return: void + * @dc: [in] current dc state + * @context: [in] new dc state + * @cmd: [in] DMUB cmd to be populated with SubVP info + * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe + * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd   * - * *********************************************************************************************** + * Populate the DMCUB SubVP command with VBLANK pipe info. All the information + * required to calculate the microschedule for SubVP + VBLANK case is stored in + * the pipe_data (subvp_data and vblank_data).  Also check if the VBLANK pipe + * is a DRR display -- if it is make a call to populate drr_info.   */  static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,  		struct dc_state *context, @@ -551,22 +551,18 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,  }  /** - * *********************************************************************************************** - * update_subvp_prefetch_end_to_mall_start: Helper for SubVP + SubVP case + * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case   * - * For SubVP + SubVP, we use a single vertical interrupt to start the microschedule for both - * SubVP pipes. In order for this to work correctly, the MALL REGION of both SubVP pipes must - * start at the same time. This function lengthens the prefetch end to mall start delay of the - * SubVP pipe that has the shorter prefetch so that both MALL REGION's will start at the same time. 
+ * @dc: [in] current dc state + * @context: [in] new dc state + * @cmd: [in] DMUB cmd to be populated with SubVP info + * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)   * - * @param [in] dc: current dc state - * @param [in] context: new dc state - * @param [in] cmd: DMUB cmd to be populated with SubVP info - * @param [in] subvp_pipes: Array of SubVP pipes (should always be length 2) - * - * @return: void - * - * *********************************************************************************************** + * For SubVP + SubVP, we use a single vertical interrupt to start the + * microschedule for both SubVP pipes. In order for this to work correctly, the + * MALL REGION of both SubVP pipes must start at the same time. This function + * lengthens the prefetch end to mall start delay of the SubVP pipe that has + * the shorter prefetch so that both MALL REGION's will start at the same time.   */  static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,  		struct dc_state *context, @@ -608,22 +604,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,  }  /** - * *************************************************************************************** - * setup_subvp_dmub_command: Helper to populate the SubVP pipe info for the DMUB subvp command - * - * Populate the DMCUB SubVP command with SubVP pipe info. All the information required to - * calculate the microschedule for the SubVP pipe is stored in the pipe_data of the DMCUB - * SubVP command. - * - * @param [in] dc: current dc state - * @param [in] context: new dc state - * @param [in] cmd: DMUB cmd to be populated with SubVP info - * @param [in] subvp_pipe: pipe_ctx for the SubVP pipe - * @param [in] cmd_pipe_index: index for the pipe array in DMCUB SubVP cmd + * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command   * - * @return: void + * @dc: [in] current dc state + * @context: [in] new dc state + * @cmd: [in] DMUB cmd to be populated with SubVP info + * @subvp_pipe: [in] pipe_ctx for the SubVP pipe + * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd   * - * *************************************************************************************** + * Populate the DMCUB SubVP command with SubVP pipe info. All the information + * required to calculate the microschedule for the SubVP pipe is stored in the + * pipe_data of the DMCUB SubVP command.   */  static void populate_subvp_cmd_pipe_info(struct dc *dc,  		struct dc_state *context, @@ -703,19 +694,14 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,  }  /** - * *************************************************************************************** - * dc_dmub_setup_subvp_dmub_command: Populate the DMCUB SubVP command + * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command   * - * This function loops through each pipe and populates the DMUB - * SubVP CMD info based on the pipe (e.g. SubVP, VBLANK). + * @dc: [in] current dc state + * @context: [in] new dc state + * @enable: [in] if true enables the pipes population   * - * @param [in] dc: current dc state - * @param [in] context: new dc state - * @param [in] cmd: DMUB cmd to be populated with SubVP info - * - * @return: void - * - * *************************************************************************************** + * This function loops through each pipe and populates the DMUB SubVP CMD info + * based on the pipe (e.g. SubVP, VBLANK).   
*/  void dc_dmub_setup_subvp_dmub_command(struct dc *dc,  		struct dc_state *context, @@ -882,11 +868,59 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)  		diag_data.is_cw6_enabled);  } +static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) +{ +	struct pipe_ctx *test_pipe, *split_pipe; +	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; +	struct rect r1 = scl_data->recout, r2, r2_half; +	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; +	int cur_layer = pipe_ctx->plane_state->layer_index; + +	/** +	 * Disable the cursor if there's another pipe above this with a +	 * plane that contains this pipe's viewport to prevent double cursor +	 * and incorrect scaling artifacts. +	 */ +	for (test_pipe = pipe_ctx->top_pipe; test_pipe; +	     test_pipe = test_pipe->top_pipe) { +		// Skip invisible layer and pipe-split plane on same layer +		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer) +			continue; + +		r2 = test_pipe->plane_res.scl_data.recout; +		r2_r = r2.x + r2.width; +		r2_b = r2.y + r2.height; +		split_pipe = test_pipe; + +		/** +		 * There is another half plane on same layer because of +		 * pipe-split, merge together per same height. +		 */ +		for (split_pipe = pipe_ctx->top_pipe; split_pipe; +		     split_pipe = split_pipe->top_pipe) +			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { +				r2_half = split_pipe->plane_res.scl_data.recout; +				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; +				r2.width = r2.width + r2_half.width; +				r2_r = r2.x + r2.width; +				break; +			} + +		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) +			return true; +	} + +	return false; +} +  static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)  {  	if (pipe_ctx->plane_state != NULL) {  		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)  			return false; + +		if (dc_can_pipe_disable_cursor(pipe_ctx)) +			return false;  	}  	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || @@ -962,19 +996,14 @@ static void dc_build_cursor_attribute_update_payload1(  }  /** - * *************************************************************************************** - * dc_send_update_cursor_info_to_dmu: Populate the DMCUB Cursor update info command - * - * This function would store the cursor related information and pass it into dmub + * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command   * - * @param [in] pCtx: pipe context - * @param [in] pipe_idx: pipe index + * @pCtx: [in] pipe context + * @pipe_idx: [in] pipe index   * - * @return: void - * - * *************************************************************************************** + * This function would store the cursor related information and pass it into + * dmub   */ -  void dc_send_update_cursor_info_to_dmu(  		struct pipe_ctx *pCtx, uint8_t pipe_idx)  { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 2c54b6e0498b..809a1851f196 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -27,6 +27,7 @@  #define DC_DP_TYPES_H  #include "os_types.h" +#include "dc_ddc_types.h"  enum dc_lane_count {  	LANE_COUNT_UNKNOWN = 0, @@ -149,7 +150,6 @@ struct dc_link_settings {  	enum dc_link_spread link_spread;  	bool use_link_rate_set;  	uint8_t link_rate_set; -	bool 
dpcd_source_device_specific_field_support;  };  union dc_dp_ffe_preset { @@ -362,14 +362,10 @@ enum dpcd_downstream_port_detailed_type {  union dwnstream_port_caps_byte2 {  	struct {  		uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; -#if defined(CONFIG_DRM_AMD_DC_DCN)  		uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;  		uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;  		uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;  		uint8_t RESERVED:1; -#else -		uint8_t RESERVED:6; -#endif  	} bits;  	uint8_t raw;  }; @@ -407,7 +403,6 @@ union dwnstream_port_caps_byte3_hdmi {  	uint8_t raw;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN)  union hdmi_sink_encoded_link_bw_support {  	struct {  		uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; @@ -429,7 +424,6 @@ union hdmi_encoded_link_bw {  	} bits;  	uint8_t raw;  }; -#endif  /*4-byte structure for detailed capabilities of a down-stream port  (DP-to-TMDS converter).*/ @@ -509,7 +503,11 @@ union down_spread_ctrl {  	1 = Main link signal is downspread <= 0.5%  	with frequency in the range of 30kHz ~ 33kHz*/  		uint8_t SPREAD_AMP:1; -		uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/ +		uint8_t RESERVED2:1;/*Bit 5 = RESERVED. Read all 0s*/ +	/* Bit 6 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE. +	0 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is not enabled by the Source device (default) +	1 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is enabled by Source device */ +		uint8_t FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE:1;  	/*Bit 7 = MSA_TIMING_PAR_IGNORE_EN  	0 = Source device will send valid data for the MSA Timing Params  	1 = Source device may send invalid data for these MSA Timing Params*/ @@ -865,6 +863,21 @@ struct psr_caps {  	unsigned int psr_power_opt_flag;  }; +union dpcd_dprx_feature_enumeration_list_cont_1 { +	struct { +		uint8_t ADAPTIVE_SYNC_SDP_SUPPORT:1; +		uint8_t AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED: 1; +		uint8_t RESERVED0: 2; +		uint8_t VSC_EXT_SDP_VER1_SUPPORT: 1; +		uint8_t RESERVED1: 3; +	} bits; +	uint8_t raw; +}; + +struct adaptive_sync_caps { +	union dpcd_dprx_feature_enumeration_list_cont_1 dp_adap_sync_caps; +}; +  /* Length of router topology ID read from DPCD in bytes. 
*/  #define DPCD_USB4_TOPOLOGY_ID_LEN 5 @@ -926,6 +939,9 @@ struct dpcd_usb4_dp_tunneling_info {  #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL  #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL		0x2216  #endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN				0x10F +#endif  #ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX  #define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX		0x2217  #endif @@ -973,6 +989,9 @@ struct dpcd_usb4_dp_tunneling_info {  /* TODO - Use DRM header to replace above once available */  #endif // DP_INTRA_HOP_AUX_REPLY_INDICATION +#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE	0x50 +#endif  union dp_main_line_channel_coding_cap {  	struct {  		uint8_t DP_8b_10b_SUPPORTED	:1; @@ -1107,4 +1126,139 @@ struct edp_psr_info {  	uint8_t force_psrsu_cap;  }; +struct dprx_states { +	bool cable_id_written; +}; + +enum dpcd_downstream_port_max_bpc { +	DOWN_STREAM_MAX_8BPC = 0, +	DOWN_STREAM_MAX_10BPC, +	DOWN_STREAM_MAX_12BPC, +	DOWN_STREAM_MAX_16BPC +}; + +enum link_training_offset { +	DPRX                = 0, +	LTTPR_PHY_REPEATER1 = 1, +	LTTPR_PHY_REPEATER2 = 2, +	LTTPR_PHY_REPEATER3 = 3, +	LTTPR_PHY_REPEATER4 = 4, +	LTTPR_PHY_REPEATER5 = 5, +	LTTPR_PHY_REPEATER6 = 6, +	LTTPR_PHY_REPEATER7 = 7, +	LTTPR_PHY_REPEATER8 = 8 +}; + +#define MAX_REPEATER_CNT 8 + +struct dc_lttpr_caps { +	union dpcd_rev revision; +	uint8_t mode; +	uint8_t max_lane_count; +	uint8_t max_link_rate; +	uint8_t phy_repeater_cnt; +	uint8_t max_ext_timeout; +	union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; +	union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; +	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; + +struct dc_dongle_dfp_cap_ext { +	bool supported; +	uint16_t max_pixel_rate_in_mps; +	uint16_t max_video_h_active_width; +	uint16_t max_video_v_active_height; +	struct dp_encoding_format_caps encoding_format_caps; +	struct dp_color_depth_caps rgb_color_depth_caps; +	struct dp_color_depth_caps ycbcr444_color_depth_caps; +	struct dp_color_depth_caps ycbcr422_color_depth_caps; +	struct dp_color_depth_caps ycbcr420_color_depth_caps; +}; + +struct dc_dongle_caps { +	/* dongle type (DP converter, CV smart dongle) */ +	enum display_dongle_type dongle_type; +	bool extendedCapValid; +	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, +	indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ +	bool is_dp_hdmi_s3d_converter; +	bool is_dp_hdmi_ycbcr422_pass_through; +	bool is_dp_hdmi_ycbcr420_pass_through; +	bool is_dp_hdmi_ycbcr422_converter; +	bool is_dp_hdmi_ycbcr420_converter; +	uint32_t dp_hdmi_max_bpc; +	uint32_t dp_hdmi_max_pixel_clk_in_khz; +	uint32_t dp_hdmi_frl_max_link_bw_in_kbps; +	struct dc_dongle_dfp_cap_ext dfp_cap_ext; +}; + +struct dpcd_caps { +	union dpcd_rev dpcd_rev; +	union max_lane_count max_ln_count; +	union max_down_spread max_down_spread; +	union dprx_feature dprx_feature; + +	/* valid only for eDP v1.4 or higher*/ +	uint8_t edp_supported_link_rates_count; +	enum dc_link_rate edp_supported_link_rates[8]; + +	/* dongle type (DP converter, CV smart dongle) */ +	enum display_dongle_type dongle_type; +	bool is_dongle_type_one; +	/* branch device or sink device */ +	bool is_branch_dev; +	/* Dongle's downstream count. 
*/ +	union sink_count sink_count; +	bool is_mst_capable; +	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, +	indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ +	struct dc_dongle_caps dongle_caps; + +	uint32_t sink_dev_id; +	int8_t sink_dev_id_str[6]; +	int8_t sink_hw_revision; +	int8_t sink_fw_revision[2]; + +	uint32_t branch_dev_id; +	int8_t branch_dev_name[6]; +	int8_t branch_hw_revision; +	int8_t branch_fw_revision[2]; + +	bool allow_invalid_MSA_timing_param; +	bool panel_mode_edp; +	bool dpcd_display_control_capable; +	bool ext_receiver_cap_field_present; +	bool set_power_state_capable_edp; +	bool dynamic_backlight_capable_edp; +	union dpcd_fec_capability fec_cap; +	struct dpcd_dsc_capabilities dsc_caps; +	struct dc_lttpr_caps lttpr_caps; +	struct adaptive_sync_caps adaptive_sync_caps; +	struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; + +	union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; +	union dp_main_line_channel_coding_cap channel_coding_cap; +	union dp_sink_video_fallback_formats fallback_formats; +	union dp_fec_capability1 fec_cap1; +	union dp_cable_id cable_id; +	uint8_t edp_rev; +	union edp_alpm_caps alpm_caps; +	struct edp_psr_info psr_info; +}; + +union dpcd_sink_ext_caps { +	struct { +		/* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode +		 * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode. +		 */ +		uint8_t sdr_aux_backlight_control : 1; +		uint8_t hdr_aux_backlight_control : 1; +		uint8_t reserved_1 : 2; +		uint8_t oled : 1; +		uint8_t reserved_2 : 1; +		uint8_t miniled : 1; +		uint8_t reserved : 1; +	} bits; +	uint8_t raw; +};  #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h new file mode 100644 index 000000000000..c364744b4c83 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h @@ -0,0 +1,134 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DC_HDMI_TYPES_H +#define DC_HDMI_TYPES_H + +#include "os_types.h" + +/* Address range from 0x00 to 0x1F.*/ +#define DP_ADAPTOR_TYPE2_SIZE 0x20 +#define DP_ADAPTOR_TYPE2_REG_ID 0x10 +#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D +/* Identifies adaptor as Dual-mode adaptor */ +#define DP_ADAPTOR_TYPE2_ID 0xA0 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 +/* kHz*/ +#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 +/* kHz*/ +#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 + +struct dp_hdmi_dongle_signature_data { +	int8_t id[15];/* "DP-HDMI ADAPTOR"*/ +	uint8_t eot;/* end of transmission '\x4' */ +}; + +/* DP-HDMI dongle slave address for retrieving dongle signature*/ +#define DP_HDMI_DONGLE_ADDRESS 0x40 +static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; +#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 + + +/* SCDC Address defines (HDMI 2.0)*/ +#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 +#define HDMI_SCDC_ADDRESS  0x54 +#define HDMI_SCDC_SINK_VERSION 0x01 +#define HDMI_SCDC_SOURCE_VERSION 0x02 +#define HDMI_SCDC_UPDATE_0 0x10 +#define HDMI_SCDC_TMDS_CONFIG 0x20 +#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 +#define HDMI_SCDC_CONFIG_0 0x30 +#define HDMI_SCDC_CONFIG_1 0x31 +#define HDMI_SCDC_SOURCE_TEST_REQ 0x35 +#define HDMI_SCDC_STATUS_FLAGS 0x40 +#define HDMI_SCDC_ERR_DETECT 0x50 +#define HDMI_SCDC_TEST_CONFIG 0xC0 + +#define HDMI_SCDC_MANUFACTURER_OUI 0xD0 +#define HDMI_SCDC_DEVICE_ID 0xDB + +union hdmi_scdc_update_read_data { +	uint8_t byte[2]; +	struct { +		uint8_t STATUS_UPDATE:1; +		uint8_t CED_UPDATE:1; +		uint8_t RR_TEST:1; +		uint8_t RESERVED:5; +		uint8_t RESERVED2:8; +	} fields; +}; + +union hdmi_scdc_status_flags_data { +	uint8_t byte; +	struct { +		uint8_t CLOCK_DETECTED:1; +		uint8_t CH0_LOCKED:1; +		uint8_t CH1_LOCKED:1; +		uint8_t CH2_LOCKED:1; +		uint8_t RESERVED:4; +	} fields; +}; + +union hdmi_scdc_ced_data { +	uint8_t byte[11]; +	struct { +		uint8_t CH0_8LOW:8; +		uint8_t CH0_7HIGH:7; +		uint8_t CH0_VALID:1; +		uint8_t CH1_8LOW:8; +		uint8_t CH1_7HIGH:7; +		uint8_t CH1_VALID:1; +		uint8_t CH2_8LOW:8; +		uint8_t CH2_7HIGH:7; +		uint8_t CH2_VALID:1; +		uint8_t CHECKSUM:8; +		uint8_t RESERVED:8; +		uint8_t RESERVED2:8; +		uint8_t RESERVED3:8; +		uint8_t RESERVED4:4; +	} fields; +}; + +union hdmi_scdc_manufacturer_OUI_data { +	uint8_t byte[3]; +	struct { +		uint8_t Manufacturer_OUI_1:8; +		uint8_t Manufacturer_OUI_2:8; +		uint8_t Manufacturer_OUI_3:8; +	} fields; +}; + +union hdmi_scdc_device_id_data { +	uint8_t byte; +	struct { +		uint8_t Hardware_Minor_Rev:4; +		uint8_t Hardware_Major_Rev:4; +	} fields; +}; + +#endif /* DC_HDMI_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 848db8676adf..cc3d6fb39364 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -797,6 +797,29 @@ enum dc_timing_3d_format {  	TIMING_3D_FORMAT_MAX,  }; +#define DC_DSC_QP_SET_SIZE 15 +#define DC_DSC_RC_BUF_THRESH_SIZE 14 +struct dc_dsc_rc_params_override { +	int32_t rc_model_size; +	int32_t rc_buf_thresh[DC_DSC_RC_BUF_THRESH_SIZE]; +	int32_t rc_minqp[DC_DSC_QP_SET_SIZE]; +	int32_t rc_maxqp[DC_DSC_QP_SET_SIZE]; +	int32_t rc_offset[DC_DSC_QP_SET_SIZE]; + +	int32_t rc_tgt_offset_hi; +	int32_t rc_tgt_offset_lo; +	int32_t rc_edge_factor; +	int32_t rc_quant_incr_limit0; +	int32_t rc_quant_incr_limit1; + +	int32_t initial_fullness_offset; +	int32_t initial_delay; + +	int32_t
flatness_min_qp; +	int32_t flatness_max_qp; +	int32_t flatness_det_thresh; +}; +  struct dc_dsc_config {  	uint32_t num_slices_h; /* Number of DSC slices - horizontal */  	uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -811,6 +834,7 @@ struct dc_dsc_config {  #endif  	bool is_dp; /* indicate if DSC is applied based on DP's capability */  	uint32_t mst_pbn; /* pbn of display on dsc mst hub */ +	const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */  };  /** diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index caf0c7af2d0b..cecd807f5ed8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -31,6 +31,7 @@  #include "grph_object_defs.h"  struct link_resource; +enum aux_return_code_type;  enum dc_link_fec_state {  	dc_link_fec_not_ready, @@ -38,15 +39,6 @@ enum dc_link_fec_state {  	dc_link_fec_enabled  }; -struct dc_link_status { -	bool link_active; -	struct dpcd_caps *dpcd_caps; -}; - -struct dprx_states { -	bool cable_id_written; -}; -  /* DP MST stream allocation (payload bandwidth number) */  struct link_mst_stream_allocation {  	/* DIG front */ @@ -101,6 +93,7 @@ struct psr_settings {  	bool psr_allow_active;			// PSR is currently active  	enum dc_psr_version psr_version;		// Internal PSR version, determined based on DPCD  	bool psr_vtotal_control_support;	// Vtotal control is supported by sink +	unsigned long long psr_dirty_rects_change_timestamp_ns;	// for delay of enabling PSR-SU  	/* These parameters are calculated in Driver,  	 * based on display timing and Sink capabilities. @@ -117,7 +110,7 @@ struct psr_settings {   * Add a struct dc_panel_config under dc_link   */  struct dc_panel_config { -	// extra panel power sequence parameters +	/* extra panel power sequence parameters */  	struct pps {  		unsigned int extra_t3_ms;  		unsigned int extra_t7_ms; @@ -127,13 +120,21 @@ struct dc_panel_config {  		unsigned int extra_t12_ms;  		unsigned int extra_post_OUI_ms;  	} pps; -	// ABM +	/* PSR */ +	struct psr { +		bool disable_psr; +		bool disallow_psrsu; +		bool rc_disable; +		bool rc_allow_static_screen; +		bool rc_allow_fullscreen_VPB; +	} psr; +	/* ABM */  	struct varib {  		unsigned int varibright_feature_enable;  		unsigned int def_varibright_level;  		unsigned int abm_config_setting;  	} varib; -	// edp DSC +	/* edp DSC */  	struct dsc {  		bool disable_dsc_edp;  		unsigned int force_dsc_edp_policy; @@ -143,6 +144,22 @@ struct dc_panel_config {  		bool optimize_edp_link_rate; /* eDP ILR */  	} ilr;  }; + +/* + *  USB4 DPIA BW ALLOCATION STRUCTS + */ +struct dc_dpia_bw_alloc { +	int sink_verified_bw;  // The Verified BW that sink can allocated and use that has been verified already +	int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated +	int sink_max_bw;       // The Max BW that sink can require/support +	int estimated_bw;      // The estimated available BW for this DPIA +	int bw_granularity;    // BW Granularity +	bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM +	bool response_ready;   // Response ready from the CM side +}; + +#define MAX_SINKS_PER_LINK 4 +  /*   * A link contains one or more sinks and their connected status.   * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. 
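As a rough illustration of how the dc_dpia_bw_alloc bookkeeping added above could be consumed, the sketch below checks a requested bandwidth against the DPIA's current estimate and rounds it up to the allocation granularity. The helper names example_round_req_bw() and example_req_bw_fits() are hypothetical and not part of this patch; only the struct and its fields (bw_alloc_enabled, bw_granularity, estimated_bw) come from the change itself, and their semantics are assumed from the field comments.

/* Illustrative sketch only, not part of this patch. Assumes estimated_bw is
 * the BW currently available to this DPIA and bw_granularity is the unit in
 * which the CM hands out bandwidth, as the field comments above describe.
 */
static int example_round_req_bw(const struct dc_dpia_bw_alloc *alloc, int req_bw)
{
	int units;

	if (!alloc->bw_alloc_enabled || alloc->bw_granularity <= 0)
		return 0;

	/* Round the request up to a whole number of granularity units. */
	units = (req_bw + alloc->bw_granularity - 1) / alloc->bw_granularity;
	return units * alloc->bw_granularity;
}

static bool example_req_bw_fits(const struct dc_dpia_bw_alloc *alloc, int req_bw)
{
	int rounded = example_round_req_bw(alloc, req_bw);

	/* Only accept a request that stays within the CM's current estimate. */
	return rounded > 0 && rounded <= alloc->estimated_bw;
}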
@@ -158,6 +175,14 @@ struct dc_link {  	enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse  */  	bool is_hpd_filter_disabled;  	bool dp_ss_off; + +	/** +	 * @link_state_valid: +	 * +	 * If there is no link and local sink, this variable should be set to +	 * false. Otherwise, it should be set to true; usually, the function +	 * core_link_enable_stream sets this field to true. +	 */  	bool link_state_valid;  	bool aux_access_disabled;  	bool sync_lt_in_progress; @@ -168,6 +193,7 @@ struct dc_link {  	bool is_dig_mapping_flexible;  	bool hpd_status; /* HPD status of link without physical HPD pin. */  	bool is_hpd_pending; /* Indicates a new received hpd */ +	bool is_automated; /* Indicates automated testing */  	bool edp_sink_present; @@ -248,6 +274,7 @@ struct dc_link {  		bool dp_keep_receiver_powered;  		bool dp_skip_DID2;  		bool dp_skip_reset_segment; +		bool dp_skip_fs_144hz;  		bool dp_mot_reset_segment;  		/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */  		bool dpia_mst_dsc_always_on; @@ -262,11 +289,12 @@ struct dc_link {  	struct gpio *hpd_gpio;  	enum dc_link_fec_state fec_state; +	bool link_powered_externally;	// Used to bypass hardware sequencing delays when panel is powered down forcibly +  	struct dc_panel_config panel_config;  	struct phy_state phy_state;  }; -const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);  /**   * dc_get_link_at_index() - Return an enumerated dc_link. @@ -304,15 +332,17 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,  		unsigned int *inst_out)  {  	struct dc_link *edp_links[MAX_NUM_EDP]; -	int edp_num; +	int edp_num, i; +	*inst_out = 0;  	if (link->connector_signal != SIGNAL_TYPE_EDP)  		return false;  	get_edp_links(dc, edp_links, &edp_num); -	if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index)) -		*inst_out = 1; -	else -		*inst_out = 0; +	for (i = 0; i < edp_num; i++) { +		if (link == edp_links[i]) +			break; +		(*inst_out)++; +	}  	return true;  } @@ -334,11 +364,6 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,  		uint32_t *backlight_millinits,  		uint32_t *backlight_millinits_peak); -bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable); - -bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits); -bool dc_link_set_default_brightness_aux(struct dc_link *link); -  int dc_link_get_backlight_level(const struct dc_link *dc_link);  int dc_link_get_target_backlight_pwm(const struct dc_link *link); @@ -352,38 +377,7 @@ bool dc_link_setup_psr(struct dc_link *dc_link,  		const struct dc_stream_state *stream, struct psr_config *psr_config,  		struct psr_context *psr_context); -bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable); - -void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); - -void dc_link_blank_all_dp_displays(struct dc *dc); -void dc_link_blank_all_edp_displays(struct dc *dc); - -void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); -bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, -		uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); - -/* Request DC to detect if there is a Panel connected. - * boot - If this call is during initial boot. - * Return false for any type of detection failure or MST detection - * true otherwise. True meaning further action is required (status update - * and OS notification). 
- */ -enum dc_detect_reason { -	DETECT_REASON_BOOT, -	DETECT_REASON_RESUMEFROMS3S4, -	DETECT_REASON_HPD, -	DETECT_REASON_HPDRX, -	DETECT_REASON_FALLBACK, -	DETECT_REASON_RETRAIN, -	DETECT_REASON_TDR, -}; - -bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);  bool dc_link_get_hpd_state(struct dc_link *dc_link); -enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); -enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); -enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);  /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).   * Return: @@ -405,7 +399,11 @@ bool dc_link_wait_for_t12(struct dc_link *link);  void dc_link_dp_handle_automated_test(struct dc_link *link);  void dc_link_dp_handle_link_loss(struct dc_link *link);  bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link); - +bool dc_link_check_link_loss_status(struct dc_link *link, +		union hpd_irq_data *hpd_irq_dpcd_data); +enum dc_status dc_link_dp_read_hpd_rx_irq_data( +	struct dc_link *link, +	union hpd_irq_data *irq_data);  struct dc_sink_init_data;  struct dc_sink *dc_link_add_remote_sink( @@ -420,36 +418,6 @@ void dc_link_remove_remote_sink(  /* Used by diagnostics for virtual link at the moment */ -void dc_link_dp_set_drive_settings( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings); - -bool dc_link_dp_perform_link_training_skip_aux( -	struct dc_link *link, -	const struct link_resource *link_res, -	const struct dc_link_settings *link_setting); - -enum link_training_result dc_link_dp_perform_link_training( -	struct dc_link *link, -	const struct link_resource *link_res, -	const struct dc_link_settings *link_settings, -	bool skip_video_pattern); - -bool dc_link_dp_sync_lt_begin(struct dc_link *link); - -enum link_training_result dc_link_dp_sync_lt_attempt( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct dc_link_settings *link_setting, -	struct dc_link_training_overrides *lt_settings); - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down); - -void dc_link_dp_enable_hpd(const struct dc_link *link); - -void dc_link_dp_disable_hpd(const struct dc_link *link); -  bool dc_link_dp_set_test_pattern(  	struct dc_link *link,  	enum dp_test_pattern test_pattern, @@ -460,19 +428,28 @@ bool dc_link_dp_set_test_pattern(  bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap); +/** + ***************************************************************************** + *  Function: dc_link_enable_hpd_filter + * + *  @brief + *     If enable is true, programs HPD filter on associated HPD line to default + *     values dependent on link->connector_signal + * + *     If enable is false, programs HPD filter on associated HPD line with no + *     delays on connect or disconnect + * + *  @param [in] link: pointer to the dc link + *  @param [in] enable: boolean specifying whether to enable hpd + ***************************************************************************** + */  void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);  bool dc_link_is_dp_sink_present(struct dc_link *link); - -bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);  /*   * DPCD access interfaces   */ -#ifdef CONFIG_DRM_AMD_DC_HDCP -bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal); -bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type
signal); -#endif  void dc_link_set_drive_settings(struct dc *dc,  				struct link_training_settings *lt_settings,  				const struct dc_link *link); @@ -492,9 +469,6 @@ void dc_link_set_test_pattern(struct dc_link *link,  			const struct link_training_settings *p_link_settings,  			const unsigned char *p_custom_pattern,  			unsigned int cust_pattern_size); -uint32_t dc_link_bandwidth_kbps( -	const struct dc_link *link, -	const struct dc_link_settings *link_setting);  const struct dc_link_settings *dc_link_get_link_cap(  		const struct dc_link *link); @@ -516,25 +490,16 @@ bool dc_submit_i2c_oem(  		struct dc *dc,  		struct i2c_command *cmd); -uint32_t dc_bandwidth_in_kbps_from_timing( -	const struct dc_crtc_timing *timing); -  bool dc_link_is_fec_supported(const struct dc_link *link);  bool dc_link_should_enable_fec(const struct dc_link *link);  uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);  enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); -void dc_link_get_cur_link_res(const struct dc_link *link, -		struct link_resource *link_res);  /* take a snapshot of current link resource allocation state */  void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);  /* restore link resource allocation state from a snapshot */  void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); -void dc_link_clear_dprx_states(struct dc_link *link); -struct gpio *get_hpd_gpio(struct dc_bios *dcb, -		struct graphics_object_id link_id, -		struct gpio_service *gpio_service);  void dp_trace_reset(struct dc_link *link);  bool dc_dp_trace_is_initialized(struct dc_link *link);  unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, @@ -548,6 +513,65 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,  		bool in_detection);  unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); -/* Destruct the mst topology of the link and reset the allocated payload table */ -bool reset_cur_dp_mst_topology(struct dc_link *link); +/* Attempt to transfer the given aux payload. This function does not perform + * retries or handle error states. The reply is returned in the payload->reply + * and the result through operation_result. Returns the number of bytes + * transferred, or -1 on a failure. + */ +int dc_link_aux_transfer_raw(struct ddc_service *ddc, +		struct aux_payload *payload, +		enum aux_return_code_type *operation_result); + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, +		struct dc_link_settings *link_setting); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on); +bool dc_link_decide_edp_link_settings(struct dc_link *link, +		struct dc_link_settings *link_setting, +		uint32_t req_bw); +void dc_link_edp_panel_backlight_power_on(struct dc_link *link, +		bool wait_for_hpd); + +/* + *  USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS + */ +/* + * Send a request from DP-Tx requesting to allocate BW remotely after + * allocating it locally. This will get processed by CM and a CB function + * will be called. + * + * @link: pointer to the dc_link struct instance + * @req_bw: The requested bw in Kbyte to be allocated + * + * return: none + */ +void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw); + +/* + * CB function for when the status of the Req above is complete.
We will + * find out the result of allocating on CM and update structs accordingly + * + * @link: pointer to the dc_link struct instance + * @bw: Allocated or Estimated BW depending on the result + * @result: Response type + * + * return: none + */ +void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result); + +/* + * Handle the USB4 BW Allocation related functionality here: + * Plug => Try to allocate max bw from timing parameters supported by the sink + * Unplug => de-allocate bw + * + * @link: pointer to the dc_link struct instance + * @peak_bw: Peak bw used by the link/sink + * + * return: allocated bw else return 0 + */ +int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); + +/* TODO: this is not meant to be exposed to DM. Should switch to stream update + * interface i.e. stream_update->dsc_config + */ +bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);  #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 9e6025c98db9..567452599659 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -41,6 +41,10 @@ struct timing_sync_info {  struct dc_stream_status {  	int primary_otg_inst;  	int stream_enc_inst; + +	/** +	 * @plane_count: Total number of planes attached to a single stream +	 */  	int plane_count;  	int audio_inst;  	struct timing_sync_info timing_sync_info; @@ -140,7 +144,7 @@ struct test_pattern {  	unsigned int cust_pattern_size;  }; -#define SUBVP_DRR_MARGIN_US 500 // 500us for DRR margin (SubVP + DRR) +#define SUBVP_DRR_MARGIN_US 600 // 600us for DRR margin (SubVP + DRR)  enum mall_stream_type {  	SUBVP_NONE, // subvp not in use @@ -156,6 +160,17 @@ struct mall_stream_config {  	struct dc_stream_state *paired_stream;	// master / slave stream  }; +/* Temp struct used to save and restore MALL config + * during validation. + * + * TODO: Move MALL config into dc_state instead of stream struct + * to avoid needing to save/restore. + */ +struct mall_temp_config { +	struct mall_stream_config mall_stream_config[MAX_PIPES]; +	bool is_phantom_plane[MAX_PIPES]; +}; +  struct dc_stream_state {  	// sink is deprecated, new code should not reference  	// this pointer @@ -175,6 +190,7 @@ struct dc_stream_state {  	struct dc_info_packet vsp_infopacket;  	struct dc_info_packet hfvsif_infopacket;  	struct dc_info_packet vtem_infopacket; +	struct dc_info_packet adaptive_sync_infopacket;  	uint8_t dsc_packed_pps[128];  	struct rect src; /* composition area */  	struct rect dst; /* stream addressable area */ @@ -197,7 +213,18 @@ struct dc_stream_state {  	bool use_vsc_sdp_for_colorimetry;  	bool ignore_msa_timing_param; +	/** +	 * @allow_freesync: +	 * +	 * It says whether Freesync is enabled or not. +	 */  	bool allow_freesync; + +	/** +	 * @vrr_active_variable: +	 * +	 * It describes if VRR is in use.
+	 */  	bool vrr_active_variable;  	bool freesync_on_desktop; @@ -287,6 +314,7 @@ struct dc_stream_update {  	struct dc_info_packet *vsp_infopacket;  	struct dc_info_packet *hfvsif_infopacket;  	struct dc_info_packet *vtem_infopacket; +	struct dc_info_packet *adaptive_sync_infopacket;  	bool *dpms_off;  	bool integer_scaling_update;  	bool *allow_freesync; @@ -517,10 +545,9 @@ bool dc_stream_get_crtc_position(struct dc *dc,  				 unsigned int *nom_v_pos);  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, -			     struct crc_params *crc_window); -bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, -				 struct dc_stream_state *stream); +bool dc_stream_forward_crc_window(struct dc_stream_state *stream, +		struct rect *rect, +		bool is_stop);  #endif  bool dc_stream_configure_crc(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_trace.h b/drivers/gpu/drm/amd/display/dc/dc_trace.h index c711797e5c9e..bbec308a3a5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_trace.h +++ b/drivers/gpu/drm/amd/display/dc/dc_trace.h @@ -40,3 +40,5 @@  #define TRACE_DCN_FPU(begin, function, line, ref_count) \  	trace_dcn_fpu(begin, function, line, ref_count) +#define TRACE_OPTC_LOCK_UNLOCK_STATE(optc, inst, lock) \ +	trace_dcn_optc_lock_unlock_state(optc, inst, lock, __func__, __LINE__) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index ad9041472cca..27d0242d6cbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -32,7 +32,9 @@  #include "os_types.h"  #include "fixed31_32.h"  #include "irq_types.h" +#include "dc_ddc_types.h"  #include "dc_dp_types.h" +#include "dc_hdmi_types.h"  #include "dc_hw_types.h"  #include "dal_types.h"  #include "grph_object_defs.h" @@ -82,13 +84,8 @@ struct dc_perf_trace {  	unsigned long last_entry_write;  }; -#define DC_MAX_EDID_BUFFER_SIZE 2048 -#define DC_EDID_BLOCK_SIZE 128  #define MAX_SURFACE_NUM 4  #define NUM_PIXEL_FORMATS 10 -#define MAX_REPEATER_CNT 8 - -#include "dc_ddc_types.h"  enum tiling_mode {  	TILING_MODE_INVALID, @@ -374,66 +371,6 @@ struct dc_csc_adjustments {  	struct fixed31_32 hue;  }; -enum dpcd_downstream_port_max_bpc { -	DOWN_STREAM_MAX_8BPC = 0, -	DOWN_STREAM_MAX_10BPC, -	DOWN_STREAM_MAX_12BPC, -	DOWN_STREAM_MAX_16BPC -}; - - -enum link_training_offset { -	DPRX                = 0, -	LTTPR_PHY_REPEATER1 = 1, -	LTTPR_PHY_REPEATER2 = 2, -	LTTPR_PHY_REPEATER3 = 3, -	LTTPR_PHY_REPEATER4 = 4, -	LTTPR_PHY_REPEATER5 = 5, -	LTTPR_PHY_REPEATER6 = 6, -	LTTPR_PHY_REPEATER7 = 7, -	LTTPR_PHY_REPEATER8 = 8 -}; - -struct dc_lttpr_caps { -	union dpcd_rev revision; -	uint8_t mode; -	uint8_t max_lane_count; -	uint8_t max_link_rate; -	uint8_t phy_repeater_cnt; -	uint8_t max_ext_timeout; -	union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; -	union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; -	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; -}; - -struct dc_dongle_dfp_cap_ext { -	bool supported; -	uint16_t max_pixel_rate_in_mps; -	uint16_t max_video_h_active_width; -	uint16_t max_video_v_active_height; -	struct dp_encoding_format_caps encoding_format_caps; -	struct dp_color_depth_caps rgb_color_depth_caps; -	struct dp_color_depth_caps ycbcr444_color_depth_caps; -	struct dp_color_depth_caps ycbcr422_color_depth_caps; -	struct dp_color_depth_caps ycbcr420_color_depth_caps; -}; - -struct dc_dongle_caps { -	/* dongle type (DP converter, CV 
smart dongle) */ -	enum display_dongle_type dongle_type; -	bool extendedCapValid; -	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, -	indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ -	bool is_dp_hdmi_s3d_converter; -	bool is_dp_hdmi_ycbcr422_pass_through; -	bool is_dp_hdmi_ycbcr420_pass_through; -	bool is_dp_hdmi_ycbcr422_converter; -	bool is_dp_hdmi_ycbcr420_converter; -	uint32_t dp_hdmi_max_bpc; -	uint32_t dp_hdmi_max_pixel_clk_in_khz; -	uint32_t dp_hdmi_frl_max_link_bw_in_kbps; -	struct dc_dongle_dfp_cap_ext dfp_cap_ext; -};  /* Scaling format */  enum scaling_transformation {  	SCALING_TRANSFORMATION_UNINITIALIZED, @@ -690,6 +627,7 @@ struct psr_config {  	uint8_t su_y_granularity;  	unsigned int line_time_in_us;  	uint8_t rate_control_caps; +	uint16_t dsc_slice_height;  };  union dmcu_psr_level { @@ -801,6 +739,7 @@ struct psr_context {  	uint8_t su_y_granularity;  	unsigned int line_time_in_us;  	uint8_t rate_control_caps; +	uint16_t dsc_slice_height;  };  struct colorspace_transform { @@ -993,4 +932,54 @@ struct display_endpoint_id {  	enum display_endpoint_type ep_type;  }; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +struct otg_phy_mux { +	uint8_t phy_output_num; +	uint8_t otg_output_num; +}; +#endif + +enum dc_detect_reason { +	DETECT_REASON_BOOT, +	DETECT_REASON_RESUMEFROMS3S4, +	DETECT_REASON_HPD, +	DETECT_REASON_HPDRX, +	DETECT_REASON_FALLBACK, +	DETECT_REASON_RETRAIN, +	DETECT_REASON_TDR, +}; + +struct dc_link_status { +	bool link_active; +	struct dpcd_caps *dpcd_caps; +}; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +union hdcp_rx_caps { +	struct { +		uint8_t version; +		uint8_t reserved; +		struct { +			uint8_t repeater	: 1; +			uint8_t hdcp_capable	: 1; +			uint8_t reserved	: 6; +		} byte0; +	} fields; +	uint8_t raw[3]; +}; + +union hdcp_bcaps { +	struct { +		uint8_t HDCP_CAPABLE:1; +		uint8_t REPEATER:1; +		uint8_t RESERVED:6; +	} bits; +	uint8_t raw; +}; + +struct hdcp_caps { +	union hdcp_rx_caps rx_caps; +	union hdcp_bcaps bcaps; +}; +#endif  #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index e69f1899fbf0..c850ed49281f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -26,7 +26,7 @@  #ifndef __DAL_AUX_ENGINE_DCE110_H__  #define __DAL_AUX_ENGINE_DCE110_H__ -#include "i2caux_interface.h" +#include "gpio_service_interface.h"  #include "inc/hw/aux_engine.h"  enum aux_return_code_type; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index fbb19e253f50..d3cc5ec46956 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -927,19 +927,20 @@ static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset)  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)  static void dcn10_forward_crc_window(struct dmcu *dmcu, -					struct crc_region *crc_win, +					struct rect *rect,  					struct otg_phy_mux *mux_mapping)  {  	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);  	unsigned int dmcu_max_retry_on_wait_reg_ready = 801;  	unsigned int dmcu_wait_reg_ready_interval = 100;  	unsigned int crc_start = 0, crc_end = 0, otg_phy_mux = 0; +	int x_start, y_start, x_end, y_end;  	/* If microcontroller is not running, do nothing */  	if (dmcu->dmcu_state != DMCU_RUNNING)  		return; -	if (!crc_win) +	if (!rect)  		return;  	/* waitDMCUReadyForCmd */ @@ -947,9 +948,14 @@ static void dcn10_forward_crc_window(struct dmcu 
*dmcu,  				dmcu_wait_reg_ready_interval,  				dmcu_max_retry_on_wait_reg_ready); +	x_start = rect->x; +	y_start = rect->y; +	x_end = x_start + rect->width; +	y_end = y_start + rect->height; +  	/* build up nitification data */ -	crc_start = (((unsigned int) crc_win->x_start) << 16) | crc_win->y_start; -	crc_end = (((unsigned int) crc_win->x_end) << 16) | crc_win->y_end; +	crc_start = (((unsigned int) x_start) << 16) | y_start; +	crc_end = (((unsigned int) x_end) << 16) | y_end;  	otg_phy_mux =  		(((unsigned int) mux_mapping->otg_output_num) << 16) | mux_mapping->phy_output_num; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 09260c23c3bd..fa314493ffc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dce_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index bec5e9f787fc..1e2d2cbe2c37 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -399,7 +399,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,  		link->psr_settings.force_ffu_mode = 0;  	copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode; -	if (link->fec_state == dc_link_fec_enabled && +	if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && +		!link->dc->debug.disable_fec) && +		(link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && +		!link->panel_config.dsc.disable_dsc_edp && +		link->dc->caps.edp_dsc_support)) &&  		link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&  		(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,  			sizeof(DP_SINK_DEVICE_STR_ID_1)) || @@ -409,6 +413,12 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,  	else  		copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0; +	//WA for PSR1 on specific TCON, require frame delay for frame re-lock +	copy_settings_data->relock_delay_frame_cnt = 0; +	if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) +		copy_settings_data->relock_delay_frame_cnt = 2; +	copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; +  	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);  	dc_dmub_srv_cmd_execute(dc->dmub_srv);  	dc_dmub_srv_wait_idle(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index d260eaa1509e..fb3fd5b7c78b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -46,7 +46,7 @@  #include "link_encoder.h"  #include "link_enc_cfg.h"  #include "link_hwss.h" -#include "dc_link_dp.h" +#include "link.h"  #include "dccg.h"  #include "clock_source.h"  #include "clk_mgr.h" @@ -54,7 +54,6 @@  #include "audio.h"  #include "reg_helper.h"  #include "panel_cntl.h" -#include "inc/link_dpcd.h"  #include "dpcd_defs.h"  /* include DCE11 register header files */  #include "dce/dce_11_0_d.h" @@ -65,7 +64,6 @@  #include "dcn10/dcn10_hw_sequencer.h" -#include "link/link_dp_trace.h"  #include "dce110_hw_sequencer.h"  #define GAMMA_HW_POINTS_NUM 256 @@ -653,10 +651,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)  		
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); -	else +	else { +		if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) +			pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( +				pipe_ctx->stream_res.stream_enc, +				&pipe_ctx->stream_res.encoder_info_frame); +  		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); +	}  }  void dce110_enable_stream(struct pipe_ctx *pipe_ctx) @@ -688,16 +692,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)  		early_control = lane_count;  	tg->funcs->set_early_control(tg, early_control); - -	/* enable audio only within mode set */ -	if (pipe_ctx->stream_res.audio != NULL) { -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); -	} - - - -  }  static enum bp_result link_transmitter_control( @@ -747,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready(  	/* obtain HPD */  	/* TODO what to do with this? */ -	hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service); +	hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);  	if (!hpd) {  		BREAK_TO_DEBUGGER(); @@ -817,19 +811,19 @@ void dce110_edp_power_control(  				div64_u64(dm_get_elapse_time_in_ns(  						ctx,  						current_ts, -						dp_trace_get_edp_poweroff_timestamp(link)), 1000000); +						link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);  		unsigned long long time_since_edp_poweron_ms =  				div64_u64(dm_get_elapse_time_in_ns(  						ctx,  						current_ts, -						dp_trace_get_edp_poweron_timestamp(link)), 1000000); +						link_dp_trace_get_edp_poweron_timestamp(link)), 1000000);  		DC_LOG_HW_RESUME_S3(  				"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",  				__func__,  				power_up,  				current_ts, -				dp_trace_get_edp_poweroff_timestamp(link), -				dp_trace_get_edp_poweron_timestamp(link), +				link_dp_trace_get_edp_poweroff_timestamp(link), +				link_dp_trace_get_edp_poweron_timestamp(link),  				time_since_edp_poweroff_ms,  				time_since_edp_poweron_ms); @@ -844,7 +838,7 @@ void dce110_edp_power_control(  					link->panel_config.pps.extra_t12_ms;  			/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. 
*/ -			if (dp_trace_get_edp_poweroff_timestamp(link) != 0) { +			if (link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {  				if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)  					remaining_min_edp_poweroff_time_ms =  						remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms; @@ -885,14 +879,16 @@ void dce110_edp_power_control(  		if (ctx->dc->ctx->dmub_srv &&  				ctx->dc->debug.dmub_command_table) { -			if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) + +			if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {  				bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,  						LVTMA_CONTROL_POWER_ON, -						panel_instance); -			else +						panel_instance, link->link_powered_externally); +			} else {  				bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,  						LVTMA_CONTROL_POWER_OFF, -						panel_instance); +						panel_instance, link->link_powered_externally); +			}  		}  		bp_result = link_transmitter_control(ctx->dc_bios, &cntl); @@ -902,13 +898,13 @@ void dce110_edp_power_control(  				__func__, (power_up ? "On":"Off"),  				bp_result); -		dp_trace_set_edp_power_timestamp(link, power_up); +		link_dp_trace_set_edp_power_timestamp(link, power_up);  		DC_LOG_HW_RESUME_S3(  				"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",  				__func__, -				dp_trace_get_edp_poweroff_timestamp(link), -				dp_trace_get_edp_poweron_timestamp(link)); +				link_dp_trace_get_edp_poweroff_timestamp(link), +				link_dp_trace_get_edp_poweron_timestamp(link));  		if (bp_result != BP_RESULT_OK)  			DC_LOG_ERROR( @@ -936,14 +932,14 @@ void dce110_edp_wait_for_T12(  		return;  	if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) && -			dp_trace_get_edp_poweroff_timestamp(link) != 0) { +			link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {  		unsigned int t12_duration = 500; // Default T12 as per spec  		unsigned long long current_ts = dm_get_timestamp(ctx);  		unsigned long long time_since_edp_poweroff_ms =  				div64_u64(dm_get_elapse_time_in_ns(  						ctx,  						current_ts, -						dp_trace_get_edp_poweroff_timestamp(link)), 1000000); +						link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);  		t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12 @@ -951,7 +947,6 @@ void dce110_edp_wait_for_T12(  			msleep(t12_duration - time_since_edp_poweroff_ms);  	}  } -  /*todo: cloned in stream enc, fix*/  /*   * @brief @@ -1025,21 +1020,25 @@ void dce110_edp_backlight_control(  		 * we shouldn't be doing power-sequencing, hence we can skip  		 * waiting for T7-ready.  		 */ -			edp_receiver_ready_T7(link); +			link_edp_receiver_ready_T7(link);  		else  			DC_LOG_DC("edp_receiver_ready_T7 skipped\n");  	} +	/* Setting link_powered_externally will bypass delays in the backlight +	 * as they are not required if the link is being powered by a different +	 * source. 
+	 */  	if (ctx->dc->ctx->dmub_srv &&  			ctx->dc->debug.dmub_command_table) {  		if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)  			ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,  					LVTMA_CONTROL_LCD_BLON, -					panel_instance); +					panel_instance, link->link_powered_externally);  		else  			ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,  					LVTMA_CONTROL_LCD_BLOFF, -					panel_instance); +					panel_instance, link->link_powered_externally);  	}  	link_transmitter_control(ctx->dc_bios, &cntl); @@ -1052,7 +1051,7 @@ void dce110_edp_backlight_control(  	if (link->dpcd_sink_ext_caps.bits.oled ||  		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||  		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1) -		dc_link_backlight_enable_aux(link, enable); +		link_backlight_enable_aux(link, enable);  	/*edp 1.2*/  	if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) { @@ -1064,7 +1063,7 @@ void dce110_edp_backlight_control(  		 * we shouldn't be doing power-sequencing, hence we can skip  		 * waiting for T9-ready.  		 */ -			edp_add_delay_for_T9(link); +			link_edp_add_delay_for_T9(link);  		else  			DC_LOG_DC("edp_receiver_ready_T9 skipped\n");  	} @@ -1081,12 +1080,14 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)  	struct dc *dc;  	struct clk_mgr *clk_mgr;  	unsigned int i, num_audio = 1; +	const struct link_hwss *link_hwss;  	if (!pipe_ctx->stream)  		return;  	dc = pipe_ctx->stream->ctx->dc;  	clk_mgr = dc->clk_mgr; +	link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);  	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)  		return; @@ -1103,56 +1104,35 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)  		if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa)  			/*this is the first audio. 
apply the PME w/a in order to wake AZ from D3*/  			clk_mgr->funcs->enable_pme_wa(clk_mgr); -		/* un-mute audio */ -		/* TODO: audio should be per stream rather than per link */ -		if (is_dp_128b_132b_signal(pipe_ctx)) -			pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( -					pipe_ctx->stream_res.hpo_dp_stream_enc, false); -		else -			pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( -					pipe_ctx->stream_res.stream_enc, false); + +		link_hwss->enable_audio_packet(pipe_ctx); +  		if (pipe_ctx->stream_res.audio)  			pipe_ctx->stream_res.audio->enabled = true;  	} - -	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);  }  void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)  {  	struct dc *dc;  	struct clk_mgr *clk_mgr; +	const struct link_hwss *link_hwss;  	if (!pipe_ctx || !pipe_ctx->stream)  		return;  	dc = pipe_ctx->stream->ctx->dc;  	clk_mgr = dc->clk_mgr; +	link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);  	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)  		return; -	if (is_dp_128b_132b_signal(pipe_ctx)) -		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( -				pipe_ctx->stream_res.hpo_dp_stream_enc, true); -	else -		pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( -				pipe_ctx->stream_res.stream_enc, true); +	link_hwss->disable_audio_packet(pipe_ctx); +  	if (pipe_ctx->stream_res.audio) {  		pipe_ctx->stream_res.audio->enabled = false; -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			if (is_dp_128b_132b_signal(pipe_ctx)) -				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( -						pipe_ctx->stream_res.hpo_dp_stream_enc); -			else -				pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( -						pipe_ctx->stream_res.stream_enc); -		else -			pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( -					pipe_ctx->stream_res.stream_enc); -  		if (clk_mgr->funcs->enable_pme_wa)  			/*this is the first audio. 
apply the PME w/a in order to wake AZ from D3*/  			clk_mgr->funcs->enable_pme_wa(clk_mgr); @@ -1163,9 +1143,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)  		 * stream->stream_engine_id);  		 */  	} - -	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);  }  void dce110_disable_stream(struct pipe_ctx *pipe_ctx) @@ -1174,6 +1151,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)  	struct dc_link *link = stream->link;  	struct dc *dc = pipe_ctx->stream->ctx->dc;  	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	struct dccg *dccg = dc->res_pool->dccg; +	struct timing_generator *tg = pipe_ctx->stream_res.tg; +	struct dtbclk_dto_params dto_params = {0}; +	int dp_hpo_inst;  	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {  		pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1182,7 +1163,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)  			pipe_ctx->stream_res.stream_enc);  	} -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(  					pipe_ctx->stream_res.hpo_dp_stream_enc);  	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -1193,7 +1174,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)  	link_hwss->reset_stream_encoder(pipe_ctx); -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) { +		dto_params.otg_inst = tg->inst; +		dto_params.timing = &pipe_ctx->stream->timing; +		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; +		dccg->funcs->set_dtbclk_dto(dccg, &dto_params); +		dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); +		dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); +	} + +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		/* TODO: This looks like a bug to me as we are disabling HPO IO when  		 * we are just disabling a single HPO stream. Shouldn't we disable HPO  		 * HW control only when HPOs for all streams are disabled? @@ -1235,7 +1225,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)  		link->dc->hwss.set_abm_immediate_disable(pipe_ctx);  	} -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */  		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(  				pipe_ctx->stream_res.hpo_dp_stream_enc); @@ -1257,7 +1247,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)  				 * we shouldn't be doing power-sequencing, hence we can skip  				 * waiting for T9-ready.  				 
*/ -				edp_receiver_ready_T9(link); +				link_edp_receiver_ready_T9(link);  			}  		}  	} @@ -1440,7 +1430,7 @@ static enum dc_status dce110_enable_stream_timing(  		if (false == pipe_ctx->clock_source->funcs->program_pix_clk(  				pipe_ctx->clock_source,  				&pipe_ctx->stream_res.pix_clk_params, -				dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), +				link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),  				&pipe_ctx->pll_settings)) {  			BREAK_TO_DEBUGGER();  			return DC_ERROR_UNEXPECTED; @@ -1487,6 +1477,9 @@ static enum dc_status apply_single_controller_ctx_to_hw(  	unsigned int event_triggers = 0;  	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;  	struct dce_hwseq *hws = dc->hwseq; +	const struct link_hwss *link_hwss = get_link_hwss( +			link, &pipe_ctx->link_res); +  	if (hws->funcs.disable_stream_gating) {  		hws->funcs.disable_stream_gating(dc, pipe_ctx); @@ -1497,23 +1490,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(  		build_audio_output(context, pipe_ctx, &audio_output); -		if (dc_is_dp_signal(pipe_ctx->stream->signal)) -			if (is_dp_128b_132b_signal(pipe_ctx)) -				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( -						pipe_ctx->stream_res.hpo_dp_stream_enc, -						pipe_ctx->stream_res.audio->inst, -						&pipe_ctx->stream->audio_info); -			else -				pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( -						pipe_ctx->stream_res.stream_enc, -						pipe_ctx->stream_res.audio->inst, -						&pipe_ctx->stream->audio_info); -		else -			pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( -					pipe_ctx->stream_res.stream_enc, -					pipe_ctx->stream_res.audio->inst, -					&pipe_ctx->stream->audio_info, -					&audio_output.crtc_info); +		link_hwss->setup_audio_output(pipe_ctx, &audio_output, +				pipe_ctx->stream_res.audio->inst);  		pipe_ctx->stream_res.audio->funcs->az_configure(  				pipe_ctx->stream_res.audio, @@ -1556,7 +1534,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(  	 * To do so, move calling function enable_stream_timing to only be done AFTER calling  	 * function core_link_enable_stream  	 */ -	if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) +	if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)))  		/*  */  		/* Do not touch stream timing on seamless boot optimization. */  		if (!pipe_ctx->stream->apply_seamless_boot_optimization) @@ -1588,25 +1566,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(  			pipe_ctx->stream_res.tg->inst);  	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); +		link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);  	if (!stream->dpms_off) -		core_link_enable_stream(context, pipe_ctx); +		link_set_dpms_on(context, pipe_ctx);  	/* DCN3.1 FPGA Workaround  	 * Need to enable HPO DP Stream Encoder before setting OTG master enable.  	 
* To do so, move calling function enable_stream_timing to only be done AFTER calling  	 * function core_link_enable_stream  	 */ -	if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) { +	if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) {  		if (!pipe_ctx->stream->apply_seamless_boot_optimization)  			hws->funcs.enable_stream_timing(pipe_ctx, context, dc);  	}  	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL; -	pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; - +	/* Phantom and main stream share the same link (because the stream +	 * is constructed with the same sink). Make sure not to override +	 * and link programming on the main. +	 */ +	if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { +		pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; +	}  	return DC_OK;  } @@ -1619,7 +1602,7 @@ static void power_down_encoders(struct dc *dc)  	for (i = 0; i < dc->link_count; i++) {  		enum signal_type signal = dc->links[i]->connector_signal; -		dc_link_blank_dp_stream(dc->links[i], false); +		link_blank_dp_stream(dc->links[i], false);  		if (signal != SIGNAL_TYPE_EDP)  			signal = SIGNAL_TYPE_NONE; @@ -2102,7 +2085,7 @@ static void dce110_reset_hw_ctx_wrap(  			 * disabled already, no need to disable again.  			 */  			if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) { -				core_link_disable_stream(pipe_ctx_old); +				link_set_dpms_off(pipe_ctx_old);  				/* free acquired resources*/  				if (pipe_ctx_old->stream_res.audio) { @@ -3073,13 +3056,13 @@ void dce110_enable_dp_link_output(  				pipes[i].clock_source->funcs->program_pix_clk(  						pipes[i].clock_source,  						&pipes[i].stream_res.pix_clk_params, -						dp_get_link_encoding_format(link_settings), +						link_dp_get_encoding_format(link_settings),  						&pipes[i].pll_settings);  			}  		}  	} -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { +	if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {  		if (dc->clk_mgr->funcs->notify_link_rate_change)  			dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);  	} @@ -3096,7 +3079,7 @@ void dce110_enable_dp_link_output(  	if (dmcu != NULL && dmcu->funcs->unlock_phy)  		dmcu->funcs->unlock_phy(dmcu); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);  }  void dce110_disable_link_output(struct dc_link *link, @@ -3121,7 +3104,7 @@ void dce110_disable_link_output(struct dc_link *link,  		link->dc->hwss.edp_power_control(link, false);  	else if (dmcu != NULL && dmcu->funcs->lock_phy)  		dmcu->funcs->unlock_phy(dmcu); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);  }  static const struct hw_sequencer_funcs dce110_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 758f4b3b0087..394d83a97f33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -71,7 +71,7 @@ void dce110_optimize_bandwidth(  		struct dc *dc,  		struct dc_state *context); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);  void dce110_edp_power_control(  		struct dc_link *link, diff --git 
a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index fc6aa098bda0..8db9f7514466 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -1128,6 +1128,7 @@ struct resource_pool *dce60_create_resource_pool(  	if (dce60_construct(num_virtual_links, dc, pool))  		return &pool->base; +	kfree(pool);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1325,6 +1326,7 @@ struct resource_pool *dce61_create_resource_pool(  	if (dce61_construct(num_virtual_links, dc, pool))  		return &pool->base; +	kfree(pool);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1518,6 +1520,7 @@ struct resource_pool *dce64_create_resource_pool(  	if (dce64_construct(num_virtual_links, dc, pool))  		return &pool->base; +	kfree(pool);  	BREAK_TO_DEBUGGER();  	return NULL;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index b28025960050..5825e6f412bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -1137,6 +1137,7 @@ struct resource_pool *dce80_create_resource_pool(  	if (dce80_construct(num_virtual_links, dc, pool))  		return &pool->base; +	kfree(pool);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1336,6 +1337,7 @@ struct resource_pool *dce81_create_resource_pool(  	if (dce81_construct(num_virtual_links, dc, pool))  		return &pool->base; +	kfree(pool);  	BREAK_TO_DEBUGGER();  	return NULL;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index f607a0e28f14..f62368da875d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -581,7 +581,7 @@ static void dpp1_dscl_set_manual_ratio_init(   * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area   *   * @dpp: DPP data struct - * @recount: Rectangle information + * @recout: Rectangle information   *   * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on   * the values specified in the recount parameter. 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 0f746bb4e500..d51f1ce02874 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -55,7 +55,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);  	s = &wm->sets[1];  	s->wm_set = 1; @@ -65,7 +65,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);  	s = &wm->sets[2];  	s->wm_set = 2; @@ -75,7 +75,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);  	s = &wm->sets[3];  	s->wm_set = 3; @@ -85,7 +85,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);  }  void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index e48fd044f572..e8752077571a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -171,6 +171,11 @@ struct dcn_hubbub_registers {  	uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B;  	uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;  	uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D; +	uint32_t SDPIF_REQUEST_RATE_LIMIT; +	uint32_t DCHUBBUB_SDPIF_CFG0; +	uint32_t DCHUBBUB_SDPIF_CFG1; +	uint32_t DCHUBBUB_CLOCK_CNTL; +	uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL;  };  #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -360,7 +365,14 @@ struct dcn_hubbub_registers {  		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\  		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\  		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\ -		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D +		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\ +		type SDPIF_REQUEST_RATE_LIMIT;\ +		type DISPCLK_R_DCHUBBUB_GATE_DIS;\ +		type DCFCLK_R_DCHUBBUB_GATE_DIS;\ +		type SDPIF_MAX_NUM_OUTSTANDING;\ +		type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\ +		type SDPIF_PORT_CONTROL;\ +		type DET_MEM_PWR_LS_MODE  struct dcn_hubbub_shift { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 11e4c4e46947..a1a29c508394 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -45,7 +45,6 @@  
#include "dcn10_hubp.h"  #include "dcn10_hubbub.h"  #include "dcn10_cm_common.h" -#include "dc_link_dp.h"  #include "dccg.h"  #include "clk_mgr.h"  #include "link_hwss.h" @@ -56,8 +55,7 @@  #include "dce/dmub_hw_lock_mgr.h"  #include "dc_trace.h"  #include "dce/dmub_outbox.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h"  #define DC_LOGGER_INIT(logger) @@ -97,10 +95,12 @@ void dcn10_lock_all_pipes(struct dc *dc,  	bool lock)  {  	struct pipe_ctx *pipe_ctx; +	struct pipe_ctx *old_pipe_ctx;  	struct timing_generator *tg;  	int i;  	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];  		pipe_ctx = &context->res_ctx.pipe_ctx[i];  		tg = pipe_ctx->stream_res.tg; @@ -110,7 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,  		 */  		if (pipe_ctx->top_pipe ||  		    !pipe_ctx->stream || -		    !pipe_ctx->plane_state || +		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||  		    !tg->funcs->is_tg_enabled(tg))  			continue; @@ -157,7 +157,7 @@ static void dcn10_log_hubbub_state(struct dc *dc,  		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);  		DTN_INFO_MICRO_SEC(s->sr_enter);  		DTN_INFO_MICRO_SEC(s->sr_exit); -		DTN_INFO_MICRO_SEC(s->dram_clk_chanage); +		DTN_INFO_MICRO_SEC(s->dram_clk_change);  		DTN_INFO("\n");  	} @@ -867,6 +867,32 @@ static void false_optc_underflow_wa(  		tg->funcs->clear_optc_underflow(tg);  } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ +	struct pipe_ctx *other_pipe; +	int vready_offset = pipe->pipe_dlg_param.vready_offset; + +	/* Always use the largest vready_offset of all connected pipes */ +	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} + +	return vready_offset; +} +  enum dc_status dcn10_enable_stream_timing(  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context, @@ -893,7 +919,7 @@ enum dc_status dcn10_enable_stream_timing(  	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(  			pipe_ctx->clock_source,  			&pipe_ctx->stream_res.pix_clk_params, -			dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), +			link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),  			&pipe_ctx->pll_settings)) {  		BREAK_TO_DEBUGGER();  		return DC_ERROR_UNEXPECTED; @@ -910,7 +936,7 @@ enum dc_status dcn10_enable_stream_timing(  	pipe_ctx->stream_res.tg->funcs->program_timing(  			pipe_ctx->stream_res.tg,  			&stream->timing, -			pipe_ctx->pipe_dlg_param.vready_offset, +			calculate_vready_offset_for_group(pipe_ctx),  			pipe_ctx->pipe_dlg_param.vstartup_start,  			pipe_ctx->pipe_dlg_param.vupdate_offset,  			pipe_ctx->pipe_dlg_param.vupdate_width, @@ -991,7 +1017,7 @@ 
static void dcn10_reset_back_end_for_pipe(  		 * VBIOS lit up eDP, so check link status too.  		 */  		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) -			core_link_disable_stream(pipe_ctx); +			link_set_dpms_off(pipe_ctx);  		else if (pipe_ctx->stream_res.audio)  			dc->hwss.disable_audio_stream(pipe_ctx); @@ -1538,7 +1564,7 @@ void dcn10_init_hw(struct dc *dc)  	}  	/* we want to turn off all dp displays before doing detection */ -	dc_link_blank_all_dp_displays(dc); +	link_blank_all_dp_displays(dc);  	if (hws->funcs.enable_power_gating_plane)  		hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -2188,6 +2214,12 @@ void dcn10_enable_vblanks_synchronization(  		opp = grouped_pipes[i]->stream_res.opp;  		tg = grouped_pipes[i]->stream_res.tg;  		tg->funcs->get_otg_active_size(tg, &width, &height); + +		if (!tg->funcs->is_tg_enabled(tg)) { +			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n"); +			return; +		} +  		if (opp->funcs->opp_program_dpg_dimensions)  			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);  	} @@ -2250,6 +2282,12 @@ void dcn10_enable_timing_synchronization(  		opp = grouped_pipes[i]->stream_res.opp;  		tg = grouped_pipes[i]->stream_res.tg;  		tg->funcs->get_otg_active_size(tg, &width, &height); + +		if (!tg->funcs->is_tg_enabled(tg)) { +			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n"); +			return; +		} +  		if (opp->funcs->opp_program_dpg_dimensions)  			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);  	} @@ -2861,7 +2899,7 @@ void dcn10_blank_pixel_data(  			dc->hwss.set_pipe(pipe_ctx);  			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);  		} -	} else if (blank) { +	} else {  		dc->hwss.set_abm_immediate_disable(pipe_ctx);  		if (stream_res->tg->funcs->set_blank) {  			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK); @@ -2900,7 +2938,7 @@ void dcn10_program_pipe(  		pipe_ctx->stream_res.tg->funcs->program_global_sync(  				pipe_ctx->stream_res.tg, -				pipe_ctx->pipe_dlg_param.vready_offset, +				calculate_vready_offset_for_group(pipe_ctx),  				pipe_ctx->pipe_dlg_param.vstartup_start,  				pipe_ctx->pipe_dlg_param.vupdate_offset,  				pipe_ctx->pipe_dlg_param.vupdate_width); @@ -3185,12 +3223,16 @@ static void dcn10_config_stereo_parameters(  			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||  			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||  			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { -			enum display_dongle_type dongle = \ -					stream->link->ddc->dongle_type; -			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER || -				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER || -				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER) -				flags->DISABLE_STEREO_DP_SYNC = 1; + +			if (stream->link && stream->link->ddc) { +				enum display_dongle_type dongle = \ +						stream->link->ddc->dongle_type; + +				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER || +					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER || +					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER) +					flags->DISABLE_STEREO_DP_SYNC = 1; +			}  		}  		flags->RIGHT_EYE_POLARITY =\  				stream->timing.flags.RIGHT_EYE_3D_POLARITY; @@ -3586,7 +3628,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  						(int)hubp->curs_attr.width || pos_cpy.x  						<= (int)hubp->curs_attr.width +  						pipe_ctx->plane_state->src_rect.x) { -						pos_cpy.x = temp_x + viewport_width; +						pos_cpy.x = 2 * viewport_width - temp_x;  					}  				}  			} else { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index e8b6065fffad..a0f8e31d2adc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -83,7 +83,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i  	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));  	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm); -	chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n"); +	chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");  	remaining_buffer -= chars_printed;  	pBuf += chars_printed; @@ -98,7 +98,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i  			(s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,  			(s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,  			(s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac, -			(s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac); +			(s->dram_clk_change * frac) / ref_clk_mhz / frac, (s->dram_clk_change * frac) / ref_clk_mhz % frac);  		remaining_buffer -= chars_printed;  		pBuf += chars_printed;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index fbccb7263ad2..c4287147b853 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dcn10_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 33d780218790..c9e53dc49c92 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -27,6 +27,7 @@  #include "reg_helper.h"  #include "dcn10_optc.h"  #include "dc.h" +#include "dc_trace.h"  #define REG(reg)\  	optc1->tg_regs->reg @@ -657,6 +658,8 @@ void optc1_lock(struct timing_generator *optc)  		REG_WAIT(OTG_MASTER_UPDATE_LOCK,  				UPDATE_LOCK_STATUS, 1,  				1, 10); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);  }  void optc1_unlock(struct timing_generator *optc) @@ -665,6 +668,8 @@ void optc1_unlock(struct timing_generator *optc)  	REG_SET(OTG_MASTER_UPDATE_LOCK, 0,  			OTG_MASTER_UPDATE_LOCK, 0); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, false);  }  void optc1_get_position(struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 88ac5f6f4c96..0b37bb0e184b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -519,7 +519,8 @@ struct dcn_optc_registers {  	type OTG_CRC_DATA_STREAM_COMBINE_MODE;\  	type OTG_CRC_DATA_STREAM_SPLIT_MODE;\  	type OTG_CRC_DATA_FORMAT;\ -	type OTG_V_TOTAL_LAST_USED_BY_DRR; +	type OTG_V_TOTAL_LAST_USED_BY_DRR;\ +	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;  #define TG_REG_FIELD_LIST_DCN3_2(type) \  	type 
OTG_H_TIMING_DIV_MODE_MANUAL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 56d30baf12df..6bfac8088ab0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1295,47 +1295,6 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)  	return value;  } -/* - * Some architectures don't support soft-float (e.g. aarch64), on those - * this function has to be called with hardfloat enabled, make sure not - * to inline it so whatever fp stuff is done stays inside - */ -static noinline void dcn10_resource_construct_fp( -	struct dc *dc) -{ -	if (dc->ctx->dce_version == DCN_VERSION_1_01) { -		struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; -		struct dcn_ip_params *dcn_ip = dc->dcn_ip; -		struct display_mode_lib *dml = &dc->dml; - -		dml->ip.max_num_dpp = 3; -		/* TODO how to handle 23.84? */ -		dcn_soc->dram_clock_change_latency = 23; -		dcn_ip->max_num_dpp = 3; -	} -	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { -		dc->dcn_soc->urgent_latency = 3; -		dc->debug.disable_dmcu = true; -		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; -	} - - -	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; -	ASSERT(dc->dcn_soc->number_of_channels < 3); -	if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ -		dc->dcn_soc->number_of_channels = 2; - -	if (dc->dcn_soc->number_of_channels == 1) { -		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; -		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; -		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; -		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; -		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { -			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; -		} -	} -} -  static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)  {  	int i; @@ -1510,8 +1469,9 @@ static bool dcn10_resource_construct(  	memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));  	memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); -	/* Other architectures we build for build this with soft-float */ +	DC_FP_START();  	dcn10_resource_construct_fp(dc); +	DC_FP_END();  	if (!dc->config.is_vmin_only_asic)  		if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 484e7cdf00b8..3c451ab5d3ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -28,7 +28,7 @@  #include "dcn10_stream_encoder.h"  #include "reg_helper.h"  #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dpcd_defs.h"  #include "dcn30/dcn30_afmt.h" @@ -753,12 +753,19 @@ void enc1_stream_encoder_update_dp_info_packets(  	 * use other packetIndex (such as 5,6) for other info packet  	 */ +	if (info_frame->adaptive_sync.valid) +		enc1_update_generic_info_packet( +				enc1, +				5,  /* packetIndex */ +				&info_frame->adaptive_sync); +  	/* enable/disable transmission of packet(s).  	 
* If enabled, packet transmission begins on the next frame  	 */  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); +	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);  	/* This bit is the master enable bit.  	 * When enabling secondary stream engine, @@ -926,7 +933,7 @@ void enc1_stream_encoder_dp_blank(  	/* disable DP stream */  	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);  	/* the encoder stops sending the video stream  	 * at the start of the vertical blanking. @@ -945,7 +952,7 @@ void enc1_stream_encoder_dp_blank(  	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);  }  /* output video stream to link encoder */ @@ -1018,7 +1025,7 @@ void enc1_stream_encoder_dp_unblank(  	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);  }  void enc1_stream_encoder_set_avmute( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 784a8b6f360d..42344aec60d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -28,6 +28,7 @@  #include "reg_helper.h"  #include "dcn20_dsc.h"  #include "dsc/dscc_types.h" +#include "dsc/rc_calc.h"  static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);  static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, @@ -200,7 +201,6 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct  	bool is_config_ok;  	struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); -	DC_LOG_DSC(" ");  	DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);  	dsc_config_log(dsc, dsc_cfg);  	is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg); @@ -345,10 +345,38 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co  	}  } +static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) +{ +	uint8_t i; + +	rc->rc_model_size = override->rc_model_size; +	for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++) +		rc->rc_buf_thresh[i] = override->rc_buf_thresh[i]; +	for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) { +		rc->qp_min[i] = override->rc_minqp[i]; +		rc->qp_max[i] = override->rc_maxqp[i]; +		rc->ofs[i] = override->rc_offset[i]; +	} + +	rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi; +	rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo; +	rc->rc_edge_factor = override->rc_edge_factor; +	rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0; +	rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1; + +	rc->initial_fullness_offset = override->initial_fullness_offset; +	rc->initial_xmit_delay = override->initial_delay; + +	rc->flatness_min_qp = override->flatness_min_qp; +	rc->flatness_max_qp = override->flatness_max_qp; +	rc->flatness_det_thresh = override->flatness_det_thresh; +} +  static bool 
dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,  			struct dsc_optc_config *dsc_optc_cfg)  {  	struct dsc_parameters dsc_params; +	struct rc_params rc;  	/* Validate input parameters */  	ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h); @@ -413,7 +441,12 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_  	dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);  	dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422); -	if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) { +	calc_rc_params(&rc, &dsc_reg_vals->pps); + +	if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd) +		dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd); + +	if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {  		dm_output_to_console("%s: DSC config failed\n", __func__);  		return false;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h index a85ed228dfc2..a9dd9ae23ec9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h @@ -27,204 +27,177 @@  #define TO_DCN20_DWBC(dwbc_base) \  	container_of(dwbc_base, struct dcn20_dwbc, base) -/* DCN */ -#define BASE_INNER(seg) \ -	DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ -	BASE_INNER(seg) - -#define SR(reg_name)\ -		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \ -					mm ## reg_name - -#define SRI(reg_name, block, id)\ -	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ -					mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ -	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ -					mm ## reg_name - -#define SRII(reg_name, block, id)\ -	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ -					mm ## block ## id ## _ ## reg_name - -#define SF(reg_name, field_name, post_fix)\ -	.field_name = reg_name ## __ ## field_name ## post_fix - -  #define DWBC_COMMON_REG_LIST_DCN2_0(inst) \ -	SRI2(WB_ENABLE, CNV, inst),\ -	SRI2(WB_EC_CONFIG, CNV, inst),\ -	SRI2(CNV_MODE, CNV, inst),\ -	SRI2(CNV_WINDOW_START, CNV, inst),\ -	SRI2(CNV_WINDOW_SIZE, CNV, inst),\ -	SRI2(CNV_UPDATE, CNV, inst),\ -	SRI2(CNV_SOURCE_SIZE, CNV, inst),\ -	SRI2(CNV_TEST_CNTL, CNV, inst),\ -	SRI2(CNV_TEST_CRC_RED, CNV, inst),\ -	SRI2(CNV_TEST_CRC_GREEN, CNV, inst),\ -	SRI2(CNV_TEST_CRC_BLUE, CNV, inst),\ -	SRI2(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\ -	SRI2(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\ -	SRI2(WBSCL_MODE, WBSCL, inst),\ -	SRI2(WBSCL_TAP_CONTROL, WBSCL, inst),\ -	SRI2(WBSCL_DEST_SIZE, WBSCL, inst),\ -	SRI2(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\ -	SRI2(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\ -	SRI2(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\ -	SRI2(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\ -	SRI2(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\ -	SRI2(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\ -	SRI2(WBSCL_ROUND_OFFSET, WBSCL, inst),\ -	SRI2(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\ -	SRI2(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\ -	SRI2(WBSCL_TEST_CNTL, WBSCL, inst),\ -	SRI2(WBSCL_TEST_CRC_RED, WBSCL, inst),\ -	SRI2(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\ -	SRI2(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\ -	SRI2(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\ -	SRI2(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\ -	SRI2(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\ -	SRI2(WBSCL_CLAMP_CBCR, WBSCL, inst),\ -	SRI2(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\ -	
SRI2(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\ -	SRI2(WBSCL_DEBUG, WBSCL, inst),\ -	SRI2(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\ -	SRI2(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\ -	SRI2(WB_DEBUG_CTRL, CNV, inst),\ -	SRI2(WB_DBG_MODE, CNV, inst),\ -	SRI2(WB_HW_DEBUG, CNV, inst),\ -	SRI2(CNV_TEST_DEBUG_INDEX, CNV, inst),\ -	SRI2(CNV_TEST_DEBUG_DATA, CNV, inst),\ -	SRI2(WB_SOFT_RESET, CNV, inst),\ -	SRI2(WB_WARM_UP_MODE_CTL1, CNV, inst),\ -	SRI2(WB_WARM_UP_MODE_CTL2, CNV, inst) +	SRI2_DWB(WB_ENABLE, CNV, inst),\ +	SRI2_DWB(WB_EC_CONFIG, CNV, inst),\ +	SRI2_DWB(CNV_MODE, CNV, inst),\ +	SRI2_DWB(CNV_WINDOW_START, CNV, inst),\ +	SRI2_DWB(CNV_WINDOW_SIZE, CNV, inst),\ +	SRI2_DWB(CNV_UPDATE, CNV, inst),\ +	SRI2_DWB(CNV_SOURCE_SIZE, CNV, inst),\ +	SRI2_DWB(CNV_TEST_CNTL, CNV, inst),\ +	SRI2_DWB(CNV_TEST_CRC_RED, CNV, inst),\ +	SRI2_DWB(CNV_TEST_CRC_GREEN, CNV, inst),\ +	SRI2_DWB(CNV_TEST_CRC_BLUE, CNV, inst),\ +	SRI2_DWB(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\ +	SRI2_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\ +	SRI2_DWB(WBSCL_MODE, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TAP_CONTROL, WBSCL, inst),\ +	SRI2_DWB(WBSCL_DEST_SIZE, WBSCL, inst),\ +	SRI2_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\ +	SRI2_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\ +	SRI2_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\ +	SRI2_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\ +	SRI2_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\ +	SRI2_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\ +	SRI2_DWB(WBSCL_ROUND_OFFSET, WBSCL, inst),\ +	SRI2_DWB(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\ +	SRI2_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_CNTL, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_CRC_RED, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\ +	SRI2_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\ +	SRI2_DWB(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\ +	SRI2_DWB(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\ +	SRI2_DWB(WBSCL_CLAMP_CBCR, WBSCL, inst),\ +	SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\ +	SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\ +	SRI2_DWB(WBSCL_DEBUG, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\ +	SRI2_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\ +	SRI2_DWB(WB_DEBUG_CTRL, CNV, inst),\ +	SRI2_DWB(WB_DBG_MODE, CNV, inst),\ +	SRI2_DWB(WB_HW_DEBUG, CNV, inst),\ +	SRI2_DWB(CNV_TEST_DEBUG_INDEX, CNV, inst),\ +	SRI2_DWB(CNV_TEST_DEBUG_DATA, CNV, inst),\ +	SRI2_DWB(WB_SOFT_RESET, CNV, inst),\ +	SRI2_DWB(WB_WARM_UP_MODE_CTL1, CNV, inst),\ +	SRI2_DWB(WB_WARM_UP_MODE_CTL2, CNV, inst)  #define DWBC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ -	SF(WB_ENABLE, WB_ENABLE, mask_sh),\ -	SF(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\ -	SF(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\ -	SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\ -	SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\ -	SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\ -	SF(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\ -	SF(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\ -	SF(CNV_MODE, CNV_OUT_BPC, mask_sh),\ -	SF(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\ -	SF(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\ -	SF(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\ -	SF(CNV_MODE, 
CNV_INTERLACED_MODE, mask_sh),\ -	SF(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\ -	SF(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\ -	SF(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\ -	SF(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\ -	SF(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\ -	SF(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\ -	SF(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\ -	SF(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\ -	SF(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\ -	SF(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\ -	SF(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\ -	SF(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\ -	SF(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\ -	SF(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\ -	SF(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\ -	SF(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\ -	SF(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\ -	SF(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\ -	SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\ -	SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\ -	SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\ -	SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\ -	SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\ -	SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\ -	SF(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\ -	SF(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\ -	SF(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\ -	SF(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\ -	SF(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\ -	SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\ -	SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\ -	SF(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\ -	SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ -	SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\ -	SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\ -	SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ -	SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ -	SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ -	SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ -	SF(WBSCL_MODE, WBSCL_MODE, mask_sh),\ -	SF(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\ -	SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\ -	SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\ -	SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\ -	SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\ -	SF(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\ -	SF(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\ -	SF(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\ -	SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\ -	SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\ -	SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\ -	SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\ -	SF(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\ -	SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\ -	SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\ -	SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\ -	SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\ -	SF(WBSCL_ROUND_OFFSET, 
WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\ -	SF(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\ -	SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\ -	SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\ -	SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\ -	SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\ -	SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\ -	SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\ -	SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\ -	SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\ -	SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\ -	SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\ -	SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\ -	SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\ -	SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\ -	SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\ -	SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\ -	SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\ -	SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\ -	SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\ -	SF(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\ -	SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\ -	SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\ -	SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\ -	SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\ -	SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\ -	SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\ -	SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\ -	SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\ -	SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\ -	SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\ -	SF(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\ -	SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\ -	SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\ -	SF(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\ -	SF(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh) +	SF_DWB(WB_ENABLE, WB_ENABLE, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\ +	SF_DWB(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_OUT_BPC, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\ +	
SF_DWB(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\ +	SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\ +	SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\ +	SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\ +	SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\ +	SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\ +	SF_DWB(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\ +	SF_DWB(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\ +	SF_DWB(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\ +	SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\ +	SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\ +	SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\ +	SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\ +	SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\ +	SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\ +	SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\ +	SF_DWB(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\ +	SF_DWB(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\ +	SF_DWB(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\ +	SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\ +	SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\ +	SF_DWB(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ +	SF_DWB(WBSCL_MODE, WBSCL_MODE, mask_sh),\ +	SF_DWB(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\ +	SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\ +	SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\ +	SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\ +	SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\ +	SF_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\ +	SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\ +	SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\ +	SF_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\ +	SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, 
WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\ +	SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\ +	SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\ +	SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\ +	SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\ +	SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\ +	SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\ +	SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\ +	SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\ +	SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\ +	SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\ +	SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\ +	SF_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\ +	SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\ +	SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\ +	SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\ +	SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\ +	SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\ +	SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\ +	SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\ +	SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\ +	SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\ +	SF_DWB(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\ +	SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\ +	SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\ +	SF_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\ +	SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)  #define DWBC_REG_FIELD_LIST_DCN2_0(type) \  	type WB_ENABLE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index aacb1fb5c73e..24bd93219936 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -500,7 +500,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);  		s->sr_exit = 
REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);  	s = &wm->sets[1];  	s->wm_set = 1; @@ -511,7 +511,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);  	s = &wm->sets[2];  	s->wm_set = 2; @@ -522,7 +522,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);  	s = &wm->sets[3];  	s->wm_set = 3; @@ -533,7 +533,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,  		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);  		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);  	} -	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); +	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);  }  void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a7e0001a8f46..b83873a3a534 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -46,16 +46,15 @@  #include "dchubbub.h"  #include "reg_helper.h"  #include "dcn10/dcn10_cm_common.h" -#include "dc_link_dp.h"  #include "vm_helper.h"  #include "dccg.h"  #include "dc_dmub_srv.h"  #include "dce/dmub_hw_lock_mgr.h"  #include "hw_sequencer.h" -#include "inc/link_dpcd.h"  #include "dpcd_defs.h"  #include "inc/link_enc_cfg.h"  #include "link_hwss.h" +#include "link.h"  #define DC_LOGGER_INIT(logger) @@ -582,6 +581,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  	if (pipe_ctx->stream_res.gsl_group != 0)  		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); +	if (hubp->funcs->hubp_update_mall_sel) +		hubp->funcs->hubp_update_mall_sel(hubp, 0, false); +  	dc->hwss.set_flip_control_gsl(pipe_ctx, false);  	hubp->funcs->hubp_clk_cntl(hubp, false); @@ -605,6 +607,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; +	struct timing_generator *tg = is_phantom ? 
pipe_ctx->stream_res.tg : NULL; +  	DC_LOGGER_INIT(dc->ctx->logger);  	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) @@ -612,6 +617,12 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)  	dcn20_plane_atomic_disable(dc, pipe_ctx); +	/* Turn back off the phantom OTG after the phantom plane is fully disabled +	 */ +	if (is_phantom) +		if (tg && tg->funcs->disable_phantom_crtc) +			tg->funcs->disable_phantom_crtc(tg); +  	DC_LOG_DC("Power down front end %d\n",  					pipe_ctx->pipe_idx);  } @@ -700,7 +711,7 @@ enum dc_status dcn20_enable_stream_timing(  	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(  			pipe_ctx->clock_source,  			&pipe_ctx->stream_res.pix_clk_params, -			dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), +			link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),  			&pipe_ctx->pll_settings)) {  		BREAK_TO_DEBUGGER();  		return DC_ERROR_UNEXPECTED; @@ -1079,6 +1090,29 @@ void dcn20_blank_pixel_data(  				0);  	} +	if (!blank && dc->debug.enable_single_display_2to1_odm_policy) { +		/* when exiting dynamic ODM need to reinit DPG state for unused pipes */ +		struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe; + +		odm_pipe = pipe_ctx->next_odm_pipe; + +		while (old_odm_pipe) { +			if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx) +				dc->hwss.set_disp_pattern_generator(dc, +						old_odm_pipe, +						CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, +						CONTROLLER_DP_COLOR_SPACE_UDEFINED, +						COLOR_DEPTH_888, +						NULL, +						0, +						0, +						0); +			old_odm_pipe = old_odm_pipe->next_odm_pipe; +			if (odm_pipe) +				odm_pipe = odm_pipe->next_odm_pipe; +		} +	} +  	if (!blank)  		if (stream_res->abm) {  			dc->hwss.set_pipe(pipe_ctx); @@ -1287,6 +1321,19 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx  {  	new_pipe->update_flags.raw = 0; +	/* If non-phantom pipe is being transitioned to a phantom pipe, +	 * set disable and return immediately. This is because the pipe +	 * that was previously in use must be fully disabled before we +	 * can "enable" it as a phantom pipe (since the OTG will certainly +	 * be different). The post_unlock sequence will set the correct +	 * update flags to enable the phantom pipe. 
+	 */ +	if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom && +			new_pipe->plane_state && new_pipe->plane_state->is_phantom) { +		new_pipe->update_flags.bits.disable = 1; +		return; +	} +  	/* Exit on unchanged, unused pipe */  	if (!old_pipe->plane_state && !new_pipe->plane_state)  		return; @@ -1616,6 +1663,31 @@ static void dcn20_update_dchubp_dpp(  		hubp->funcs->phantom_hubp_post_enable(hubp);  } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ +	struct pipe_ctx *other_pipe; +	int vready_offset = pipe->pipe_dlg_param.vready_offset; + +	/* Always use the largest vready_offset of all connected pipes */ +	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} +	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { +		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) +			vready_offset = other_pipe->pipe_dlg_param.vready_offset; +	} + +	return vready_offset; +}  static void dcn20_program_pipe(  		struct dc *dc, @@ -1634,16 +1706,14 @@ static void dcn20_program_pipe(  			&& !pipe_ctx->prev_odm_pipe) {  		pipe_ctx->stream_res.tg->funcs->program_global_sync(  				pipe_ctx->stream_res.tg, -				pipe_ctx->pipe_dlg_param.vready_offset, +				calculate_vready_offset_for_group(pipe_ctx),  				pipe_ctx->pipe_dlg_param.vstartup_start,  				pipe_ctx->pipe_dlg_param.vupdate_offset,  				pipe_ctx->pipe_dlg_param.vupdate_width);  		if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { -			pipe_ctx->stream_res.tg->funcs->wait_for_state( -				pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); -			pipe_ctx->stream_res.tg->funcs->wait_for_state( -				pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); +			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); +			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);  		}  		pipe_ctx->stream_res.tg->funcs->set_vtg_params( @@ -1682,7 +1752,10 @@ static void dcn20_program_pipe(  	 * only do gamma programming for powering on, internal memcmp to avoid  	 * updating on slave planes  	 */ -	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) +	if (pipe_ctx->update_flags.bits.enable || +			pipe_ctx->update_flags.bits.plane_changed || +			pipe_ctx->stream->update_flags.bits.out_tf || +			pipe_ctx->plane_state->update_flags.bits.output_tf_change)  		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);  	/* If the pipe has been enabled or has a different opp, we @@ -1704,6 +1777,15 @@ static void dcn20_program_pipe(  			&pipe_ctx->stream->bit_depth_params,  			&pipe_ctx->stream->clamping);  	} + +	/* Set ABM pipe after other pipe configurations done */ +	if (pipe_ctx->plane_state->visible) { +		if (pipe_ctx->stream_res.abm) { +			dc->hwss.set_pipe(pipe_ctx); +			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm, 
+				pipe_ctx->stream->abm_level); +		} +	}  }  void dcn20_program_front_end_for_ctx( @@ -1741,6 +1823,20 @@ void dcn20_program_front_end_for_ctx(  		dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],  				&context->res_ctx.pipe_ctx[i]); +	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double +	 * buffer updates properly) +	 */ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + +		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && +			dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { +			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; + +			if (tg->funcs->enable_crtc) +				tg->funcs->enable_crtc(tg); +		} +	}  	/* OTG blank before disabling all front ends */  	for (i = 0; i < dc->res_pool->pipe_count; i++)  		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -1813,6 +1909,17 @@ void dcn20_program_front_end_for_ctx(  			context->stream_status[0].plane_count > 1) {  			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);  		} + +		/* when dynamic ODM is active, pipes must be reconfigured when all planes are +		 * disabled, as some transitions will leave software and hardware state +		 * mismatched. +		 */ +		if (dc->debug.enable_single_display_2to1_odm_policy && +			pipe->stream && +			pipe->update_flags.bits.disable && +			!pipe->prev_odm_pipe && +			hws->funcs.update_odm) +			hws->funcs.update_odm(dc, context, pipe);  	}  } @@ -1852,26 +1959,6 @@ void dcn20_post_unlock_program_front_end(  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; -		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - -		/* If an active, non-phantom pipe is being transitioned into a phantom -		 * pipe, wait for the double buffer update to complete first before we do -		 * phantom pipe programming (HUBP_VTG_SEL updates right away so that can -		 * cause issues). -		 */ -		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && -				old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { -			old_pipe->stream_res.tg->funcs->wait_for_state( -					old_pipe->stream_res.tg, -					CRTC_STATE_VBLANK); -			old_pipe->stream_res.tg->funcs->wait_for_state( -					old_pipe->stream_res.tg, -					CRTC_STATE_VACTIVE); -		} -	} - -	for (i = 0; i < dc->res_pool->pipe_count; i++) { -		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];  		if (pipe->plane_state && !pipe->top_pipe) {  			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition @@ -1881,6 +1968,11 @@ void dcn20_post_unlock_program_front_end(  			 */  			while (pipe) {  				if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +					/* When turning on the phantom pipe we want to run through the +					 * entire enable sequence, so apply all the "enable" flags. 
+					 */ +					if (dc->hwss.apply_update_flags_for_phantom) +						dc->hwss.apply_update_flags_for_phantom(pipe);  					if (dc->hwss.update_phantom_vp_position)  						dc->hwss.update_phantom_vp_position(dc, context, pipe);  					dcn20_program_pipe(dc, pipe, context); @@ -1941,8 +2033,11 @@ void dcn20_prepare_bandwidth(  		}  	} -	/* program dchubbub watermarks */ -	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, +	/* program dchubbub watermarks: +	 * For assigning wm_optimized_required, use |= operator since we don't want +	 * to clear the value if the optimize has not happened yet +	 */ +	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,  					&context->bw_ctx.bw.dcn.watermarks,  					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,  					false); @@ -1953,10 +2048,13 @@ void dcn20_prepare_bandwidth(  	/* decrease compbuf size */  	if (hubbub->funcs->program_compbuf_size) { -		if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) +		if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {  			compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; -		else +			dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); +		} else {  			compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; +			dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); +		}  		hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);  	} @@ -2037,7 +2135,7 @@ bool dcn20_update_bandwidth(  			pipe_ctx->stream_res.tg->funcs->program_global_sync(  					pipe_ctx->stream_res.tg, -					pipe_ctx->pipe_dlg_param.vready_offset, +					calculate_vready_offset_for_group(pipe_ctx),  					pipe_ctx->pipe_dlg_param.vstartup_start,  					pipe_ctx->pipe_dlg_param.vupdate_offset,  					pipe_ctx->pipe_dlg_param.vupdate_width); @@ -2298,7 +2396,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,  	params.link_settings.link_rate = link_settings->link_rate; -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */  		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(  				pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -2351,7 +2449,7 @@ static void dcn20_reset_back_end_for_pipe(  		 * VBIOS lit up eDP, so check link status too.  		 */  		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) -			core_link_disable_stream(pipe_ctx); +			link_set_dpms_off(pipe_ctx);  		else if (pipe_ctx->stream_res.audio)  			dc->hwss.disable_audio_stream(pipe_ctx); @@ -2371,7 +2469,7 @@ static void dcn20_reset_back_end_for_pipe(  		}  	}  	else if (pipe_ctx->stream_res.dsc) { -		dp_set_dsc_enable(pipe_ctx, false); +		link_set_dsc_enable(pipe_ctx, false);  	}  	/* by upper caller loop, parent pipe: pipe0, will be reset last. 
@@ -2554,6 +2652,37 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  	hubp->mpcc_id = mpcc_id;  } +static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) +{ +	switch (link->link_enc->transmitter) { +	case TRANSMITTER_UNIPHY_A: +		return PHYD32CLKA; +	case TRANSMITTER_UNIPHY_B: +		return PHYD32CLKB; +	case TRANSMITTER_UNIPHY_C: +		return PHYD32CLKC; +	case TRANSMITTER_UNIPHY_D: +		return PHYD32CLKD; +	case TRANSMITTER_UNIPHY_E: +		return PHYD32CLKE; +	default: +		return PHYD32CLKA; +	} +} + +static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) +{ +	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; +	int count = 1; + +	while (odm_pipe != NULL) { +		count++; +		odm_pipe = odm_pipe->next_odm_pipe; +	} + +	return count; +} +  void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  {  	enum dc_lane_count lane_count = @@ -2567,12 +2696,43 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  	struct timing_generator *tg = pipe_ctx->stream_res.tg;  	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);  	struct dc *dc = pipe_ctx->stream->ctx->dc; +	struct dtbclk_dto_params dto_params = {0}; +	struct dccg *dccg = dc->res_pool->dccg; +	enum phyd32clk_clock_source phyd32clk; +	int dp_hpo_inst; +	struct dce_hwseq *hws = dc->hwseq; +	unsigned int k1_div = PIXEL_RATE_DIV_NA; +	unsigned int k2_div = PIXEL_RATE_DIV_NA; -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		if (dc->hwseq->funcs.setup_hpo_hw_control)  			dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);  	} +	if (link_is_dp_128b_132b_signal(pipe_ctx)) { +		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; +		dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); + +		phyd32clk = get_phyd32clk_src(link); +		dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + +		dto_params.otg_inst = tg->inst; +		dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; +		dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); +		dto_params.timing = &pipe_ctx->stream->timing; +		dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); +		dccg->funcs->set_dtbclk_dto(dccg, &dto_params); +	} + +	if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { +		hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); + +		dc->res_pool->dccg->funcs->set_pixel_rate_div( +			dc->res_pool->dccg, +			pipe_ctx->stream_res.tg->inst, +			k1_div, k2_div); +	} +  	link_hwss->setup_stream_encoder(pipe_ctx);  	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { @@ -2583,7 +2743,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  	dc->hwss.update_info_frame(pipe_ctx);  	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); +		link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);  	/* enable early control to avoid corruption on DP monitor*/  	active_total_with_borders = @@ -2601,14 +2761,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  	if (dc->hwseq->funcs.set_pixels_per_cycle)  		dc->hwseq->funcs.set_pixels_per_cycle(pipe_ctx); - -	/* enable audio only within mode set */ -	if (pipe_ctx->stream_res.audio != NULL) { -		if (is_dp_128b_132b_signal(pipe_ctx)) -			pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc); -		else if 
(dc_is_dp_signal(pipe_ctx->stream->signal)) -			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); -	}  }  void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index 2f9bfaeaba8d..51a57dae1811 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dcn20_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h index 7bcee5894d2e..5ab32aa51e13 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h @@ -29,13 +29,6 @@  #define TO_DCN20_MMHUBBUB(mcif_wb_base) \  	container_of(mcif_wb_base, struct dcn20_mmhubbub, base) -/* DCN */ -#define BASE_INNER(seg) \ -	DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ -	BASE_INNER(seg) -  #define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \  	SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\  	SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 8224b9bf01d1..3af24ef9cb2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -62,7 +62,6 @@  #include "dml/display_mode_vba.h"  #include "dcn20_dccg.h"  #include "dcn20_vmid.h" -#include "dc_link_ddc.h"  #include "dce/dce_panel_cntl.h"  #include "navi10_ip_offset.h" @@ -90,6 +89,7 @@  #include "amdgpu_socbb.h" +#include "link.h"  #define DC_LOGGER_INIT(logger)  #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -124,8 +124,6 @@ enum dcn20_clk_src_array_id {   * macros to expend register list macro defined in HW object header file */  /* DCN */ -/* TODO awful hack. 
fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -138,6 +136,15 @@ enum dcn20_clk_src_array_id {  	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## reg_name +#define SRI2_DWB(reg_name, block, id)\ +	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ +					mm ## reg_name +#define SF_DWB(reg_name, field_name, post_fix)\ +	.field_name = reg_name ## __ ## field_name ## post_fix + +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define SRIR(var_name, reg_name, block, id)\  	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## reg_name @@ -1207,7 +1214,7 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)  		dcn20_pp_smu_destroy(&pool->base.pp_smu);  	if (pool->base.oem_device != NULL) -		dal_ddc_service_destroy(&pool->base.oem_device); +		link_destroy_ddc_service(&pool->base.oem_device);  }  struct hubp *dcn20_hubp_create( @@ -1382,6 +1389,9 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; +		if (pipe_ctx->top_pipe) +			continue; +  		if (pipe_ctx->stream != dc_stream)  			continue; @@ -1454,6 +1464,22 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_  	return result;  } +/** + * dcn20_split_stream_for_odm - Check if stream can be splited for ODM + * + * @dc: DC object with resource pool info required for pipe split + * @res_ctx: Persistent state of resources + * @prev_odm_pipe: Reference to the previous ODM pipe + * @next_odm_pipe: Reference to the next ODM pipe + * + * This function takes a logically active pipe and a logically free pipe and + * halves all the scaling parameters that need to be halved while populating + * the free pipe with the required resources and configuring the next/previous + * ODM pipe pointers. + * + * Return: + * Return true if split stream for ODM is possible, otherwise, return false. 
+ */  bool dcn20_split_stream_for_odm(  		const struct dc *dc,  		struct resource_context *res_ctx, @@ -2199,14 +2225,10 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat  	enum surface_pixel_format surf_pix_format = plane_state->format;  	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); -	enum swizzle_mode_values swizzle = DC_SW_LINEAR; - +	plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;  	if (bpp == 64) -		swizzle = DC_SW_64KB_D; -	else -		swizzle = DC_SW_64KB_S; +		plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D; -	plane_state->tiling_info.gfx9.swizzle = swizzle;  	return DC_OK;  } @@ -2743,7 +2765,7 @@ static bool dcn20_resource_construct(  		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;  		ddc_init_data.id.enum_id = 0;  		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; -		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); +		pool->base.oem_device = link_create_ddc_service(&ddc_init_data);  	} else {  		pool->base.oem_device = NULL;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index b40489e678f9..42865d6c0cdd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -29,7 +29,7 @@  #include "dcn20_stream_encoder.h"  #include "reg_helper.h"  #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dpcd_defs.h"  #define DC_LOGGER \ @@ -423,6 +423,22 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc,  	}  } +static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num( +		struct stream_encoder *enc, +		struct encoder_info_frame *info_frame) +{ +	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + +	if (info_frame->adaptive_sync.valid == true && +		info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { +		//00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF +		REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1); + +		REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, +					info_frame->sdp_line_num.adaptive_sync_line_num); +	} +} +  static void enc2_stream_encoder_update_dp_info_packets(  	struct stream_encoder *enc,  	const struct encoder_info_frame *info_frame) @@ -530,7 +546,7 @@ void enc2_stream_encoder_dp_unblank(  	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);  }  static void enc2_dp_set_odm_combine( @@ -587,6 +603,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {  		enc2_stream_encoder_update_hdmi_info_packets,  	.stop_hdmi_info_packets =  		enc2_stream_encoder_stop_hdmi_info_packets, +	.update_dp_info_packets_sdp_line_num = +		enc2_stream_encoder_update_dp_info_packets_sdp_line_num,  	.update_dp_info_packets =  		enc2_stream_encoder_update_dp_info_packets,  	.send_immediate_sdp_message = diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h index f1ef46e8da5b..e7a1b7fa2cce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h @@ -28,12 +28,6 @@  #include "vmid.h" -#define BASE_INNER(seg) \ -	DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ -	BASE_INNER(seg) -  #define DCN20_VMID_REG_LIST(id)\  	SRI(CNTL, DCN_VM_CONTEXT, id),\  	
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index 7f9ec59ef443..8d31fa131cd6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dcn201_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index c5e200d09038..aeb0e0d9b70a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -635,7 +635,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, -			 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage); +			 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);  	s = &wm->sets[1];  	s->wm_set = 1; @@ -649,7 +649,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, -			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage); +			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);  	s = &wm->sets[2];  	s->wm_set = 2; @@ -663,7 +663,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, -			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage); +			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);  	s = &wm->sets[3];  	s->wm_set = 3; @@ -677,7 +677,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, -			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); +			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);  }  static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 69cc192a7e71..15475c7e2cf9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -35,7 +35,7 @@  #include "hw/clk_mgr.h"  #include "dc_dmub_srv.h"  #include "abm.h" - +#include "link.h"  #define DC_LOGGER_INIT(logger) @@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)  		return;  	pipe_ctx->stream->dpms_off = false; -	core_link_enable_stream(context, pipe_ctx); -	core_link_disable_stream(pipe_ctx); +	link_set_dpms_on(context, pipe_ctx); +	link_set_dpms_off(pipe_ctx);  	pipe_ctx->stream->dpms_off = true;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index 0a1ba6e7081c..eb9abb9f9698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -31,7 +31,6 @@  #include "dcn21_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h" 
 #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 887081472c0d..8f9244fe5c86 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -94,8 +94,6 @@   * macros to expend register list macro defined in HW object header file */  /* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -671,12 +669,15 @@ static const struct dc_debug_options debug_defaults_diags = {  		.disable_pplib_wm_range = true,  		.disable_stutter = true,  		.disable_48mhz_pwrdwn = true, -		.disable_psr = true,  		.enable_tri_buf = true,  		.use_max_lb = true  };  static const struct dc_panel_config panel_config_defaults = { +		.psr = { +			.disable_psr = false, +			.disallow_psrsu = false, +		},  		.ilr = {  			.optimize_edp_link_rate = true,  		}, @@ -1392,15 +1393,13 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)  static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)  { -	enum dc_status result = DC_OK; -  	if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {  		plane_state->dcc.enable = 1;  		/* align to our worst case block width */  		plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;  	} -	result = dcn20_patch_unknown_plane_state(plane_state); -	return result; + +	return dcn20_patch_unknown_plane_state(plane_state);  }  static const struct resource_funcs dcn21_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 6f3c2fb60790..1fb8fd7afc95 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dcn30_dio_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  /* #include "dcn3ag/dcn3ag_phy_fw.h" */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 17df53793c92..5f9079d3943a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -404,6 +404,22 @@ static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s)  	}  } +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( +		struct stream_encoder *enc, +		struct encoder_info_frame *info_frame) +{ +	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + +	if (info_frame->adaptive_sync.valid == true && +		info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { +		//00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF +		REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1); + +		REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, +					info_frame->sdp_line_num.adaptive_sync_line_num); +	} +} +  void enc3_stream_encoder_update_dp_info_packets(  	struct stream_encoder *enc,  	const struct encoder_info_frame *info_frame) @@ -452,12 +468,20 @@ void enc3_stream_encoder_update_dp_info_packets(  	 * use other packetIndex (such as 5,6) for other info packet  	 */ +	if (info_frame->adaptive_sync.valid) +		enc->vpg->funcs->update_generic_info_packet( +				enc->vpg, +				5,  /* packetIndex 
*/ +				&info_frame->adaptive_sync, +				true); +  	/* enable/disable transmission of packet(s).  	 * If enabled, packet transmission begins on the next frame  	 */  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);  	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); +	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);  	/* This bit is the master enable bit.  	 * When enabling secondary stream engine, @@ -803,6 +827,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {  		enc3_stream_encoder_update_hdmi_info_packets,  	.stop_hdmi_info_packets =  		enc3_stream_encoder_stop_hdmi_info_packets, +	.update_dp_info_packets_sdp_line_num = +		enc3_stream_encoder_update_dp_info_packets_sdp_line_num,  	.update_dp_info_packets =  		enc3_stream_encoder_update_dp_info_packets,  	.stop_dp_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h index 54ee230e7f98..06310973ded2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h @@ -292,6 +292,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(  void enc3_stream_encoder_stop_hdmi_info_packets(  	struct stream_encoder *enc); +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( +		struct stream_encoder *enc, +		struct encoder_info_frame *info_frame); +  void enc3_stream_encoder_update_dp_info_packets(  	struct stream_encoder *enc,  	const struct encoder_info_frame *info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h index 1010930cf071..fc00ec0a0881 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h @@ -27,21 +27,6 @@  #define TO_DCN30_DWBC(dwbc_base) \  	container_of(dwbc_base, struct dcn30_dwbc, base) -/* DCN */ -#define BASE_INNER(seg) \ -	DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ -	BASE_INNER(seg) - -#define SF_DWB(reg_name, block, id, field_name, post_fix)\ -	.field_name = block ## id ## _ ## reg_name ## __ ## field_name ## post_fix - - /* set field name */ -#define SF_DWB2(reg_name, block, id, field_name, post_fix)\ -	.field_name = reg_name ## __ ## field_name ## post_fix - -  #define DWBC_COMMON_REG_LIST_DCN30(inst) \  	SR(DWB_ENABLE_CLK_CTRL),\  	SR(DWB_MEM_PWR_CTRL),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8c5045711264..df787fcf8e86 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -50,8 +50,7 @@  #include "dpcd_defs.h"  #include "../dcn20/dcn20_hwseq.h"  #include "dcn30_resource.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" @@ -91,8 +90,8 @@ bool dcn30_set_blend_lut(  	return result;  } -static bool dcn30_set_mpc_shaper_3dlut( -	struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) +static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, +				       const struct dc_stream_state *stream)  {  	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;  	int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -104,19 +103,18 @@ static bool dcn30_set_mpc_shaper_3dlut(  	const struct pwl_params *shaper_lut = NULL;  	//get the shaper lut params  	if 
(stream->func_shaper) { -		if (stream->func_shaper->type == TF_TYPE_HWPWL) +		if (stream->func_shaper->type == TF_TYPE_HWPWL) {  			shaper_lut = &stream->func_shaper->pwl; -		else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { -			cm_helper_translate_curve_to_hw_format( -					stream->func_shaper, -					&dpp_base->shaper_params, true); +		} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { +			cm_helper_translate_curve_to_hw_format(stream->func_shaper, +							       &dpp_base->shaper_params, true);  			shaper_lut = &dpp_base->shaper_params;  		}  	}  	if (stream->lut3d_func && -		stream->lut3d_func->state.bits.initialized == 1 && -		stream->lut3d_func->state.bits.rmu_idx_valid == 1) { +	    stream->lut3d_func->state.bits.initialized == 1 && +	    stream->lut3d_func->state.bits.rmu_idx_valid == 1) {  		if (stream->lut3d_func->state.bits.rmu_mux_num == 0)  			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;  		else if (stream->lut3d_func->state.bits.rmu_mux_num == 1) @@ -125,20 +123,22 @@ static bool dcn30_set_mpc_shaper_3dlut(  			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;  		if (mpcc_id_projected != mpcc_id)  			BREAK_TO_DEBUGGER(); -		/*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/ +		/* find the reason why logical layer assigned a different +		 * mpcc_id into acquire_post_bldn_3dlut +		 */  		acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id, -				stream->lut3d_func->state.bits.rmu_mux_num); +						       stream->lut3d_func->state.bits.rmu_mux_num);  		if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)  			BREAK_TO_DEBUGGER(); -		result = mpc->funcs->program_3dlut(mpc, -								&stream->lut3d_func->lut_3d, -								stream->lut3d_func->state.bits.rmu_mux_num); + +		result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, +						   stream->lut3d_func->state.bits.rmu_mux_num);  		result = mpc->funcs->program_shaper(mpc, shaper_lut, -				stream->lut3d_func->state.bits.rmu_mux_num); -	} else -		/*loop through the available mux and release the requested mpcc_id*/ +						    stream->lut3d_func->state.bits.rmu_mux_num); +	} else { +		// loop through the available mux and release the requested mpcc_id  		mpc->funcs->release_rmu(mpc, mpcc_id); - +	}  	return result;  } @@ -540,7 +540,7 @@ void dcn30_init_hw(struct dc *dc)  			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);  	/* we want to turn off all dp displays before doing detection */ -	dc_link_blank_all_dp_displays(dc); +	link_blank_all_dp_displays(dc);  	if (hws->funcs.enable_power_gating_plane)  		hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -675,10 +675,16 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)  		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); -	else +	else { +		if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) +			pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( +				pipe_ctx->stream_res.stream_enc, +				&pipe_ctx->stream_res.encoder_info_frame); +  		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); +	}  }  void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx) @@ -992,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,  			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, 
dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);  	dcn20_prepare_bandwidth(dc, context); - -	dc_dmub_srv_p_state_delegate(dc, -		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h index 7446e54bf5aa..376620a8f02f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h @@ -31,13 +31,6 @@  #define TO_DCN30_MMHUBBUB(mcif_wb_base) \  	container_of(mcif_wb_base, struct dcn30_mmhubbub, base) -/* DCN */ -#define BASE_INNER(seg) \ -	DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ -	BASE_INNER(seg) -  #define MCIF_WB_COMMON_REG_LIST_DCN3_0(inst) \  	SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\  	SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 892d3c4d01a1..08b92715e2e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -30,6 +30,7 @@  #include "dc_dmub_srv.h"  #include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h"  #define REG(reg)\  	optc1->tg_regs->reg @@ -58,6 +59,8 @@ void optc3_triplebuffer_lock(struct timing_generator *optc)  		REG_WAIT(OTG_MASTER_UPDATE_LOCK,  				UPDATE_LOCK_STATUS, 1,  				1, 10); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);  }  void optc3_lock_doublebuffer_enable(struct timing_generator *optc) @@ -93,6 +96,8 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc)  		MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,  		MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,  		OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);  }  void optc3_lock_doublebuffer_disable(struct timing_generator *optc) @@ -108,6 +113,8 @@ void optc3_lock_doublebuffer_disable(struct timing_generator *optc)  	REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0);  	REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 0); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);  }  void optc3_lock(struct timing_generator *optc) @@ -122,6 +129,8 @@ void optc3_lock(struct timing_generator *optc)  	REG_WAIT(OTG_MASTER_UPDATE_LOCK,  			UPDATE_LOCK_STATUS, 1,  			1, 10); + +	TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);  }  void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest) @@ -282,6 +291,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e  		   OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);  } +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +{ +	struct optc *optc1 = DCN10TG_FROM_TG(optc); + +	REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5hz */ + +} +  void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)  {  	optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); @@ -351,6 +368,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {  		.program_manual_trigger = optc2_program_manual_trigger,  		.setup_manual_trigger = optc2_setup_manual_trigger,  		.get_hw_timing = optc1_get_hw_timing, +		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,  };  void dcn30_timing_generator_init(struct optc *optc1) diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index dd45a5499b07..fb06dc9a4893 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -279,6 +279,7 @@  	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\  	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\  	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ +	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\  	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\  	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh) @@ -317,6 +318,7 @@  	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\  	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\  	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ +	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\  	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)  void dcn30_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 020f512e9690..b5b5320c7bef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -60,7 +60,7 @@  #include "dml/display_mode_vba.h"  #include "dcn30/dcn30_dccg.h"  #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "dce/dce_panel_cntl.h"  #include "dcn30/dcn30_dwb.h" @@ -108,8 +108,6 @@ enum dcn30_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. 
fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -142,6 +140,9 @@ enum dcn30_clk_src_array_id {  	.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## reg_name @@ -723,7 +724,6 @@ static const struct dc_debug_options debug_defaults_drv = {  	.underflow_assert_delay_us = 0xFFFFFFFF,  	.dwb_fi_phase = -1, // -1 = disable,  	.dmub_command_table = true, -	.disable_psr = false,  	.use_max_lb = true,  	.exit_idle_opt_for_cursor_updates = true  }; @@ -742,11 +742,17 @@ static const struct dc_debug_options debug_defaults_diags = {  	.scl_reset_length10 = true,  	.dwb_fi_phase = -1, // -1 = disable  	.dmub_command_table = true, -	.disable_psr = true,  	.enable_tri_buf = true,  	.use_max_lb = true  }; +static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +	}, +}; +  static void dcn30_dpp_destroy(struct dpp **dpp)  {  	kfree(TO_DCN20_DPP(*dpp)); @@ -1202,7 +1208,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)  		dcn_dccg_destroy(&pool->base.dccg);  	if (pool->base.oem_device != NULL) -		dal_ddc_service_destroy(&pool->base.oem_device); +		link_destroy_ddc_service(&pool->base.oem_device);  }  static struct hubp *dcn30_hubp_create( @@ -1323,6 +1329,7 @@ static struct clock_source *dcn30_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1470,8 +1477,8 @@ bool dcn30_acquire_post_bldn_3dlut(  				state->bits.mpc_rmu2_mux = mpcc_id;  			ret = true;  			break; -			}  		} +	}  	return ret;  } @@ -1641,7 +1648,8 @@ noinline bool dcn30_internal_validate_bw(  		display_e2e_pipe_params_st *pipes,  		int *pipe_cnt_out,  		int *vlevel_out, -		bool fast_validate) +		bool fast_validate, +		bool allow_self_refresh_only)  {  	bool out = false;  	bool repopulate_pipes = false; @@ -1668,7 +1676,7 @@ noinline bool dcn30_internal_validate_bw(  	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); -	if (!fast_validate) { +	if (!fast_validate || !allow_self_refresh_only) {  		/*  		 * DML favors voltage over p-state, but we're more interested in  		 * supporting p-state over voltage. We can't support p-state in @@ -1681,11 +1689,12 @@ noinline bool dcn30_internal_validate_bw(  		if (vlevel < context->bw_ctx.dml.soc.num_states)  			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);  	} -	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || -			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { +	if (allow_self_refresh_only && +	    (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || +			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {  		/* -		 * If mode is unsupported or there's still no p-state support then -		 * fall back to favoring voltage. +		 * If mode is unsupported or there's still no p-state support +		 * then fall back to favoring voltage.  		 *  		 * We don't actually support prefetch mode 2, so require that we  		 * at least support prefetch mode 1. 
@@ -2056,7 +2065,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,  	BW_VAL_TRACE_COUNT();  	DC_FP_START(); -	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); +	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);  	DC_FP_END();  	if (pipe_cnt == 0) @@ -2212,6 +2221,11 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  	}  } +static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ +	*panel_config = panel_config_defaults; +} +  static const struct resource_funcs dcn30_res_pool_funcs = {  	.destroy = dcn30_destroy_resource_pool,  	.link_enc_create = dcn30_link_encoder_create, @@ -2231,6 +2245,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {  	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,  	.update_bw_bounding_box = dcn30_update_bw_bounding_box,  	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +	.get_panel_config_defaults = dcn30_get_panel_config_defaults,  };  #define CTX ctx @@ -2577,7 +2592,7 @@ static bool dcn30_resource_construct(  		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;  		ddc_init_data.id.enum_id = 0;  		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; -		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); +		pool->base.oem_device = link_create_ddc_service(&ddc_init_data);  	} else {  		pool->base.oem_device = NULL;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index 7d063c7d6a4b..8e6b8b7368fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(  		display_e2e_pipe_params_st *pipes,  		int *pipe_cnt_out,  		int *vlevel_out, -		bool fast_validate); +		bool fast_validate, +		bool allow_self_refresh_only);  void dcn30_calculate_wm_and_dlg(  		struct dc *dc, struct dc_state *context,  		display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c index c9fbaed23965..1b39a6e8a1ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c @@ -29,7 +29,6 @@  #include "link_encoder.h"  #include "dcn301_dio_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index f04595b750ab..ee62ae3eb98f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -107,8 +107,6 @@ enum dcn301_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. 
fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -146,6 +144,9 @@ enum dcn301_clk_src_array_id {  	.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					mm ## block ## id ## _ ## reg_name @@ -1288,6 +1289,7 @@ static struct clock_source *dcn301_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1412,7 +1414,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {  	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,  	.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,  	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, -	.update_bw_bounding_box = dcn301_update_bw_bounding_box +	.update_bw_bounding_box = dcn301_update_bw_bounding_box, +	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state  };  static bool dcn301_resource_construct( @@ -1491,6 +1494,8 @@ static bool dcn301_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.dp_hdmi21_pcon_support = true; +  	/* read VBIOS LTTPR caps */  	if (ctx->dc_bios->funcs->get_lttpr_caps) {  		enum bp_result bp_query_result; diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index b925b6ddde5a..03ddf4f5f065 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -47,6 +47,7 @@  #include "dcn10/dcn10_resource.h" +#include "link.h"  #include "dce/dce_abm.h"  #include "dce/dce_audio.h"  #include "dce/dce_aux.h" @@ -112,10 +113,16 @@ static const struct dc_debug_options debug_defaults_diags = {  		.dwb_fi_phase = -1, // -1 = disable  		.dmub_command_table = true,  		.enable_tri_buf = true, -		.disable_psr = true,  		.use_max_lb = true  }; +static const struct dc_panel_config panel_config_defaults = { +		.psr = { +			.disable_psr = false, +			.disallow_psrsu = false, +		}, +}; +  enum dcn302_clk_src_array_id {  	DCN302_CLK_SRC_PLL0,  	DCN302_CLK_SRC_PLL1, @@ -177,7 +184,6 @@ static const struct dc_plane_cap plane_cap = {  		mm ## reg_name  /* DCN */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -210,6 +216,9 @@ static const struct dc_plane_cap plane_cap = {  		.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  		mm ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define SRII_MPC_RMU(reg_name, block, id)\  		.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  		mm ## block ## id ## _ ## reg_name @@ -458,6 +467,7 @@ static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, s  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1116,6 +1126,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)  	if (pool->dccg != NULL)  		dcn_dccg_destroy(&pool->dccg); + +	if (pool->oem_device != NULL) +		link_destroy_ddc_service(&pool->oem_device);  }  
static void dcn302_destroy_resource_pool(struct resource_pool **pool) @@ -1132,6 +1145,11 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param  	DC_FP_END();  } +static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ +	*panel_config = panel_config_defaults; +} +  static struct resource_funcs dcn302_res_pool_funcs = {  		.destroy = dcn302_destroy_resource_pool,  		.link_enc_create = dcn302_link_encoder_create, @@ -1151,6 +1169,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {  		.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,  		.update_bw_bounding_box = dcn302_update_bw_bounding_box,  		.patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +		.get_panel_config_defaults = dcn302_get_panel_config_defaults,  };  static struct dc_cap_funcs cap_funcs = { @@ -1201,6 +1220,7 @@ static bool dcn302_resource_construct(  	int i;  	struct dc_context *ctx = dc->ctx;  	struct irq_service_init_data init_data; +	struct ddc_service_init_data ddc_init_data = {0};  	ctx->dc_bios->regs = &bios_regs; @@ -1266,6 +1286,8 @@ static bool dcn302_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.dp_hdmi21_pcon_support = true; +  	/* read VBIOS LTTPR caps */  	if (ctx->dc_bios->funcs->get_lttpr_caps) {  		enum bp_result bp_query_result; @@ -1480,6 +1502,17 @@ static bool dcn302_resource_construct(  	dc->cap_funcs = cap_funcs; +	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { +		ddc_init_data.ctx = dc->ctx; +		ddc_init_data.link = NULL; +		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; +		ddc_init_data.id.enum_id = 0; +		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; +		pool->oem_device = link_create_ddc_service(&ddc_init_data); +	} else { +		pool->oem_device = NULL; +	} +  	return true;  create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 527d5c902878..31e212064168 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -29,7 +29,7 @@  #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "dce/dce_abm.h"  #include "dce/dce_audio.h" @@ -96,7 +96,13 @@ static const struct dc_debug_options debug_defaults_diags = {  		.dwb_fi_phase = -1, // -1 = disable  		.dmub_command_table = true,  		.enable_tri_buf = true, -		.disable_psr = true, +}; + +static const struct dc_panel_config panel_config_defaults = { +		.psr = { +			.disable_psr = false, +			.disallow_psrsu = false, +		},  };  enum dcn303_clk_src_array_id { @@ -156,7 +162,6 @@ static const struct dc_plane_cap plane_cap = {  		mm ## reg_name  /* DCN */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -189,6 +194,9 @@ static const struct dc_plane_cap plane_cap = {  		.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  		mm ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define SRII_MPC_RMU(reg_name, block, id)\  		.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  		mm ## block ## id ## _ ## reg_name @@ -425,6 +433,7 @@ static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, s  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ 
-1045,7 +1054,7 @@ static void dcn303_resource_destruct(struct resource_pool *pool)  		dcn_dccg_destroy(&pool->dccg);  	if (pool->oem_device != NULL) -		dal_ddc_service_destroy(&pool->oem_device); +		link_destroy_ddc_service(&pool->oem_device);  }  static void dcn303_destroy_resource_pool(struct resource_pool **pool) @@ -1055,6 +1064,10 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool)  	*pool = NULL;  } +static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ +	*panel_config = panel_config_defaults; +}  void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)  { @@ -1082,6 +1095,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {  		.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,  		.update_bw_bounding_box = dcn303_update_bw_bounding_box,  		.patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +		.get_panel_config_defaults = dcn303_get_panel_config_defaults,  };  static struct dc_cap_funcs cap_funcs = { @@ -1198,6 +1212,8 @@ static bool dcn303_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.dp_hdmi21_pcon_support = true; +  	/* read VBIOS LTTPR caps */  	if (ctx->dc_bios->funcs->get_lttpr_caps) {  		enum bp_result bp_query_result; @@ -1405,7 +1421,7 @@ static bool dcn303_resource_construct(  		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;  		ddc_init_data.id.enum_id = 0;  		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; -		pool->oem_device = dal_ddc_service_create(&ddc_init_data); +		pool->oem_device = link_create_ddc_service(&ddc_init_data);  	} else {  		pool->oem_device = NULL;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c index de5e18c2a3ac..24e9ff65434d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c @@ -134,23 +134,10 @@ static void apg31_se_audio_setup(  	/* Disable forced mem power off */  	REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0); - -	apg31_enable(apg); -} - -static void apg31_audio_mute_control( -	struct apg *apg, -	bool mute) -{ -	if (mute) -		apg31_disable(apg); -	else -		apg31_enable(apg);  }  static struct apg_funcs dcn31_apg_funcs = {  	.se_audio_setup			= apg31_se_audio_setup, -	.audio_mute_control		= apg31_audio_mute_control,  	.enable_apg			= apg31_enable,  	.disable_apg			= apg31_disable,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h index 24f568e120d8..1b81f6773c53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h @@ -84,10 +84,6 @@ struct apg_funcs {  		unsigned int az_inst,  		struct audio_info *audio_info); -	void (*audio_mute_control)( -		struct apg *apg, -		bool mute); -  	void (*enable_apg)(  		struct apg *apg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ab70ebd8f223..275e78c06dee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -30,7 +30,6 @@  #include "link_encoder.h"  #include "dcn31_dio_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 80dfaa4d4d81..0b317ed31f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -242,7 +242,10 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(  		REG_UPDATE(DP_DPHY_SYM32_CONTROL,  				MODE, DP2_TEST_PATTERN);  		break; -	case DP_TEST_PATTERN_SQUARE_PULSE: +	case DP_TEST_PATTERN_SQUARE: +	case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: +	case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: +	case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:  		REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,  				TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 814f401db3b3..d76f55a12eb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -430,6 +430,22 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(  			MSA_DATA_LANE_3, 0);  } +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num( +		struct hpo_dp_stream_encoder *enc, +		struct encoder_info_frame *info_frame) +{ +	struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + +	if (info_frame->adaptive_sync.valid == true && +		info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { +		//00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF +		REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1); + +		REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, +					info_frame->sdp_line_num.adaptive_sync_line_num); +	} +} +  static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(  		struct hpo_dp_stream_encoder *enc,  		const struct encoder_info_frame *info_frame) @@ -458,12 +474,20 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(  				&info_frame->hdrsmd,  				true); +	if (info_frame->adaptive_sync.valid) +		enc->vpg->funcs->update_generic_info_packet( +				enc->vpg, +				5,  /* packetIndex */ +				&info_frame->adaptive_sync, +				true); +  	/* enable/disable transmission of packet(s).  	 
* If enabled, packet transmission begins on the next frame  	 */  	REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);  	REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);  	REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); +	REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid);  	/* check if dynamic metadata packet transmission is enabled */  	REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, @@ -600,14 +624,6 @@ static void dcn31_hpo_dp_stream_enc_map_stream_to_link(  	}  } -static void dcn31_hpo_dp_stream_enc_mute_control( -	struct hpo_dp_stream_encoder *enc, -	bool mute) -{ -	ASSERT(enc->apg); -	enc->apg->funcs->audio_mute_control(enc->apg, mute); -} -  static void dcn31_hpo_dp_stream_enc_audio_setup(  	struct hpo_dp_stream_encoder *enc,  	unsigned int az_inst, @@ -722,11 +738,11 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {  	.dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,  	.disable = dcn31_hpo_dp_stream_enc_disable,  	.set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, +	.update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num,  	.update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,  	.stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,  	.dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,  	.map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, -	.audio_mute_control = dcn31_hpo_dp_stream_enc_mute_control,  	.dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup,  	.dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable,  	.dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 6360dc9502e7..7e7cd5b64e6a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)  	return false;  } +void hubbub31_init(struct hubbub *hubbub) +{ +	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + +	/*Enable clock gate*/ +	if (hubbub->ctx->dc->debug.disable_clock_gate) { +		/*done in hwseq*/ +		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ +		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, +				DISPCLK_R_DCHUBBUB_GATE_DIS, 0, +				DCFCLK_R_DCHUBBUB_GATE_DIS, 0); +	} + +	/* +	only the DCN will determine when to connect the SDP port +	*/ +	REG_UPDATE(DCHUBBUB_SDPIF_CFG0,	SDPIF_PORT_CONTROL, 1); +}  static const struct hubbub_funcs hubbub31_funcs = {  	.update_dchub = hubbub2_update_dchub,  	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h index 70c60de448ac..89d6208287b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h @@ -42,6 +42,10 @@  	SR(DCHUBBUB_COMPBUF_CTRL),\  	SR(COMPBUF_RESERVED_SPACE),\  	SR(DCHUBBUB_DEBUG_CTRL_0),\ +	SR(DCHUBBUB_CLOCK_CNTL),\ +	SR(DCHUBBUB_SDPIF_CFG0),\ +	SR(DCHUBBUB_SDPIF_CFG1),\ +	SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\  	SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\  	
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\  	SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ @@ -120,11 +124,17 @@  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ -	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)  int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,  		struct dcn_hubbub_phys_addr_config *pa_config); +void hubbub31_init(struct hubbub *hubbub); +  void hubbub31_construct(struct dcn20_hubbub *hubbub3,  	struct dc_context *ctx,  	const struct dcn_hubbub_registers *hubbub_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index bdf101547484..d13e46eeee3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -45,8 +45,7 @@  #include "link_hwss.h"  #include "dpcd_defs.h"  #include "dce/dmub_outbox.h" -#include "dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dcn10/dcn10_hw_sequencer.h"  #include "inc/link_enc_cfg.h"  #include "dcn30/dcn30_vpg.h" @@ -89,7 +88,8 @@ static void enable_memory_low_power(struct dc *dc)  		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);  	} -	if (dc->debug.enable_mem_low_power.bits.mpc) +	if (dc->debug.enable_mem_low_power.bits.mpc && +		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)  		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); @@ -141,7 +141,8 @@ void dcn31_init_hw(struct dc *dc)  	if (!dcb->funcs->is_accelerated_mode(dcb)) {  		hws->funcs.bios_golden_init(dc); -		hws->funcs.disable_vga(dc->hwseq); +		if (hws->funcs.disable_vga) +			hws->funcs.disable_vga(dc->hwseq);  	}  	// Initialize the dccg  	if (res_pool->dccg->funcs->dccg_init) @@ -201,7 +202,7 @@ void dcn31_init_hw(struct dc *dc)  		dmub_enable_outbox_notification(dc->ctx->dmub_srv);  	/* we want to turn off all dp displays before doing detection */ -	dc_link_blank_all_dp_displays(dc); +	link_blank_all_dp_displays(dc);  	if (hws->funcs.enable_power_gating_plane)  		hws->funcs.enable_power_gating_plane(dc->hwseq, true); @@ -229,7 +230,7 @@ void dcn31_init_hw(struct dc *dc)  				}  				if (num_opps > 1) { -					dc_link_blank_all_edp_displays(dc); +					link_blank_all_edp_displays(dc);  					break;  				}  			} @@ -413,7 +414,17 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)  		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); -	else { +	else if (link_is_dp_128b_132b_signal(pipe_ctx)) { +		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets( +				pipe_ctx->stream_res.hpo_dp_stream_enc, +				&pipe_ctx->stream_res.encoder_info_frame); +		return; +	} else { +		if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num) +			pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num( +				pipe_ctx->stream_res.stream_enc, +				&pipe_ctx->stream_res.encoder_info_frame); +  		
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(  			pipe_ctx->stream_res.stream_enc,  			&pipe_ctx->stream_res.encoder_info_frame); @@ -554,7 +565,7 @@ static void dcn31_reset_back_end_for_pipe(  		 * VBIOS lit up eDP, so check link status too.  		 */  		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) -			core_link_disable_stream(pipe_ctx); +			link_set_dpms_off(pipe_ctx);  		else if (pipe_ctx->stream_res.audio)  			dc->hwss.disable_audio_stream(pipe_ctx); @@ -573,7 +584,7 @@ static void dcn31_reset_back_end_for_pipe(  			}  		}  	} else if (pipe_ctx->stream_res.dsc) { -			dp_set_dsc_enable(pipe_ctx, false); +			link_set_dsc_enable(pipe_ctx, false);  	}  	pipe_ctx->stream = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index fddc21a5a04c..d3918a10773a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -119,8 +119,6 @@ enum dcn31_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -153,6 +151,9 @@ enum dcn31_clk_src_array_id {  	.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## reg_name @@ -911,6 +912,10 @@ static const struct dc_debug_options debug_defaults_diags = {  };  static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +	},  	.ilr = {  		.optimize_edp_link_rate = true,  	}, @@ -1625,6 +1630,7 @@ static struct clock_source *dcn31_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1634,6 +1640,31 @@ static bool is_dual_plane(enum surface_pixel_format format)  	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;  } +int dcn31x_populate_dml_pipes_from_context(struct dc *dc, +					  struct dc_state *context, +					  display_e2e_pipe_params_st *pipes, +					  bool fast_validate) +{ +	uint32_t pipe_cnt; +	int i; + +	dc_assert_fp_enabled(); + +	pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + +	for (i = 0; i < pipe_cnt; i++) { +		pipes[i].pipe.src.gpuvm = 1; +		if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) { +			//pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; +			pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled; +		} else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE) +			pipes[i].pipe.src.hostvm = false; +		else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE) +			pipes[i].pipe.src.hostvm = true; +	} +	return pipe_cnt; +} +  int dcn31_populate_dml_pipes_from_context(  	struct dc *dc, struct dc_state *context,  	display_e2e_pipe_params_st *pipes, @@ -1645,7 +1676,7 @@ int dcn31_populate_dml_pipes_from_context(  	bool upscaled = false;  	DC_FP_START(); -	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); +	dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);  	DC_FP_END();  	for 
(i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1675,12 +1706,6 @@ int dcn31_populate_dml_pipes_from_context(  		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);  		DC_FP_END(); -		if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) -			pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; -		else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE) -			pipes[pipe_cnt].pipe.src.hostvm = false; -		else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE) -			pipes[pipe_cnt].pipe.src.hostvm = true;  		if (pipes[pipe_cnt].dout.dsc_enable) {  			switch (timing->display_color_depth) { @@ -1770,7 +1795,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,  	BW_VAL_TRACE_COUNT();  	DC_FP_START(); -	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); +	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);  	DC_FP_END();  	// Disable fast_validate to set min dcfclk in alculate_wm_and_dlg @@ -1898,6 +1923,8 @@ static bool dcn31_resource_construct(  	dc->caps.max_slave_rgb_planes = 2;  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true; +	if (dc->config.forceHBR2CP2520) +		dc->caps.force_dp_tps4_for_cp2520 = false;  	dc->caps.dp_hpo = true;  	dc->caps.dp_hdmi21_pcon_support = true;  	dc->caps.edp_dsc_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 389a8938ee45..0b769ee71405 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -104,7 +104,7 @@ static void dccg314_set_pixel_rate_div(  	}  	dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2); -	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2)) +	if (k1 == cur_k1 && k2 == cur_k2)  		return;  	switch (otg_inst) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 38842f938bed..962a2c02b422 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -30,7 +30,7 @@  #include "dcn314_dio_stream_encoder.h"  #include "reg_helper.h"  #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dpcd_defs.h"  #define DC_LOGGER \ @@ -278,10 +278,11 @@ static void enc314_stream_encoder_dp_blank(  	struct dc_link *link,  	struct stream_encoder *enc)  { -	/* New to DCN314 - disable the FIFO before VID stream disable. */ -	enc314_disable_fifo(enc); -  	enc1_stream_encoder_dp_blank(link, enc); + +	/* Disable FIFO after the DP vid stream is disabled to avoid corruption. */ +	if (enc->ctx->dc->debug.dig_fifo_off_in_blank) +		enc314_disable_fifo(enc);  }  static void enc314_stream_encoder_dp_unblank( @@ -365,7 +366,7 @@ static void enc314_stream_encoder_dp_unblank(  	 */  	enc314_enable_fifo(enc); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);  }  /* Set DSC-related configuration. 
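/* The dml_hostvm_override handling added in dcn31x_populate_dml_pipes_from_context()
 * above reduces to a small policy: honour the debug override when one is set,
 * otherwise follow the VM configuration reported for the ASIC. A hypothetical
 * helper expressing the same decision (sketch only, not part of the patch):
 */
static bool dcn31x_resolve_hostvm_sketch(const struct dc *dc)
{
	switch (dc->debug.dml_hostvm_override) {
	case DML_HOSTVM_OVERRIDE_FALSE:
		return false;
	case DML_HOSTVM_OVERRIDE_TRUE:
		return true;
	case DML_HOSTVM_NO_OVERRIDE:
	default:
		return dc->vm_pa_config.is_hvm_enabled;
	}
}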
@@ -428,6 +429,8 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {  		enc3_stream_encoder_update_hdmi_info_packets,  	.stop_hdmi_info_packets =  		enc3_stream_encoder_stop_hdmi_info_packets, +	.update_dp_info_packets_sdp_line_num = +		enc3_stream_encoder_update_dp_info_packets_sdp_line_num,  	.update_dp_info_packets =  		enc3_stream_encoder_update_dp_info_packets,  	.stop_dp_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h index 33dfdf8b4100..ed0772387903 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h @@ -280,6 +280,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(  void enc3_stream_encoder_stop_hdmi_info_packets(  	struct stream_encoder *enc); +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( +		struct stream_encoder *enc, +		struct encoder_info_frame *info_frame); +  void enc3_stream_encoder_update_dp_info_packets(  	struct stream_encoder *enc,  	const struct encoder_info_frame *info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index a0741794db62..575d3501c848 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -46,9 +46,7 @@  #include "link_hwss.h"  #include "dpcd_defs.h"  #include "dce/dmub_outbox.h" -#include "dc_link_dp.h" -#include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dcn10/dcn10_hw_sequencer.h"  #include "inc/link_enc_cfg.h"  #include "dcn30/dcn30_vpg.h" @@ -348,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig  	two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);  	odm_combine_factor = get_odm_config(pipe_ctx, NULL); -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		*k1_div = PIXEL_RATE_DIV_BY_1;  		*k2_div = PIXEL_RATE_DIV_BY_1;  	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { @@ -391,3 +389,27 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)  		pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,  				pix_per_cycle);  } + +void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) +{ +	struct dc_context *ctx = hws->ctx; +	union dmub_rb_cmd cmd; + +	if (hws->ctx->dc->debug.disable_hubp_power_gate) +		return; + +	PERF_TRACE(); + +	memset(&cmd, 0, sizeof(cmd)); +	cmd.domain_control.header.type = DMUB_CMD__VBIOS; +	cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL; +	cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data); +	cmd.domain_control.data.inst = hubp_inst; +	cmd.domain_control.data.power_gate = !power_on; + +	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); +	dc_dmub_srv_cmd_execute(ctx->dmub_srv); +	dc_dmub_srv_wait_idle(ctx->dmub_srv); + +	PERF_TRACE(); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 244280298212..c419d3dbdfee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,4 +41,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig  void 
dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); +void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); +  #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 5b6c2d94ec71..343f4d9dd5e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {  	.plane_atomic_disable = dcn20_plane_atomic_disable,  	.plane_atomic_power_down = dcn10_plane_atomic_power_down,  	.enable_power_gating_plane = dcn314_enable_power_gating_plane, -	.hubp_pg_control = dcn31_hubp_pg_control, +	.hubp_pg_control = dcn314_hubp_pg_control,  	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,  	.update_odm = dcn314_update_odm,  	.dsc_pg_control = dcn314_dsc_pg_control, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c index 7dd36e402bac..0086cafb0f7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c @@ -241,8 +241,6 @@ static struct timing_generator_funcs dcn314_tg_funcs = {  		.set_dsc_config = optc3_set_dsc_config,  		.get_dsc_status = optc2_get_dsc_status,  		.set_dwb_source = NULL, -		.set_odm_bypass = optc3_set_odm_bypass, -		.set_odm_combine = optc314_set_odm_combine,  		.get_optc_source = optc2_get_optc_source,  		.set_out_mux = optc3_set_out_mux,  		.set_drr_trigger_window = optc3_set_drr_trigger_window, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 9066c511a052..54ed3de869d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -184,6 +184,9 @@ enum dcn31_clk_src_array_id {  	.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## reg_name @@ -871,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {  	},  	// 6:1 downscaling ratio: 1000/6 = 166.666 +	// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250  	.max_downscale_factor = { -			.argb8888 = 167, +			.argb8888 = 250,  			.nv12 = 167,  			.fp16 = 167  	}, @@ -883,10 +887,13 @@ static const struct dc_plane_cap plane_cap = {  static const struct dc_debug_options debug_defaults_drv = {  	.disable_z10 = false,  	.enable_z9_disable_interface = true, +	.psr_skip_crtc_disable = true,  	.disable_dmcu = true,  	.force_abm_enable = false,  	.timing_trace = false,  	.clock_trace = true, +	.disable_dpp_power_gate = true, +	.disable_hubp_power_gate = true,  	.disable_pplib_clock_request = false,  	.pipe_split_policy = MPC_SPLIT_DYNAMIC,  	.force_single_disp_pipe_split = false, @@ -896,7 +903,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	.max_downscale_src_width = 4096,/*upto true 4k*/  	.disable_pplib_wm_range = false,  	.scl_reset_length10 = true, -	.sanity_checks = false, +	.sanity_checks = true,  	.underflow_assert_delay_us = 
0xFFFFFFFF,  	.dwb_fi_phase = -1, // -1 = disable,  	.dmub_command_table = true, @@ -937,6 +944,10 @@ static const struct dc_debug_options debug_defaults_diags = {  };  static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +	},  	.ilr = {  		.optimize_edp_link_rate = true,  	}, @@ -1686,6 +1697,61 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi  	*panel_config = panel_config_defaults;  } +bool dcn314_validate_bandwidth(struct dc *dc, +		struct dc_state *context, +		bool fast_validate) +{ +	bool out = false; + +	BW_VAL_TRACE_SETUP(); + +	int vlevel = 0; +	int pipe_cnt = 0; +	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); +	DC_LOGGER_INIT(dc->ctx->logger); + +	BW_VAL_TRACE_COUNT(); + +	DC_FP_START(); +	// do not support self refresh only +	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); +	DC_FP_END(); + +	// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg +	if (pipe_cnt == 0) +		fast_validate = false; + +	if (!out) +		goto validate_fail; + +	BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + +	if (fast_validate) { +		BW_VAL_TRACE_SKIP(fast); +		goto validate_out; +	} + +	dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + +	BW_VAL_TRACE_END_WATERMARKS(); + +	goto validate_out; + +validate_fail: +	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", +		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + +	BW_VAL_TRACE_SKIP(fail); +	out = false; + +validate_out: +	kfree(pipes); + +	BW_VAL_TRACE_FINISH(); + +	return out; +} +  static struct resource_funcs dcn314_res_pool_funcs = {  	.destroy = dcn314_destroy_resource_pool,  	.link_enc_create = dcn31_link_encoder_create, @@ -1693,7 +1759,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {  	.link_encs_assign = link_enc_cfg_link_encs_assign,  	.link_enc_unassign = link_enc_cfg_link_enc_unassign,  	.panel_cntl_create = dcn31_panel_cntl_create, -	.validate_bandwidth = dcn31_validate_bandwidth, +	.validate_bandwidth = dcn314_validate_bandwidth,  	.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,  	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,  	.populate_dml_pipes = dcn314_populate_dml_pipes_from_context, @@ -1755,7 +1821,7 @@ static bool dcn314_resource_construct(  	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;  	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;  	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; -	dc->caps.max_downscale_ratio = 600; +	dc->caps.max_downscale_ratio = 400;  	dc->caps.i2c_speed_in_khz = 100;  	dc->caps.i2c_speed_in_khz_hdcp = 100;  	dc->caps.max_cursor_size = 256; @@ -1766,6 +1832,8 @@ static bool dcn314_resource_construct(  	dc->caps.max_slave_rgb_planes = 2;  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true; +	if (dc->config.forceHBR2CP2520) +		dc->caps.force_dp_tps4_for_cp2520 = false;  	dc->caps.dp_hpo = true;  	dc->caps.dp_hdmi21_pcon_support = true;  	dc->caps.edp_dsc_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h index 0dd3153aa5c1..49ffe71018df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h @@ -39,6 +39,10 @@ struct 
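/* A brief consistency check on the two downscale limits touched here (worked
 * numbers only, not new driver code): plane_cap.max_downscale_factor stores
 * 1000 / ratio, and dc->caps.max_downscale_ratio appears to store ratio * 100,
 * so
 *
 *	argb8888: 1000 / 4 = 250   (was 1000 / 6 ~= 167 for 6:1)
 *	caps:      4 * 100 = 400   (was 600 for 6:1)
 *
 * i.e. both changes express the same tightened 4:1 downscale cap for DCN3.14.
 */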
dcn314_resource_pool {  	struct resource_pool base;  }; +bool dcn314_validate_bandwidth(struct dc *dc, +		struct dc_state *context, +		bool fast_validate); +  struct resource_pool *dcn314_create_resource_pool(  		const struct dc_init_data *init_data,  		struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 58746c437554..7887078c5f64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -151,8 +151,6 @@ enum dcn31_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -185,6 +183,9 @@ enum dcn31_clk_src_array_id {  	.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## reg_name @@ -907,6 +908,10 @@ static const struct dc_debug_options debug_defaults_diags = {  };  static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +	},  	.ilr = {  		.optimize_edp_link_rate = true,  	}, @@ -1623,6 +1628,7 @@ static struct clock_source *dcn31_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1643,7 +1649,7 @@ static int dcn315_populate_dml_pipes_from_context(  	const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;  	DC_FP_START(); -	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); +	dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);  	DC_FP_END();  	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1662,7 +1668,6 @@ static int dcn315_populate_dml_pipes_from_context(  		pipes[pipe_cnt].pipe.src.immediate_flip = true;  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -		pipes[pipe_cnt].pipe.src.gpuvm = true;  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;  		pipes[pipe_cnt].pipe.src.dcc_rate = 3;  		pipes[pipe_cnt].dout.dsc_input_bpc = 0; @@ -1703,7 +1708,9 @@ static int dcn315_populate_dml_pipes_from_context(  			dc->config.enable_4to1MPC = true;  			context->bw_ctx.dml.ip.det_buffer_size_kbytes =  					(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB; -		} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { +		} else if (!is_dual_plane(pipe->plane_state->format) +				&& pipe->plane_state->src_rect.width <= 5120 +				&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {  			/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */  			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;  			pipes[0].pipe.src.unbounded_req_mode = true; @@ -1779,6 +1786,8 @@ static bool dcn315_resource_construct(  	dc->caps.max_slave_rgb_planes = 2;  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true; +	if (dc->config.forceHBR2CP2520) +		dc->caps.force_dp_tps4_for_cp2520 = false;  	dc->caps.dp_hpo = true;  	
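/* The same two-line pattern is applied to each DCN3.1x/3.2 resource in this
 * change; assuming nothing else touches the cap in between, it is equivalent
 * to a single assignment (sketch):
 *
 *	dc->caps.force_dp_tps4_for_cp2520 = !dc->config.forceHBR2CP2520;
 *
 * TPS4 remains the default substitute when the CP2520 compliance pattern is
 * requested, but a platform config can now ask for the HBR2-style CP2520
 * pattern instead.
 */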
dc->caps.dp_hdmi21_pcon_support = true;  	dc->caps.edp_dsc_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 6b40a11ac83a..dc0b49506275 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -142,8 +142,6 @@ enum dcn31_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg  #define BASE(seg) BASE_INNER(seg) @@ -176,6 +174,9 @@ enum dcn31_clk_src_array_id {  	.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  					reg ## block ## id ## _ ## reg_name @@ -906,6 +907,10 @@ static const struct dc_debug_options debug_defaults_diags = {  };  static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +	},  	.ilr = {  		.optimize_edp_link_rate = true,  	}, @@ -1646,7 +1651,7 @@ static int dcn316_populate_dml_pipes_from_context(  	const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;  	DC_FP_START(); -	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); +	dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);  	DC_FP_END();  	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1665,7 +1670,6 @@ static int dcn316_populate_dml_pipes_from_context(  		pipes[pipe_cnt].pipe.src.immediate_flip = true;  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -		pipes[pipe_cnt].pipe.src.gpuvm = true;  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;  		pipes[pipe_cnt].pipe.src.dcc_rate = 3;  		pipes[pipe_cnt].dout.dsc_input_bpc = 0; @@ -1772,7 +1776,7 @@ static bool dcn316_resource_construct(  	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;  	dc->caps.max_downscale_ratio = 600;  	dc->caps.i2c_speed_in_khz = 100; -	dc->caps.i2c_speed_in_khz_hdcp = 100; +	dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/  	dc->caps.max_cursor_size = 256;  	dc->caps.min_horizontal_blanking_period = 80;  	dc->caps.dmdata_alloc_size = 2048; @@ -1781,6 +1785,8 @@ static bool dcn316_resource_construct(  	dc->caps.max_slave_rgb_planes = 2;  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true; +	if (dc->config.forceHBR2CP2520) +		dc->caps.force_dp_tps4_for_cp2520 = false;  	dc->caps.dp_hpo = true;  	dc->caps.dp_hdmi21_pcon_support = true;  	dc->caps.edp_dsc_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index df4f25119142..e4472c6be6c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -225,11 +225,7 @@ static void dccg32_set_dtbclk_dto(  	} else {  		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],  				DTBCLK_DTO_ENABLE[params->otg_inst], 0, -				PIPE_DTO_SRC_SEL[params->otg_inst], 1); -		if (params->is_hdmi) -			REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], -				PIPE_DTO_SRC_SEL[params->otg_inst], 0); - +				
PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);  		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);  		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 076969d928af..501388014855 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -31,7 +31,6 @@  #include "dcn31/dcn31_dio_link_encoder.h"  #include "dcn32_dio_link_encoder.h"  #include "stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "link_enc_cfg.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c index d19fc93dbc75..36e6f5657942 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c @@ -29,7 +29,7 @@  #include "dcn32_dio_stream_encoder.h"  #include "reg_helper.h"  #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "link.h"  #include "dpcd_defs.h"  #define DC_LOGGER \ @@ -373,7 +373,7 @@ static void enc32_stream_encoder_dp_unblank(  	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);  }  /* Set DSC-related configuration. @@ -421,6 +421,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi  	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);  } +static void enc32_reset_fifo(struct stream_encoder *enc, bool reset) +{ +	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); +	uint32_t reset_val = reset ? 
1 : 0; +	uint32_t is_symclk_on; + +	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); +	REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + +	if (is_symclk_on) +		REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); +	else +		udelay(10); +} + +static void enc32_enable_fifo(struct stream_encoder *enc) +{ +	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + +	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + +	enc32_reset_fifo(enc, true); +	enc32_reset_fifo(enc, false); + +	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); +} +  static const struct stream_encoder_funcs dcn32_str_enc_funcs = {  	.dp_set_odm_combine =  		enc32_dp_set_odm_combine, @@ -436,6 +463,8 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {  		enc3_stream_encoder_update_hdmi_info_packets,  	.stop_hdmi_info_packets =  		enc3_stream_encoder_stop_hdmi_info_packets, +	.update_dp_info_packets_sdp_line_num = +		enc3_stream_encoder_update_dp_info_packets_sdp_line_num,  	.update_dp_info_packets =  		enc3_stream_encoder_update_dp_info_packets,  	.stop_dp_info_packets = @@ -466,6 +495,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {  	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,  	.set_input_mode = enc32_set_dig_input_mode, +	.enable_fifo = enc32_enable_fifo,  };  void dcn32_dio_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 9fbb72369c10..eb08ccc38e79 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -41,6 +41,10 @@  #define FN(reg_name, field_name) \  	hubbub2->shifts->field_name, hubbub2->masks->field_name +/** + * @DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for + * DCN32 + */  #define DCN32_CRB_SEGMENT_SIZE_KB 64  static void dcn32_init_crb(struct hubbub *hubbub) @@ -68,6 +72,23 @@ static void dcn32_init_crb(struct hubbub *hubbub)  	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);  } +void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) +{ +	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + +	uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; + +	ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long +	ASSERT(request_limit > 0); //field is only 24 bits long + +	if (request_limit > 0xFFF) +		request_limit = 0xFFF; + +	if (request_limit > 0) +		REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); +} + +  void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)  {  	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -844,7 +865,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, -			 DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage); +			 DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);  	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,  			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain); @@ -864,7 +885,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, -			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage); +			
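/* hubbub32_set_request_limit() above programs SDPIF_REQUEST_RATE_LIMIT to 3/4
 * of memory_channel_count * words_per_channel, clamped to the 0xFFF register
 * maximum. A quick worked example with illustrative numbers:
 *
 *	16 channels * 64 words/channel = 1024 words
 *	request_limit = 3 * 1024 / 4   = 768   (below 0xFFF, so no clamp)
 *
 * The words-per-channel term is supplied at init time through the new
 * dc->config.sdpif_request_limit_words_per_umc field used from dcn32_init_hw().
 */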
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);  	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,  			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain); @@ -884,7 +905,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, -			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage); +			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);  	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,  			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain); @@ -904,7 +925,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);  	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, -			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage); +			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);  	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,  			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain); @@ -924,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)  			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);  } +void hubbub32_init(struct hubbub *hubbub) +{ +	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + +	/* Enable clock gate*/ +	if (hubbub->ctx->dc->debug.disable_clock_gate) { +		/*done in hwseq*/ +		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + +		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, +			DISPCLK_R_DCHUBBUB_GATE_DIS, 0, +			DCFCLK_R_DCHUBBUB_GATE_DIS, 0); +	} +	/* +	ignore the "df_pre_cstate_req" from the SDP port control. +	only the DCN will determine when to connect the SDP port +	*/ +	REG_UPDATE(DCHUBBUB_SDPIF_CFG0, +			SDPIF_PORT_CONTROL, 1); +	/*Set SDP's max outstanding request to 512 +	must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ +	REG_UPDATE(DCHUBBUB_SDPIF_CFG1, +			SDPIF_MAX_NUM_OUTSTANDING, 1); +	/*must set the registers back to 256 in zero frame buffer mode*/ +	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, +			DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512, +			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512); +} +  static const struct hubbub_funcs hubbub32_funcs = {  	.update_dchub = hubbub2_update_dchub,  	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -945,6 +995,7 @@ static const struct hubbub_funcs hubbub32_funcs = {  	.init_crb = dcn32_init_crb,  	.hubbub_read_state = hubbub2_read_state,  	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, +	.set_request_limit = hubbub32_set_request_limit  };  void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index cda94e0e31bf..b20eb04724bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -82,7 +82,13 @@  	SR(DCN_VM_FAULT_ADDR_MSB),\  	SR(DCN_VM_FAULT_ADDR_LSB),\  	SR(DCN_VM_FAULT_CNTL),\ -	SR(DCN_VM_FAULT_STATUS) +	SR(DCN_VM_FAULT_STATUS),\ +	SR(SDPIF_REQUEST_RATE_LIMIT),\ +	SR(DCHUBBUB_CLOCK_CNTL),\ +	SR(DCHUBBUB_SDPIF_CFG0),\ +	SR(DCHUBBUB_SDPIF_CFG1),\ +	SR(DCHUBBUB_MEM_PWR_MODE_CTRL) +  #define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\  	HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ @@ -95,6 +101,7 @@  	HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, 
mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ +	HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ @@ -159,7 +166,15 @@  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \  	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ -	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ +	HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) + +  bool hubbub32_program_urgent_watermarks(  		struct hubbub *hubbub, @@ -189,6 +204,8 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);  void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub); +void hubbub32_init(struct hubbub *hubbub); +  void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);  void hubbub32_construct(struct dcn20_hubbub *hubbub2, @@ -200,4 +217,6 @@ void hubbub32_construct(struct dcn20_hubbub *hubbub2,  	int pixel_chunk_size_kb,  	int config_return_buffer_size_kb); +void hubbub32_set_request_limit(struct hubbub *hubbub, int umc_count, int words_per_umc); +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index ac1c6458dd55..fe0cd177744c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -155,7 +155,11 @@ void hubp32_cursor_set_attributes(  	else  		REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);  } - +void hubp32_init(struct hubp *hubp) +{ +	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); +	REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8); +}  static struct hubp_funcs dcn32_hubp_funcs = {  	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,  	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h index 56ef71151536..4cdbf63c952b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h @@ -61,6 +61,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp);  void hubp32_cursor_set_attributes(struct hubp *hubp,  		const struct dc_cursor_attributes *attr); +void hubp32_init(struct hubp *hubp); +  bool hubp32_construct(  	struct dcn20_hubp *hubp2,  	struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index d0b46a3e0155..16f892125b6f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -50,7 +50,7 @@  #include "dmub_subvp_state.h"  #include 
"dce/dmub_hw_lock_mgr.h"  #include "dcn32_resource.h" -#include "dc_link_dp.h" +#include "link.h"  #include "dmub/inc/dmub_subvp_state.h"  #define DC_LOGGER_INIT(logger) @@ -188,7 +188,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)      /* First, check no-memory-request case */  	for (i = 0; i < dc->current_state->stream_count; i++) { -		if (dc->current_state->stream_status[i].plane_count) +		if ((dc->current_state->stream_status[i].plane_count) && +			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))  			/* Fail eligibility on a visible stream */  			break;  	} @@ -206,146 +207,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)   */  static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)  { -	int i, j; -	struct dc_stream_state *stream = NULL; -	struct dc_plane_state *plane = NULL; -	uint32_t cursor_size = 0; -	uint32_t total_lines = 0; -	uint32_t lines_per_way = 0; +	int i;  	uint8_t num_ways = 0; -	uint8_t bytes_per_pixel = 0; -	uint8_t cursor_bpp = 0; -	uint16_t mblk_width = 0; -	uint16_t mblk_height = 0; -	uint16_t mall_alloc_width_blk_aligned = 0; -	uint16_t mall_alloc_height_blk_aligned = 0; -	uint16_t num_mblks = 0; -	uint32_t bytes_in_mall = 0; -	uint32_t cache_lines_used = 0; -	uint32_t cache_lines_per_plane = 0; - -	for (i = 0; i < dc->res_pool->pipe_count; i++) { -		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - -		if (!pipe->stream || !pipe->plane_state || -				pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED || -				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) -			continue; - -		bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; -		mblk_width = DCN3_2_MBLK_WIDTH; -		mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - -		/* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - -		 * FLOOR(vp_x_start, blk_width) -		 * -		 * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c -		 */ -		mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + -				pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - -						(pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); +	uint32_t mall_ss_size_bytes = 0; -		/* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - -		 * FLOOR(vp_y_start, blk_height) -		 * -		 * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c -		 */ -		mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + -				pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) - -						(pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - -		num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * -				((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); - -		/* For DCC: -		 * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1) -		 */ -		if (pipe->plane_state->dcc.enable) -			num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_width_blk_aligned * bytes_per_pixel + -					(256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); - -		bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; - -		/* (cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment -		 * (MALL is 64-byte aligned) -		 */ -		cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; -		cache_lines_used += cache_lines_per_plane; -	} +	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; +	// TODO add additional logic for PSR active stream exclusion optimization +	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;  	// Include cursor size for CAB allocation -	for (j = 0; j < dc->res_pool->pipe_count; j++) { -		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; -		struct hubp *hubp = pipe->plane_res.hubp; - -		if (pipe->stream && pipe->plane_state && hubp) -			/* Find the cursor plane and use the exact size instead of -			using the max for calculation */ - -		if (hubp->curs_attr.width > 0) { -				cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; - -				switch (pipe->stream->cursor_attributes.color_format) { -				case CURSOR_MODE_MONO: -					cursor_size /= 2; -					cursor_bpp = 4; -					break; -				case CURSOR_MODE_COLOR_1BIT_AND: -				case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: -				case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: -					cursor_size *= 4; -					cursor_bpp = 4; -					break; +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; -				case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: -				case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: -					cursor_size *= 8; -					cursor_bpp = 8; -					break; -				} +		if (!pipe->stream || !pipe->plane_state) +			continue; -				if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor && -						cursor_size > 16384) { -					/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) -					 */ -					cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / -							DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / -							dc->caps.cache_line_size + 2; -				} -				break; -			} +		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);  	}  	// Convert number of cache lines required to number of ways -	total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; -	lines_per_way = total_lines / dc->caps.cache_num_ways; -	num_ways = cache_lines_used / lines_per_way; - -	if (cache_lines_used % lines_per_way > 0) -		num_ways++; - -	for (i = 0; i < ctx->stream_count; i++) { -		stream = ctx->streams[i]; -		for (j = 0; j < ctx->stream_status[i].plane_count; j++) { -			plane = ctx->stream_status[i].plane_states[j]; - -			if (stream->cursor_position.enable && plane && -					dc->debug.alloc_extra_way_for_cursor && -					cursor_size > 16384) { -				/* Cursor caching is not supported since it won't be on the same line. -				 * So we need an extra line to accommodate it. With large cursors and a single 4k monitor -				 * this case triggers corruption. If we're at the edge, then dont trigger display refresh -				 * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp. 
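/* The open-coded MALL sizing above is collapsed into helpers; assuming
 * dcn32_helper_mall_bytes_to_ways(), used just below, preserves the arithmetic
 * deleted here, the bytes-to-ways conversion amounts to (sketch only):
 */
static unsigned int mall_bytes_to_ways_sketch(const struct dc *dc, uint32_t bytes)
{
	/* +2 covers worst-case 64-byte alignment, as in the removed code */
	uint32_t cache_lines = bytes / dc->caps.cache_line_size + 2;
	uint32_t total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
	uint32_t lines_per_way = total_lines / dc->caps.cache_num_ways;

	return DIV_ROUND_UP(cache_lines, lines_per_way);
}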
-				 */ -				num_ways++; -				/* We only expect one cursor plane */ -				break; -			} -		} -	}  	if (dc->debug.force_mall_ss_num_ways > 0) {  		num_ways = dc->debug.force_mall_ss_num_ways; +	} else { +		num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);  	} +  	return num_ways;  } @@ -360,6 +246,13 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)  	if (!dc->ctx->dmub_srv)  		return false; +	for (i = 0; i < dc->current_state->stream_count; i++) { +		/* MALL SS messaging is not supported with PSR at this time */ +		if (dc->current_state->streams[i] != NULL && +				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) +			return false; +	} +  	if (enable) {  		if (dc->current_state) { @@ -698,11 +591,7 @@ void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context)  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; -		// For SubVP + DRR, also force disallow on the DRR pipe -		// (We will force allow in the DMUB sequence -- some DRR timings by default won't allow P-State so we have -		// to force once the vblank is stretched). -		if (pipe->stream && pipe->plane_state && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || -				(pipe->stream->mall_stream_config.type == SUBVP_NONE && pipe->stream->ignore_msa_timing_param))) { +		if (pipe->stream && pipe->plane_state && (pipe->stream->mall_stream_config.type == SUBVP_MAIN)) {  			struct hubp *hubp = pipe->plane_res.hubp;  			if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) @@ -780,6 +669,10 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)  	if (hws && hws->funcs.update_mall_sel)  		hws->funcs.update_mall_sel(dc, context); +	//update subvp force pstate +	if (hws && hws->funcs.subvp_update_force_pstate) +		dc->hwseq->funcs.subvp_update_force_pstate(dc, context); +  	// Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -798,6 +691,26 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)  	}  } +static void dcn32_initialize_min_clocks(struct dc *dc) +{ +	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; + +	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; +	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; +	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; +	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; +	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; +	clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; +	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; +	clocks->fclk_p_state_change_support = true; +	clocks->p_state_change_support = true; + +	dc->clk_mgr->funcs->update_clocks( +			dc->clk_mgr, +			dc->current_state, +			true); +} +  void dcn32_init_hw(struct dc *dc)  {  	struct abm **abms = dc->res_pool->multiple_abms; @@ -879,7 +792,7 @@ void dcn32_init_hw(struct dc *dc)  			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);  	/* we want to turn off all dp displays before doing detection */ -	dc_link_blank_all_dp_displays(dc); +	link_blank_all_dp_displays(dc);  	/* If taking control over from VBIOS, we may want to optimize our first  	 
* mode set, so we need to skip powering down pipes until we know which @@ -892,6 +805,18 @@ void dcn32_init_hw(struct dc *dc)  		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)  			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,  					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); + +		dcn32_initialize_min_clocks(dc); + +		/* On HW init, allow idle optimizations after pipes have been turned off. +		 * +		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state +		 * is reset (i.e. not in idle at the time hw init is called), but software state +		 * still has idle_optimizations = true, so we must disable idle optimizations first +		 * (i.e. set false), then re-enable (set true). +		 */ +		dc_allow_idle_optimizations(dc, false); +		dc_allow_idle_optimizations(dc, true);  	}  	/* In headless boot cases, DIG may be turned @@ -980,15 +905,14 @@ void dcn32_init_hw(struct dc *dc)  	if (dc->res_pool->hubbub->funcs->init_crb)  		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); +	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0) +		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc); +  	// Get DMCUB capabilities  	if (dc->ctx->dmub_srv) {  		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);  		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;  	} - -	/* Enable support for ODM and windowed MPO if policy flag is set */ -	if (dc->debug.enable_single_display_2to1_odm_policy) -		dc->config.enable_windowed_mpo_odm = true;  }  static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, @@ -1171,16 +1095,16 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign  	two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);  	odm_combine_factor = get_odm_config(pipe_ctx, NULL); -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		*k1_div = PIXEL_RATE_DIV_BY_1;  		*k2_div = PIXEL_RATE_DIV_BY_1; -	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { +	} else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {  		*k1_div = PIXEL_RATE_DIV_BY_1;  		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)  			*k2_div = PIXEL_RATE_DIV_BY_2;  		else  			*k2_div = PIXEL_RATE_DIV_BY_4; -	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { +	} else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {  		if (two_pix_per_container) {  			*k1_div = PIXEL_RATE_DIV_BY_1;  			*k2_div = PIXEL_RATE_DIV_BY_2; @@ -1235,7 +1159,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,  	params.link_settings.link_rate = link_settings->link_rate; -	if (is_dp_128b_132b_signal(pipe_ctx)) { +	if (link_is_dp_128b_132b_signal(pipe_ctx)) {  		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */  		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(  				pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -1262,7 +1186,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)  	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))  		return false; -	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) && +	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) &&  		
dc->debug.enable_dp_dig_pixel_rate_div_policy)  		return true;  	return false; @@ -1296,7 +1220,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)  				pipe_ctx->clock_source->funcs->program_pix_clk(  						pipe_ctx->clock_source,  						&pipe_ctx->stream_res.pix_clk_params, -						dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), +						link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),  						&pipe_ctx->pll_settings);  				link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;  				break; @@ -1328,7 +1252,7 @@ void dcn32_disable_link_output(struct dc_link *link,  	else if (dmcu != NULL && dmcu->funcs->lock_phy)  		dmcu->funcs->unlock_phy(dmcu); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);  	apply_symclk_on_tx_off_wa(link);  } @@ -1365,6 +1289,33 @@ void dcn32_update_phantom_vp_position(struct dc *dc,  	}  } +/* Treat the phantom pipe as if it needs to be fully enabled. + * If the pipe was previously in use but not phantom, it would + * have been disabled earlier in the sequence so we need to run + * the full enable sequence. + */ +void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) +{ +	phantom_pipe->update_flags.raw = 0; +	if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +		if (phantom_pipe->stream && phantom_pipe->plane_state) { +			phantom_pipe->update_flags.bits.enable = 1; +			phantom_pipe->update_flags.bits.mpcc = 1; +			phantom_pipe->update_flags.bits.dppclk = 1; +			phantom_pipe->update_flags.bits.hubp_interdependent = 1; +			phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; +			phantom_pipe->update_flags.bits.gamut_remap = 1; +			phantom_pipe->update_flags.bits.scaler = 1; +			phantom_pipe->update_flags.bits.viewport = 1; +			phantom_pipe->update_flags.bits.det_size = 1; +			if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) { +				phantom_pipe->update_flags.bits.odm = 1; +				phantom_pipe->update_flags.bits.global_sync = 1; +			} +		} +	} +} +  bool dcn32_dsc_pg_status(  		struct dce_hwseq *hws,  		unsigned int dsc_inst) @@ -1419,3 +1370,39 @@ void dcn32_update_dsc_pg(struct dc *dc,  		}  	}  } + +void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) +{ +	unsigned int i; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + +		/* If an active, non-phantom pipe is being transitioned into a phantom +		 * pipe, wait for the double buffer update to complete first before we do +		 * ANY phantom pipe programming. +		 */ +		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && +				old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { +			old_pipe->stream_res.tg->funcs->wait_for_state( +					old_pipe->stream_res.tg, +					CRTC_STATE_VBLANK); +			old_pipe->stream_res.tg->funcs->wait_for_state( +					old_pipe->stream_res.tg, +					CRTC_STATE_VACTIVE); +		} +	} +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + +		if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			// If old context or new context has phantom pipes, apply +			// the phantom timings now. 
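/* Assumed usage of the new hook (sketch; the actual call site lives in the
 * pipe-programming sequence and is not shown in this hunk): phantom pipes skip
 * the normal update-flag detection and are treated as full enables instead.
 */
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
	dc->hwss.apply_update_flags_for_phantom(pipe);
/* else: the regular pipe-change detection fills pipe->update_flags */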
We can't change the phantom +			// pipe configuration safely without driver acquiring +			// the DMCUB lock first. +			dc->hwss.apply_ctx_to_hw(dc, context); +			break; +		} +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index ac3657a5b9ea..e9e9534f3668 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -92,6 +92,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,  		struct dc_state *context,  		struct pipe_ctx *phantom_pipe); +void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe); +  bool dcn32_dsc_pg_status(  		struct dce_hwseq *hws,  		unsigned int dsc_inst); @@ -100,4 +102,6 @@ void dcn32_update_dsc_pg(struct dc *dc,  		struct dc_state *context,  		bool safe_to_disable); +void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +  #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 45a949ba6f3f..0694fa3a3680 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -30,6 +30,7 @@  #include "dcn30/dcn30_hwseq.h"  #include "dcn31/dcn31_hwseq.h"  #include "dcn32_hwseq.h" +#include "dcn32_init.h"  static const struct hw_sequencer_funcs dcn32_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap, @@ -94,7 +95,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {  	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,  	.calc_vupdate_position = dcn10_calc_vupdate_position,  	.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, -	.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall, +	.does_plane_fit_in_mall = NULL,  	.set_backlight_level = dcn21_set_backlight_level,  	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,  	.hardware_release = dcn30_hardware_release, @@ -106,10 +107,12 @@ static const struct hw_sequencer_funcs dcn32_funcs = {  	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,  	.get_dcc_en_bits = dcn10_get_dcc_en_bits,  	.commit_subvp_config = dcn32_commit_subvp_config, +	.enable_phantom_streams = dcn32_enable_phantom_streams,  	.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,  	.update_visual_confirm_color = dcn20_update_visual_confirm_color,  	.update_phantom_vp_position = dcn32_update_phantom_vp_position,  	.update_dsc_pg = dcn32_update_dsc_pg, +	.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,  };  static const struct hwseq_private_funcs dcn32_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c index 41b0baf8e183..c3b089ba511a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c @@ -211,7 +211,7 @@ static void mmhubbub32_config_mcif_arb(struct mcif_wb *mcif_wb,  	REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE,  params->arbitration_slice);  } -const struct mcif_wb_funcs dcn32_mmhubbub_funcs = { +static const struct mcif_wb_funcs dcn32_mmhubbub_funcs = {  	.warmup_mcif		= mmhubbub32_warmup_mcif,  	.enable_mcif		= mmhubbub2_enable_mcif,  	.disable_mcif		= mmhubbub2_disable_mcif, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index 4edd0655965b..206a5ddbaf6d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -982,7 +982,7 @@ static bool mpc32_program_3dlut(  	return true;  } -const struct mpc_funcs dcn32_mpc_funcs = { +static const struct mpc_funcs dcn32_mpc_funcs = {  	.read_mpcc_state = mpc1_read_mpcc_state,  	.insert_plane = mpc1_insert_plane,  	.remove_mpcc = mpc1_remove_mpcc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c index 2b33eeb213e2..2ee798965bc2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c @@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)  	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);  } +static void optc32_disable_phantom_otg(struct timing_generator *optc) +{ +	struct optc *optc1 = DCN10TG_FROM_TG(optc); + +	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); +} +  static void optc32_set_odm_bypass(struct timing_generator *optc,  		const struct dc_crtc_timing *dc_crtc_timing)  { @@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {  		.enable_crtc = optc32_enable_crtc,  		.disable_crtc = optc32_disable_crtc,  		.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable, +		.disable_phantom_crtc = optc32_disable_phantom_otg,  		/* used by enable_timing_synchronization. Not need for FPGA */  		.is_counter_moving = optc1_is_counter_moving,  		.get_position = optc1_get_position, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index d1598e3131f6..74e50c09bb62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -57,7 +57,6 @@  #include "dcn31/dcn31_hpo_dp_stream_encoder.h"  #include "dcn31/dcn31_hpo_dp_link_encoder.h"  #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dc_link_dp.h"  #include "dcn31/dcn31_apg.h"  #include "dcn31/dcn31_dio_link_encoder.h"  #include "dcn32/dcn32_dio_link_encoder.h" @@ -69,7 +68,7 @@  #include "dml/display_mode_vba.h"  #include "dcn32/dcn32_dccg.h"  #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "dcn31/dcn31_panel_cntl.h"  #include "dcn30/dcn30_dwb.h" @@ -106,8 +105,6 @@ enum dcn32_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. 
fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]  #define BASE(seg) BASE_INNER(seg) @@ -167,6 +164,9 @@ enum dcn32_clk_src_array_id {  	REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \  		reg ## block ## id ## _ ## temp_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix)	\ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define DCCG_SRII(reg_name, block, id)\  	REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  		reg ## block ## id ## _ ## reg_name @@ -722,9 +722,10 @@ static const struct dc_debug_options debug_defaults_drv = {  	/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/  	.enable_double_buffered_dsc_pg_support = true,  	.enable_dp_dig_pixel_rate_div_policy = 1, -	.allow_sw_cursor_fallback = false, +	.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"  	.alloc_extra_way_for_cursor = true,  	.min_prefetch_in_strobe_ns = 60000, // 60us +	.disable_unbounded_requesting = false,  };  static const struct dc_debug_options debug_defaults_diags = { @@ -830,6 +831,7 @@ static struct clock_source *dcn32_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1505,7 +1507,7 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)  		dcn_dccg_destroy(&pool->base.dccg);  	if (pool->base.oem_device != NULL) -		dal_ddc_service_destroy(&pool->base.oem_device); +		link_destroy_ddc_service(&pool->base.oem_device);  } @@ -1679,7 +1681,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,  		/* Shadow pipe has small viewport. */  		phantom_plane->clip_rect.y = 0; -		phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; +		phantom_plane->clip_rect.height = phantom_stream->src.height;  		phantom_plane->is_phantom = true; @@ -1719,8 +1721,29 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,  	return phantom_stream;  } +void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) +{ +	int i; +	struct dc_plane_state *phantom_plane = NULL; +	struct dc_stream_state *phantom_stream = NULL; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (!pipe->top_pipe && !pipe->prev_odm_pipe && +				pipe->plane_state && pipe->stream && +				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			phantom_plane = pipe->plane_state; +			phantom_stream = pipe->stream; + +			dc_plane_state_retain(phantom_plane); +			dc_stream_retain(phantom_stream); +		} +	} +} +  // return true if removed piped from ctx, false otherwise -bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context) +bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update)  {  	int i;  	bool removed_pipe = false; @@ -1747,14 +1770,23 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)  			removed_pipe = true;  		} -		// Clear all phantom stream info -		if (pipe->stream) { -			pipe->stream->mall_stream_config.type = SUBVP_NONE; -			pipe->stream->mall_stream_config.paired_stream = NULL; -		} +		/* For non-full updates, a shallow copy of the current state +		 * is created. In this case we don't want to erase the current +		 * state (there can be 2 HIRQL threads, one in flip, and one in +		 * checkMPO) that can cause a race condition. 
+		 * +		 * This is just a workaround, needs a proper fix. +		 */ +		if (!fast_update) { +			// Clear all phantom stream info +			if (pipe->stream) { +				pipe->stream->mall_stream_config.type = SUBVP_NONE; +				pipe->stream->mall_stream_config.paired_stream = NULL; +			} -		if (pipe->plane_state) { -			pipe->plane_state->is_phantom = false; +			if (pipe->plane_state) { +				pipe->plane_state->is_phantom = false; +			}  		}  	}  	return removed_pipe; @@ -1901,7 +1933,7 @@ int dcn32_populate_dml_pipes_from_context(  		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;  		if (context->stream_count == 1 && -				context->stream_status[0].plane_count <= 1 && +				context->stream_status[0].plane_count == 1 &&  				!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&  				is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&  				pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && @@ -1919,30 +1951,36 @@ int dcn32_populate_dml_pipes_from_context(  		timing = &pipe->stream->timing;  		pipes[pipe_cnt].pipe.src.gpuvm = true; -		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; -		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; +		DC_FP_START(); +		dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); +		DC_FP_END();  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;  		pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;  		pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; -		switch (pipe->stream->mall_stream_config.type) { -		case SUBVP_MAIN: -			pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; -			subvp_in_use = true; -			break; -		case SUBVP_PHANTOM: -			pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe; -			pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; -			// Disallow unbounded req for SubVP according to DCHUB programming guide -			pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -			break; -		case SUBVP_NONE: -			pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable; -			pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; -			break; -		default: -			break; +		/* Only populate DML input with subvp info for full updates. +		 * This is just a workaround -- needs a proper fix. 
+		 */ +		if (!fast_validate) { +			switch (pipe->stream->mall_stream_config.type) { +			case SUBVP_MAIN: +				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; +				subvp_in_use = true; +				break; +			case SUBVP_PHANTOM: +				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe; +				pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; +				// Disallow unbounded req for SubVP according to DCHUB programming guide +				pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; +				break; +			case SUBVP_NONE: +				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable; +				pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; +				break; +			default: +				break; +			}  		}  		pipes[pipe_cnt].dout.dsc_input_bpc = 0; @@ -2030,6 +2068,9 @@ static struct resource_funcs dcn32_res_pool_funcs = {  	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,  	.add_phantom_pipes = dcn32_add_phantom_pipes,  	.remove_phantom_pipes = dcn32_remove_phantom_pipes, +	.retain_phantom_pipes = dcn32_retain_phantom_pipes, +	.save_mall_state = dcn32_save_mall_state, +	.restore_mall_state = dcn32_restore_mall_state,  }; @@ -2108,24 +2149,34 @@ static bool dcn32_resource_construct(  	dc->caps.max_cursor_size = 64;  	dc->caps.min_horizontal_blanking_period = 80;  	dc->caps.dmdata_alloc_size = 2048; -	dc->caps.mall_size_per_mem_channel = 0; +	dc->caps.mall_size_per_mem_channel = 4;  	dc->caps.mall_size_total = 0;  	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;  	dc->caps.cache_line_size = 64;  	dc->caps.cache_num_ways = 16; -	dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64 + +	/* Calculate the available MALL space */ +	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( +		dc, dc->ctx->dc_bios->vram_info.num_chans) * +		dc->caps.mall_size_per_mem_channel * 1024 * 1024; +	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; +  	dc->caps.subvp_fw_processing_delay_us = 15; +	dc->caps.subvp_drr_max_vblank_margin_us = 40;  	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;  	dc->caps.subvp_swath_height_margin_lines = 16;  	dc->caps.subvp_pstate_allow_width_us = 20;  	dc->caps.subvp_vertical_int_margin_us = 30; +	dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin  	dc->caps.max_slave_planes = 2;  	dc->caps.max_slave_yuv_planes = 2;  	dc->caps.max_slave_rgb_planes = 2;  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true; +	if (dc->config.forceHBR2CP2520) +		dc->caps.force_dp_tps4_for_cp2520 = false;  	dc->caps.dp_hpo = true;  	dc->caps.dp_hdmi21_pcon_support = true;  	dc->caps.edp_dsc_support = true; @@ -2404,11 +2455,14 @@ static bool dcn32_resource_construct(  		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;  		ddc_init_data.id.enum_id = 0;  		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; -		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); +		pool->base.oem_device = link_create_ddc_service(&ddc_init_data);  	} else {  		pool->base.oem_device = NULL;  	} +	if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) +		dc->config.sdpif_request_limit_words_per_umc = 16; +  	DC_FP_END();  	return true; @@ -2544,3 +2598,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(  	return idle_pipe;  } + +unsigned 
int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) +{ +	/* +	 * DCN32 and DCN321 SKUs may have different sizes for MALL +	 *  but we may not be able to access all the MALL space. +	 *  If the num_chans is power of 2, then we can access all +	 *  of the available MALL space.  Otherwise, we can only +	 *  access: +	 * +	 *  max_cab_size_in_bytes = total_cache_size_in_bytes * +	 *    ((2^floor(log2(num_chans)))/num_chans) +	 * +	 * Calculating the MALL sizes for all available SKUs, we +	 *  have come up with the follow simplified check. +	 * - we have max_chans which provides the max MALL size. +	 *  Each chans supports 4MB of MALL so: +	 * +	 *  total_cache_size_in_bytes = max_chans * 4 MB +	 * +	 * - we have avail_chans which shows the number of channels +	 *  we can use if we can't access the entire MALL space. +	 *  It is generally half of max_chans +	 * - so we use the following checks: +	 * +	 *   if (num_chans == max_chans), return max_chans +	 *   if (num_chans < max_chans), return avail_chans +	 * +	 * - exception is GC_11_0_0 where we can't access max_chans, +	 *  so we define max_avail_chans as the maximum available +	 *  MALL space +	 * +	 */ +	int gc_11_0_0_max_chans = 48; +	int gc_11_0_0_max_avail_chans = 32; +	int gc_11_0_0_avail_chans = 16; +	int gc_11_0_3_max_chans = 16; +	int gc_11_0_3_avail_chans = 8; +	int gc_11_0_2_max_chans = 8; +	int gc_11_0_2_avail_chans = 4; + +	if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) { +		return (num_chans == gc_11_0_0_max_chans) ? +			gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans; +	} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) { +		return (num_chans == gc_11_0_2_max_chans) ? +			gc_11_0_2_max_chans : gc_11_0_2_avail_chans; +	} else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) { +		return (num_chans == gc_11_0_3_max_chans) ? +			gc_11_0_3_max_chans : gc_11_0_3_avail_chans; +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index f76120e67c16..aca928edc4e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -38,6 +38,7 @@  #define DCN3_2_MBLK_HEIGHT_4BPE 128  #define DCN3_2_MBLK_HEIGHT_8BPE 64  #define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq  #define TO_DCN32_RES_POOL(pool)\  	container_of(pool, struct dcn32_resource_pool, base) @@ -45,17 +46,6 @@  extern struct _vcs_dpi_ip_params_st dcn3_2_ip;  extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc; -/* Temp struct used to save and restore MALL config - * during validation. - * - * TODO: Move MALL config into dc_state instead of stream struct - * to avoid needing to save/restore. 
- */ -struct mall_temp_config { -	struct mall_stream_config mall_stream_config[MAX_PIPES]; -	bool is_phantom_plane[MAX_PIPES]; -}; -  struct dcn32_resource_pool {  	struct resource_pool base;  }; @@ -81,6 +71,9 @@ bool dcn32_release_post_bldn_3dlut(  		struct dc_transfer_func **shaper);  bool dcn32_remove_phantom_pipes(struct dc *dc, +		struct dc_state *context, bool fast_update); + +void dcn32_retain_phantom_pipes(struct dc *dc,  		struct dc_state *context);  void dcn32_add_phantom_pipes(struct dc *dc, @@ -104,8 +97,17 @@ void dcn32_calculate_wm_and_dlg(  		int pipe_cnt,  		int vlevel); -uint32_t dcn32_helper_calculate_num_ways_for_subvp -		(struct dc *dc, +uint32_t dcn32_helper_mall_bytes_to_ways( +		struct dc *dc, +		uint32_t total_size_in_mall_bytes); + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool ignore_cursor_buf); + +uint32_t dcn32_helper_calculate_num_ways_for_subvp( +		struct dc *dc,  		struct dc_state *context);  void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -120,6 +122,8 @@ bool dcn32_subvp_in_use(struct dc *dc,  bool dcn32_mpo_in_use(struct dc_state *context);  bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); +bool dcn32_is_center_timing(struct pipe_ctx *pipe); +bool dcn32_is_psr_capable(struct pipe_ctx *pipe);  struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(  		struct dc_state *state, @@ -142,6 +146,12 @@ void dcn32_restore_mall_state(struct dc *dc,  		struct dc_state *context,  		struct mall_temp_config *temp_config); +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); + +double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); +  /* definitions for run time init of reg offsets */  /* CLK SRC */ @@ -1244,7 +1254,8 @@ void dcn32_restore_mall_state(struct dc *dc,        SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),                         \        SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),                         \        SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB),                    \ -      SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS)                           \ +      SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS),                          \ +      SR(SDPIF_REQUEST_RATE_LIMIT)                                             \    )  /* DCCG */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index fa3778849db1..3a2d7bcc4b6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -33,13 +33,75 @@ static bool is_dual_plane(enum surface_pixel_format format)  	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;  } + +uint32_t dcn32_helper_mall_bytes_to_ways( +		struct dc *dc, +		uint32_t total_size_in_mall_bytes) +{ +	uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; + +	/* add 2 lines for worst case alignment */ +	cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + +	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; +	lines_per_way = total_cache_lines / dc->caps.cache_num_ways; +	num_ways = cache_lines_used / lines_per_way; +	if (cache_lines_used % lines_per_way > 0) +		num_ways++; + +	return num_ways; +} + +uint32_t 
dcn32_helper_calculate_mall_bytes_for_cursor( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool ignore_cursor_buf) +{ +	struct hubp *hubp = pipe_ctx->plane_res.hubp; +	uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; +	uint32_t cursor_bpp = 4; +	uint32_t cursor_mall_size_bytes = 0; + +	switch (pipe_ctx->stream->cursor_attributes.color_format) { +	case CURSOR_MODE_MONO: +		cursor_size /= 2; +		cursor_bpp = 4; +		break; +	case CURSOR_MODE_COLOR_1BIT_AND: +	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: +	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: +		cursor_size *= 4; +		cursor_bpp = 4; +		break; + +	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: +	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: +		cursor_size *= 8; +		cursor_bpp = 8; +		break; +	} + +	/* only count if cursor is enabled, and if additional allocation needed outside of the +	 * DCN cursor buffer +	 */ +	if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf || +			cursor_size > 16384)) { +		/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) +		 * Note: add 1 mblk in case of cursor misalignment +		 */ +		cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / +				DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES; +	} + +	return cursor_mall_size_bytes; +} +  /**   * ********************************************************************************************   * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP   * - * This function first checks the bytes required per pixel on the SubVP pipe, then calculates - * the total number of pixels required in the SubVP MALL region. These are used to calculate - * the number of cache lines used (then number of ways required) for SubVP MCLK switching. + * Gets total allocation required for the phantom viewport calculated by DML in bytes and + * converts to number of cache ways.   *   * @param [in] dc: current dc state   * @param [in] context: new dc state @@ -48,101 +110,19 @@ static bool is_dual_plane(enum surface_pixel_format format)   *   * ********************************************************************************************   */ -uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context) +uint32_t dcn32_helper_calculate_num_ways_for_subvp( +		struct dc *dc, +		struct dc_state *context)  { -	uint32_t num_ways = 0; -	uint32_t bytes_per_pixel = 0; -	uint32_t cache_lines_used = 0; -	uint32_t lines_per_way = 0; -	uint32_t total_cache_lines = 0; -	uint32_t bytes_in_mall = 0; -	uint32_t num_mblks = 0; -	uint32_t cache_lines_per_plane = 0; -	uint32_t i = 0, j = 0; -	uint16_t mblk_width = 0; -	uint16_t mblk_height = 0; -	uint32_t full_vp_width_blk_aligned = 0; -	uint32_t full_vp_height_blk_aligned = 0; -	uint32_t mall_alloc_width_blk_aligned = 0; -	uint32_t mall_alloc_height_blk_aligned = 0; -	uint16_t full_vp_height = 0; -	bool subvp_in_use = false; - -	for (i = 0; i < dc->res_pool->pipe_count; i++) { -		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - -		/* Find the phantom pipes. 
-		 * - For pipe split case we need to loop through the bottom and next ODM -		 *   pipes or only half the viewport size is counted -		 */ -		if (pipe->stream && pipe->plane_state && -				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { -			struct pipe_ctx *main_pipe = NULL; - -			subvp_in_use = true; -			/* Get full viewport height from main pipe (required for MBLK calculation) */ -			for (j = 0; j < dc->res_pool->pipe_count; j++) { -				main_pipe = &context->res_ctx.pipe_ctx[j]; -				if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) { -					full_vp_height = main_pipe->plane_res.scl_data.viewport.height; -					break; -				} -			} - -			bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; -			mblk_width = DCN3_2_MBLK_WIDTH; -			mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - -			/* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - -			 * FLOOR(vp_x_start, blk_width) -			 */ -			full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + -					pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) + -					(pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); - -			/* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - -			 * FLOOR(vp_y_start, blk_height) -			 */ -			full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + -					full_vp_height + mblk_height - 1) / mblk_height * mblk_height) + -					(pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - -			/* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */ -			mall_alloc_width_blk_aligned = full_vp_width_blk_aligned; - -			/* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */ -			mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) / -					mblk_height * mblk_height + mblk_height; - -			/* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c; -			 * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c; -			 * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c); -			 * (Should be divisible, but round up if not) -			 */ -			num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * -					((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); -			bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; -			// cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment -			// (MALL is 64-byte aligned) -			cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; - -			/* For DCC divide by 256 */ -			if (pipe->plane_state->dcc.enable) -				cache_lines_per_plane = cache_lines_per_plane + (cache_lines_per_plane / 256) + 1; -			cache_lines_used += cache_lines_per_plane; +	if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { +		if (dc->debug.force_subvp_num_ways) { +			return dc->debug.force_subvp_num_ways; +		} else { +			return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);  		} +	} else { +		return 0;  	} - -	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; -	lines_per_way = total_cache_lines / dc->caps.cache_num_ways; -	num_ways = cache_lines_used / lines_per_way; -	if (cache_lines_used % lines_per_way > 0) -		num_ways++; - -	if (subvp_in_use && dc->debug.force_subvp_num_ways > 0) -		num_ways = dc->debug.force_subvp_num_ways; - -	return num_ways;  }  void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -250,6 +230,37 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)  	return false;  } +bool dcn32_is_center_timing(struct pipe_ctx *pipe) +{ +	bool is_center_timing = false; + +	if (pipe->stream) { +		if (pipe->stream->timing.v_addressable != pipe->stream->dst.height || +				pipe->stream->timing.v_addressable != pipe->stream->src.height) { +			is_center_timing = true; +		} +	} + +	if (pipe->plane_state) { +		if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height && +				pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) { +			is_center_timing = true; +		} +	} + +	return is_center_timing; +} + +bool dcn32_is_psr_capable(struct pipe_ctx *pipe) +{ +	bool psr_capable = false; + +	if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { +		psr_capable = true; +	} +	return psr_capable; +} +  /**   * *******************************************************************************************   * dcn32_determine_det_override: Determine DET allocation for each pipe @@ -352,6 +363,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,  	int i, pipe_cnt;  	struct resource_context *res_ctx = &context->res_ctx;  	struct pipe_ctx *pipe; +	bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;  	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -368,7 +380,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,  	 */  	if (pipe_cnt == 1) {  		pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE; -		if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { +		if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {  			if (!is_dual_plane(pipe->plane_state->format)) {  				pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;  				pipes[0].pipe.src.unbounded_req_mode = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c index fa9b6603cfd3..13be5f06d987 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c @@ -31,7 +31,6 @@  #include "dcn321_dio_link_encoder.h"  #include "dcn31/dcn31_dio_link_encoder.h"  #include 
"stream_encoder.h" -#include "i2caux_interface.h"  #include "dc_bios_types.h"  #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 6292ac515d1a..55f918b44077 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -60,7 +60,6 @@  #include "dcn31/dcn31_hpo_dp_stream_encoder.h"  #include "dcn31/dcn31_hpo_dp_link_encoder.h"  #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dc_link_dp.h"  #include "dcn31/dcn31_apg.h"  #include "dcn31/dcn31_dio_link_encoder.h"  #include "dcn32/dcn32_dio_link_encoder.h" @@ -73,7 +72,7 @@  #include "dml/display_mode_vba.h"  #include "dcn32/dcn32_dccg.h"  #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "dcn31/dcn31_panel_cntl.h"  #include "dcn30/dcn30_dwb.h" @@ -109,8 +108,6 @@ enum dcn321_clk_src_array_id {   */  /* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER  #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]  #define BASE(seg) BASE_INNER(seg) @@ -174,6 +171,9 @@ enum dcn321_clk_src_array_id {  	REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \  		reg ## block ## id ## _ ## reg_name +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ +	.field_name = reg_name ## __ ## field_name ## post_fix +  #define VUPDATE_SRII(reg_name, block, id)\  	REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \  		reg ## reg_name ## _ ## block ## id @@ -720,9 +720,10 @@ static const struct dc_debug_options debug_defaults_drv = {  	/*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/  	.enable_double_buffered_dsc_pg_support = true,  	.enable_dp_dig_pixel_rate_div_policy = 1, -	.allow_sw_cursor_fallback = false, +	.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"  	.alloc_extra_way_for_cursor = true,  	.min_prefetch_in_strobe_ns = 60000, // 60us +	.disable_unbounded_requesting = false,  };  static const struct dc_debug_options debug_defaults_diags = { @@ -742,7 +743,7 @@ static const struct dc_debug_options debug_defaults_diags = {  	.dmub_command_table = true,  	.enable_tri_buf = true,  	.use_max_lb = true, -	.force_disable_subvp = true +	.force_disable_subvp = true,  }; @@ -829,6 +830,7 @@ static struct clock_source *dcn321_clock_source_create(  		return &clk_src->base;  	} +	kfree(clk_src);  	BREAK_TO_DEBUGGER();  	return NULL;  } @@ -1490,7 +1492,7 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)  		dcn_dccg_destroy(&pool->base.dccg);  	if (pool->base.oem_device != NULL) -		dal_ddc_service_destroy(&pool->base.oem_device); +		link_destroy_ddc_service(&pool->base.oem_device);  } @@ -1619,6 +1621,9 @@ static struct resource_funcs dcn321_res_pool_funcs = {  	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,  	.add_phantom_pipes = dcn32_add_phantom_pipes,  	.remove_phantom_pipes = dcn32_remove_phantom_pipes, +	.retain_phantom_pipes = dcn32_retain_phantom_pipes, +	.save_mall_state = dcn32_save_mall_state, +	.restore_mall_state = dcn32_restore_mall_state,  }; @@ -1697,17 +1702,25 @@ static bool dcn321_resource_construct(  	dc->caps.max_cursor_size = 64;  	dc->caps.min_horizontal_blanking_period = 80;  	dc->caps.dmdata_alloc_size = 2048; -	dc->caps.mall_size_per_mem_channel = 0; +	dc->caps.mall_size_per_mem_channel = 4;  	dc->caps.mall_size_total = 0;  	
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;  	dc->caps.cache_line_size = 64;  	dc->caps.cache_num_ways = 16; -	dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32 + +	/* Calculate the available MALL space */ +	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( +		dc, dc->ctx->dc_bios->vram_info.num_chans) * +		dc->caps.mall_size_per_mem_channel * 1024 * 1024; +	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; +  	dc->caps.subvp_fw_processing_delay_us = 15; +	dc->caps.subvp_drr_max_vblank_margin_us = 40;  	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;  	dc->caps.subvp_swath_height_margin_lines = 16;  	dc->caps.subvp_pstate_allow_width_us = 20;  	dc->caps.subvp_vertical_int_margin_us = 30; +	dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin  	dc->caps.max_slave_planes = 1;  	dc->caps.max_slave_yuv_planes = 1;  	dc->caps.max_slave_rgb_planes = 1; @@ -1983,7 +1996,7 @@ static bool dcn321_resource_construct(  		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;  		ddc_init_data.id.enum_id = 0;  		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; -		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); +		pool->base.oem_device = link_create_ddc_service(&ddc_init_data);  	} else {  		pool->base.oem_device = NULL;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index e3e5c39895a3..7ce9a5b6c33b 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -116,6 +116,11 @@ bool dm_helpers_dp_mst_start_top_mgr(  bool dm_helpers_dp_mst_stop_top_mgr(  		struct dc_context *ctx,  		struct dc_link *link); + +void dm_helpers_dp_mst_update_branch_bandwidth( +		struct dc_context *ctx, +		struct dc_link *link); +  /**   * OS specific aux read callback.   
*/ @@ -156,6 +161,12 @@ enum dc_edid_status dm_helpers_read_local_edid(  		struct dc_link *link,  		struct dc_sink *sink); +bool dm_helpers_dp_handle_test_pattern_request( +		struct dc_context *ctx, +		const struct dc_link *link, +		union link_test_pattern dpcd_test_pattern, +		union test_misc dpcd_test_params); +  void dm_set_dcn_clocks(  		struct dc_context *ctx,  		struct dc_clocks *clks); @@ -188,6 +199,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,  		const struct dc_link *link,  		struct set_config_cmd_payload *payload,  		enum set_config_status *operation_result); +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);  enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid); diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index ca7d24000621..0ecea87cf48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -33,6 +33,10 @@ ifdef CONFIG_PPC64  dml_ccflags := -mhard-float -maltivec  endif +ifdef CONFIG_ARM64 +dml_rcflags := -mgeneral-regs-only +endif +  ifdef CONFIG_CC_IS_GCC  ifneq ($(call gcc-min-version, 70100),y)  IS_OLD_GCC = 1 @@ -55,8 +59,6 @@ frame_warn_flag := -Wframe-larger-than=2048  endif  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) - -ifdef CONFIG_DRM_AMD_DC_DCN  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) @@ -88,7 +90,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_rcflags) @@ -105,7 +106,18 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcf  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o  := $(dml_rcflags) -endif +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)  
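[Editor's note] The Makefile hunk above exists because the DML files are built with hard-float flags (dml_ccflags) while the rest of DC is built without FPU access (dml_rcflags, now including -mgeneral-regs-only on arm64); callers therefore bracket entry into DML with DC_FP_START()/DC_FP_END(), as seen around dcn32_zero_pipe_dcc_fraction() earlier in this diff, and the FPU-built code asserts with dc_assert_fp_enabled(). The user-space sketch below only illustrates that calling discipline; the macro and function names are the real ones from the diff, but the bodies here are stand-ins, not the kernel implementation.

#include <assert.h>
#include <stdio.h>

/* Stand-in bodies: in the kernel these save/restore per-CPU FPU state. */
static int fpu_depth;
#define DC_FP_START() (fpu_depth++)
#define DC_FP_END()   (fpu_depth--)

static void dc_assert_fp_enabled(void)
{
	assert(fpu_depth > 0);           /* FP math only inside a protected section */
}

/* Pretend this lives in a file built with dml_ccflags (may use doubles). */
static double dml_compute(double pixel_clock_mhz)
{
	dc_assert_fp_enabled();
	return pixel_clock_mhz * 1.05;   /* stand-in for real DML arithmetic */
}

/* Pretend this lives in a file built with dml_rcflags (integer-only code,
 * but it may call into DML from inside a DC_FP_START/END pair). */
int main(void)
{
	DC_FP_START();
	double clk = dml_compute(600.0);
	DC_FP_END();

	printf("adjusted clock: %.1f MHz\n", clk);
	return 0;
}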
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h index 74e86732e301..2cbdd75429ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h @@ -29,6 +29,13 @@  #define DC__PRESENT 1  #define DC__PRESENT__1 1  #define DC__NUM_DPP 4 + +/** + * @DC__VOLTAGE_STATES: + * + * Define the maximum amount of states supported by the ASIC. Every ASIC has a + * specific number of states; this macro defines the maximum number of states. + */  #define DC__VOLTAGE_STATES 20  #define DC__NUM_DPP__4 1  #define DC__NUM_DPP__0_PRESENT 1 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c index 99644d896222..c5e84190c17a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c @@ -27,6 +27,8 @@  #include "dcn10/dcn10_resource.h"  #include "dcn10_fpu.h" +#include "resource.h" +#include "amdgpu_dm/dc_fpu.h"  /**   * DOC: DCN10 FPU manipulation Overview @@ -121,3 +123,37 @@ struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {  	.writeback_dram_clock_change_latency_us = 23.0,  	.return_bus_width_bytes = 64,  }; + +void dcn10_resource_construct_fp(struct dc *dc) +{ +	dc_assert_fp_enabled(); +	if (dc->ctx->dce_version == DCN_VERSION_1_01) { +		struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; +		struct dcn_ip_params *dcn_ip = dc->dcn_ip; +		struct display_mode_lib *dml = &dc->dml; + +		dml->ip.max_num_dpp = 3; +		/* TODO how to handle 23.84? */ +		dcn_soc->dram_clock_change_latency = 23; +		dcn_ip->max_num_dpp = 3; +	} +	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { +		dc->dcn_soc->urgent_latency = 3; +		dc->debug.disable_dmcu = true; +		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; +	} + +	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; +	ASSERT(dc->dcn_soc->number_of_channels < 3); +	if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ +		dc->dcn_soc->number_of_channels = 2; + +	if (dc->dcn_soc->number_of_channels == 1) { +		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; +		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; +		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; +		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; +		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) +			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h index e74ed4b4ce5b..63219ecd8478 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h @@ -27,4 +27,6 @@  #ifndef __DCN10_FPU_H__  #define __DCN10_FPU_H__ +void dcn10_resource_construct_fp(struct dc *dc); +  #endif /* __DCN20_FPU_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 45db40c41882..d3ba65efe1d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -26,12 +26,12 @@  #include "resource.h"  #include "clk_mgr.h" -#include "dc_link_dp.h"  #include "dchubbub.h"  #include "dcn20/dcn20_resource.h"  #include 
"dcn21/dcn21_resource.h"  #include "clk_mgr/dcn21/rn_clk_mgr.h" +#include "link.h"  #include "dcn20_fpu.h"  #define DC_LOGGER_INIT(logger) @@ -565,7 +565,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {  				.dppclk_mhz = 847.06,  				.phyclk_mhz = 810.0,  				.socclk_mhz = 953.0, -				.dscclk_mhz = 489.0, +				.dscclk_mhz = 300.0,  				.dram_speed_mts = 2400.0,  			},  			{ @@ -576,7 +576,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {  				.dppclk_mhz = 960.00,  				.phyclk_mhz = 810.0,  				.socclk_mhz = 278.0, -				.dscclk_mhz = 287.67, +				.dscclk_mhz = 342.86,  				.dram_speed_mts = 2666.0,  			},  			{ @@ -587,7 +587,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {  				.dppclk_mhz = 1028.57,  				.phyclk_mhz = 810.0,  				.socclk_mhz = 715.0, -				.dscclk_mhz = 318.334, +				.dscclk_mhz = 369.23,  				.dram_speed_mts = 3200.0,  			},  			{ @@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		if (!context->res_ctx.pipe_ctx[i].stream)  			continue; -		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) +		if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))  			return true;  	}  	return false; @@ -963,6 +963,8 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc  	 * 	2. single eDP, on link 0, 1 plane and stutter period > 5ms  	 * Z10 only cases:  	 * 	1. single eDP, on link 0, 1 plane and stutter period >= 5ms +	 * Z8 cases: +	 * 	1. stutter period sufficient  	 * Zstate not allowed cases:  	 * 	1. Everything else  	 */ @@ -971,6 +973,8 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc  	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {  		struct dc_link *link = context->streams[0]->sink->link;  		struct dc_stream_status *stream_status = &context->stream_status[0]; +		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0; +		bool is_pwrseq0 = link->link_index == 0;  		if (dc_extended_blank_supported(dc)) {  			for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -983,18 +987,53 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc  				}  			}  		} -		/* zstate only supported on PWRSEQ0  and when there's <2 planes*/ -		if (link->link_index != 0 || stream_status->plane_count > 1) + +		/* Don't support multi-plane configurations */ +		if (stream_status->plane_count > 1)  			return DCN_ZSTATE_SUPPORT_DISALLOW; -		if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000) +		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))  			return DCN_ZSTATE_SUPPORT_ALLOW; -		else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr) -			return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; +		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) +			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;  		else -			return DCN_ZSTATE_SUPPORT_DISALLOW; -	} else +			return allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; +	} else {  		return DCN_ZSTATE_SUPPORT_DISALLOW; +	} +} + +static void dcn20_adjust_freesync_v_startup( +		const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ +	struct dc_crtc_timing patched_crtc_timing; +	uint32_t asic_blank_end   = 0; +	uint32_t asic_blank_start = 0; +	uint32_t newVstartup	  = 0; + +	patched_crtc_timing = *dc_crtc_timing; + +	if (patched_crtc_timing.flags.INTERLACE == 1) { +		if (patched_crtc_timing.v_front_porch < 2) +			patched_crtc_timing.v_front_porch = 2; +	} else { +		if (patched_crtc_timing.v_front_porch < 1) +			patched_crtc_timing.v_front_porch = 1; +	} + +	/* blank_start = frame end - front porch */ +	asic_blank_start = patched_crtc_timing.v_total - +					patched_crtc_timing.v_front_porch; + +	/* blank_end = blank_start - active */ +	asic_blank_end = asic_blank_start - +					patched_crtc_timing.v_border_bottom - +					patched_crtc_timing.v_addressable - +					patched_crtc_timing.v_border_top; + +	newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + +	*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);  }  void dcn20_calculate_dlg_params( @@ -1056,6 +1095,11 @@ void dcn20_calculate_dlg_params(  		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =  						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;  		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; +		if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) +			dcn20_adjust_freesync_v_startup( +				&context->res_ctx.pipe_ctx[i].stream->timing, +				&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); +  		pipe_idx++;  	}  	/*save a original dppclock copy*/ @@ -1296,6 +1340,8 @@ int dcn20_populate_dml_pipes_from_context(  		case SIGNAL_TYPE_DISPLAY_PORT_MST:  		case SIGNAL_TYPE_DISPLAY_PORT:  			pipes[pipe_cnt].dout.output_type = dm_dp; +			if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i])) +				pipes[pipe_cnt].dout.output_type = dm_dp2p0;  			break;  		case SIGNAL_TYPE_EDP:  			pipes[pipe_cnt].dout.output_type = dm_edp; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index d3b5b6fedf04..6266b0788387 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2  							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); -				locals->ODMCombineEnablePerState[i][k] = false; +				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;  				if (mode_lib->vba.ODMCapability) {  					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  					
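[Editor's note] The dcn20_adjust_freesync_v_startup() helper added above is pure timing arithmetic, so it can be sanity-checked standalone. The sketch below re-implements the same steps for a hypothetical 1080p timing (1125 total lines, 4-line front porch, no borders); the struct and numbers are illustrative only, not taken from the driver's mode tables.

#include <stdio.h>
#include <stdint.h>

struct crtc_timing {                 /* minimal stand-in for dc_crtc_timing */
	uint32_t v_total;
	uint32_t v_addressable;
	uint32_t v_front_porch;
	uint32_t v_border_top;
	uint32_t v_border_bottom;
	int interlace;
};

/* Same arithmetic as dcn20_adjust_freesync_v_startup() in the diff. */
static void adjust_freesync_v_startup(const struct crtc_timing *t, int *vstartup_start)
{
	struct crtc_timing p = *t;
	uint32_t blank_start, blank_end, new_vstartup;

	/* Front porch is clamped to at least 1 line (2 for interlaced modes). */
	if (p.interlace) {
		if (p.v_front_porch < 2)
			p.v_front_porch = 2;
	} else {
		if (p.v_front_porch < 1)
			p.v_front_porch = 1;
	}

	blank_start = p.v_total - p.v_front_porch;              /* frame end - front porch */
	blank_end = blank_start - p.v_border_bottom -
		    p.v_addressable - p.v_border_top;            /* blank start - active    */
	new_vstartup = blank_end + (p.v_total - blank_start);

	if ((int)new_vstartup > *vstartup_start)
		*vstartup_start = new_vstartup;
}

int main(void)
{
	struct crtc_timing t = { 1125, 1080, 4, 0, 0, 0 };       /* hypothetical 1080p timing */
	int vstartup = 30;                                       /* made-up DML starting value */

	adjust_freesync_v_startup(&t, &vstartup);
	printf("vstartup_start after adjustment: %d\n", vstartup); /* prints 45 */
	return 0;
}

For this example, blank_start = 1121 and blank_end = 41, so the adjusted vstartup of 45 replaces the smaller DML-computed value, which is exactly the "take the larger of the two" behaviour of the new helper.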
	mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					}  				} @@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->RequiredDISPCLK[i][j] = 0.0;  				locals->DISPCLK_DPPCLK_Support[i][j] = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -					locals->ODMCombineEnablePerState[i][k] = false; +					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {  						locals->NoOfDPP[i][j][k] = 1;  						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index edd098c7eb92..989d83ee3842 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2  							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); -				locals->ODMCombineEnablePerState[i][k] = false; +				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;  				if (mode_lib->vba.ODMCapability) {  					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					}  				} @@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->RequiredDISPCLK[i][j] = 0.0;  				locals->DISPCLK_DPPCLK_Support[i][j] = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -					locals->ODMCombineEnablePerState[i][k] = false; +					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {  						locals->NoOfDPP[i][j][k] = 1;  						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 1d84ae50311d..b7c2844d0cbe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2  							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); -				locals->ODMCombineEnablePerState[i][k] = false; +				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;  				if (mode_lib->vba.ODMCapability) {  					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -						locals->ODMCombineEnablePerState[i][k] = true; +						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;  						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;  					}  				} @@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->RequiredDISPCLK[i][j] = 0.0;  				locals->DISPCLK_DPPCLK_Support[i][j] = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -					locals->ODMCombineEnablePerState[i][k] = false; +					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;  					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {  						locals->NoOfDPP[i][j][k] = 1;  						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] @@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			mode_lib->vba.ODMCombineEnabled[k] =  					locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];  		} else { -			mode_lib->vba.ODMCombineEnabled[k] = false; +			mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;  		}  		mode_lib->vba.DSCEnabled[k] =  				locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index e1e92daba668..4fa636364793 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -520,9 +520,7 @@ void dcn30_fpu_calculate_wm_and_dlg(  		pipe_idx++;  	} -	DC_FP_START();  	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -	DC_FP_END();  	if (!pstate_en)  		/* Restore full p-state latency */ @@ -636,7 +634,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,  	while (dummy_latency_index < max_latency_table_entries) {  		context->bw_ctx.dml.soc.dram_clock_change_latency_us =  				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; -		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); +		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);  		if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==  			dm_allow_self_refresh_and_mclk_switch) diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 479e2c1a1301..379729b02847 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -4851,7 +4851,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							v->SwathHeightYThisState[k],  							v->SwathHeightCThisState[k],  							v->HTotal[k] / v->PixelClock[k], -							v->UrgentLatency, +							v->UrgLatency[i],  							v->CursorBufferSize,  							v->CursorWidth[k][0],  							v->CursorBPP[k][0], diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 7dd0845d1bd9..b37d14369a62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(  		int pipe_cnt,  		int vlevel)  { -	int i, pipe_idx, active_dpp_count = 0; +	int i, pipe_idx, active_hubp_count = 0;  	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];  	dc_assert_fp_enabled(); @@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(  			continue;  		if (context->res_ctx.pipe_ctx[i].plane_state) -			active_dpp_count++; +			active_hubp_count++;  		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);  		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); @@ -547,9 +547,22 @@ void dcn31_calculate_wm_and_dlg_fp(  	}  	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -	/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */ +	/* For 31x apu pstate change is only supported if possible in vactive*/  	context->bw_ctx.bw.dcn.clk.p_state_change_support = -			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count; +			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive; +	/* If DCN isn't making memory requests we can allow pstate change and lower clocks */ +	if (!active_hubp_count) { +		context->bw_ctx.bw.dcn.clk.socclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0; +		context->bw_ctx.bw.dcn.clk.dramclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.fclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.p_state_change_support = true; +		for (i = 0; i < dc->res_pool->pipe_count; i++) +			if (context->res_ctx.pipe_ctx[i].stream) +				context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; +	}  }  void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -797,3 +810,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param  	else  		dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);  } + +int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) +{ +	return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index fd58b2561ec9..687d3522cc33 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -46,5 +46,10 @@ void dcn31_calculate_wm_and_dlg_fp(  void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);  void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);  void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); +int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc); +int dcn31x_populate_dml_pipes_from_context(struct dc *dc, +					  struct dc_state *context, +					  display_e2e_pipe_params_st *pipes, +					  bool fast_validate);  #endif /* __DCN31_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index b612edb14417..27f488405335 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -878,7 +878,9 @@ static bool CalculatePrefetchSchedule(  	double DSTTotalPixelsAfterScaler;  	double LineTime;  	double dst_y_prefetch_equ; +#ifdef __DML_VBA_DEBUG__  	double Tsw_oto; +#endif  	double prefetch_bw_oto;  	double prefetch_bw_pr;  	double Tvm_oto; @@ -1052,17 +1054,18 @@ static bool CalculatePrefetchSchedule(  	else  		bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;  	/*rev 99*/ -	prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane); +	prefetch_bw_pr = bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane; +	prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;  	max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;  	prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC; -	prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));  	prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);  	min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);  	Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; +#ifdef __DML_VBA_DEBUG__  	Tsw_oto = Lsw_oto * LineTime; +#endif -	prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC) / Tsw_oto;  #ifdef __DML_VBA_DEBUG__  	dml_print("DML: HTotal: %d\n", myPipe->HTotal); @@ -5083,7 +5086,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							v->SwathHeightYThisState[k],  							v->SwathHeightCThisState[k],  							v->HTotal[k] / v->PixelClock[k], -							v->UrgentLatency, +							v->UrgLatency[i],  							v->CursorBufferSize,  							v->CursorWidth[k][0],  							v->CursorBPP[k][0], @@ -5361,6 +5364,58 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				v->ModeSupport[i][j] = true;  			} else {  				v->ModeSupport[i][j] = false; +#ifdef __DML_VBA_DEBUG__ +				if (v->ScaleRatioAndTapsSupport == false) +					dml_print("DML SUPPORT:     ScaleRatioAndTapsSupport failed"); +				if (v->SourceFormatPixelAndScanSupport == false) +					dml_print("DML SUPPORT:     SourceFormatPixelAndScanSupport failed"); +				if (v->ViewportSizeSupport[i][j] == false) +					dml_print("DML SUPPORT:     
ViewportSizeSupport failed"); +				if (v->LinkCapacitySupport[i] == false) +					dml_print("DML SUPPORT:     LinkCapacitySupport failed"); +				if (v->ODMCombine4To1SupportCheckOK[i] == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported failed"); +				if (v->NotEnoughDSCUnits[i] == true) +					dml_print("DML SUPPORT:     NotEnoughDSCUnits"); +				if (v->DTBCLKRequiredMoreThanSupported[i] == true) +					dml_print("DML SUPPORT:     DTBCLKRequiredMoreThanSupported"); +				if (v->ROBSupport[i][j] == false) +					dml_print("DML SUPPORT:     ROBSupport failed"); +				if (v->DISPCLK_DPPCLK_Support[i][j] == false) +					dml_print("DML SUPPORT:     DISPCLK_DPPCLK_Support failed"); +				if (v->TotalAvailablePipesSupport[i][j] == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported failed"); +				if (EnoughWritebackUnits == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported failed"); +				if (v->WritebackLatencySupport == false) +					dml_print("DML SUPPORT:     WritebackLatencySupport failed"); +				if (v->WritebackScaleRatioAndTapsSupport == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported "); +				if (v->CursorSupport == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported failed"); +				if (v->PitchSupport == false) +					dml_print("DML SUPPORT:     PitchSupport failed"); +				if (ViewportExceedsSurface == true) +					dml_print("DML SUPPORT:     ViewportExceedsSurface failed"); +				if (v->PrefetchSupported[i][j] == false) +					dml_print("DML SUPPORT:     PrefetchSupported failed"); +				if (v->DynamicMetadataSupported[i][j] == false) +					dml_print("DML SUPPORT:     DSC422NativeNotSupported failed"); +				if (v->TotalVerticalActiveBandwidthSupport[i][j] == false) +					dml_print("DML SUPPORT:     TotalVerticalActiveBandwidthSupport failed"); +				if (v->VRatioInPrefetchSupported[i][j] == false) +					dml_print("DML SUPPORT:     VRatioInPrefetchSupported failed"); +				if (v->PTEBufferSizeNotExceeded[i][j] == false) +					dml_print("DML SUPPORT:     PTEBufferSizeNotExceeded failed"); +				if (v->NonsupportedDSCInputBPC == true) +					dml_print("DML SUPPORT:     NonsupportedDSCInputBPC failed"); +				if (!((v->HostVMEnable == false +					&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) +							|| v->ImmediateFlipSupportedForState[i][j] == true)) +					dml_print("DML SUPPORT:     ImmediateFlipRequirement failed"); +				if (FMTBufferExceeded == true) +					dml_print("DML SUPPORT:     FMTBufferExceeded failed"); +#endif  			}  		}  	} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 34b6c763a455..acda3e1babd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -29,6 +29,7 @@  #include "dcn31/dcn31_hubbub.h"  #include "dcn314_fpu.h"  #include "dml/dcn20/dcn20_fpu.h" +#include "dml/dcn31/dcn31_fpu.h"  #include "dml/display_mode_vba.h"  struct _vcs_dpi_ip_params_st dcn3_14_ip = { @@ -148,8 +149,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {  	.num_states = 5,  	.sr_exit_time_us = 16.5,  	.sr_enter_plus_exit_time_us = 18.5, -	.sr_exit_z8_time_us = 442.0, -	.sr_enter_plus_exit_z8_time_us = 560.0, +	.sr_exit_z8_time_us = 210.0, +	.sr_enter_plus_exit_z8_time_us = 310.0,  	.writeback_latency_us = 12.0,  	.dram_channel_width_bytes = 4,  	.round_trip_ping_latency_dcfclk_cycles = 106, @@ -264,11 +265,8 @@ void dcn314_update_bw_bounding_box_fpu(struct dc 
*dc, struct clk_bw_params *bw_p  		dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;  	} -	if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000) -				!= dc->debug.dram_clock_change_latency_ns -			&& dc->debug.dram_clock_change_latency_ns) { -		dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; -	} +	dcn20_patch_bounding_box(dc, &dcn3_14_soc); +  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))  		dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);  	else @@ -291,7 +289,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c  	dc_assert_fp_enabled(); -	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); +	dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);  	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {  		struct dc_crtc_timing *timing; @@ -318,8 +316,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c  		pipes[pipe_cnt].pipe.src.immediate_flip = true;  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -		pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; -		pipes[pipe_cnt].pipe.src.gpuvm = true;  		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;  		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; @@ -350,7 +346,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c  	context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;  	dc->config.enable_4to1MPC = false; -	if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { +	if (pipe_cnt == 1 && pipe->plane_state +		&& pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) {  		if (is_dual_plane(pipe->plane_state->format)  				&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {  			dc->config.enable_4to1MPC = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 0d12fd079cd6..c843b394aeb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -900,7 +900,9 @@ static bool CalculatePrefetchSchedule(  	double DSTTotalPixelsAfterScaler;  	double LineTime;  	double dst_y_prefetch_equ; +#ifdef __DML_VBA_DEBUG__  	double Tsw_oto; +#endif  	double prefetch_bw_oto;  	double prefetch_bw_pr;  	double Tvm_oto; @@ -1074,17 +1076,18 @@ static bool CalculatePrefetchSchedule(  	else  		bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;  	/*rev 99*/ -	prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane); +	prefetch_bw_pr = bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane; +	prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;  	max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;  	prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC; -	prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));  	prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);  	min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, 
PrefetchSourceLinesC) / max_vratio_pre);  	Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; +#ifdef __DML_VBA_DEBUG__  	Tsw_oto = Lsw_oto * LineTime; +#endif -	prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC) / Tsw_oto;  #ifdef __DML_VBA_DEBUG__  	dml_print("DML: HTotal: %d\n", myPipe->HTotal); @@ -3184,7 +3187,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		} else {  			v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];  		} -		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0; +		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;  		if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])  				<= (isInterlaceTiming ?  						dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) : @@ -5180,7 +5183,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_  							v->SwathHeightYThisState[k],  							v->SwathHeightCThisState[k],  							v->HTotal[k] / v->PixelClock[k], -							v->UrgentLatency, +							v->UrgLatency[i],  							v->CursorBufferSize,  							v->CursorWidth[k][0],  							v->CursorBPP[k][0], diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61ee9ba063a7..6576b897a512 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -51,7 +51,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(  		*BytePerPixelDETC = 0;  		*BytePerPixelY = 4;  		*BytePerPixelC = 0; -	} else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) { +	} else if (SourcePixelFormat == dm_444_16) {  		*BytePerPixelDETY = 2;  		*BytePerPixelDETC = 0;  		*BytePerPixelY = 2; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 2abe3967f7fb..e47828e3b6d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -24,13 +24,14 @@   *   */  #include "dcn32_fpu.h" -#include "dc_link_dp.h"  #include "dcn32/dcn32_resource.h"  #include "dcn20/dcn20_resource.h"  #include "display_mode_vba_util_32.h" +#include "dml/dcn32/display_mode_vba_32.h"  // We need this includes for WATERMARKS_* defines  #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"  #include "dcn30/dcn30_resource.h" +#include "link.h"  #define DC_LOGGER_INIT(logger) @@ -256,16 +257,24 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,  							    int vlevel)  {  	const int max_latency_table_entries = 4; -	const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; +	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;  	int dummy_latency_index = 0; +	enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];  	dc_assert_fp_enabled();  	while (dummy_latency_index < max_latency_table_entries) { +		if (temp_clock_change_support != dm_dram_clock_change_unsupported) +			
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;  		context->bw_ctx.dml.soc.dram_clock_change_latency_us =  				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;  		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); +		/* for subvp + DRR case, if subvp pipes are still present we support pstate */ +		if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && +				dcn32_subvp_in_use(dc, context)) +			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; +  		if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&  				vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)  			break; @@ -531,9 +540,12 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,  	unsigned int i, pipe_idx;  	struct pipe_ctx *pipe;  	uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines; +	unsigned int num_dpp;  	unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;  	unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];  	unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel]; +	struct vba_vars_st *vba = &context->bw_ctx.dml.vba; +	struct dc_stream_state *main_stream = ref_pipe->stream;  	dc_assert_fp_enabled(); @@ -569,13 +581,26 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,  	phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +  				pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines; +	// W/A for DCC corruption with certain high resolution timings. +	// Determing if pipesplit is used. If so, add meta_row_height to the phantom vactive. +	num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]]; +	phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0; + +	/* dc->debug.subvp_extra_lines 0 by default*/ +	phantom_vactive += dc->debug.subvp_extra_lines; +  	// For backporch of phantom pipe, use vstartup of the main pipe  	phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);  	phantom_stream->dst.y = 0;  	phantom_stream->dst.height = phantom_vactive; +	/* When scaling, DML provides the end to end required number of lines for MALL. +	 * dst.height is always correct for this case, but src.height is not which causes a +	 * delta between main and phantom pipe scaling outputs. Need to adjust src.height on +	 * phantom for this case. +	 */  	phantom_stream->src.y = 0; -	phantom_stream->src.height = phantom_vactive; +	phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;  	phantom_stream->timing.v_addressable = phantom_vactive;  	phantom_stream->timing.v_front_porch = 1; @@ -667,9 +692,11 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,  		 *   to combine this with SubVP can cause issues with the scheduling).  		 
* - Not TMZ surface  		 */ -		if (pipe->plane_state && !pipe->top_pipe && +		if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&  				pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && -				vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { +				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || +				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && +						dcn32_allow_subvp_with_active_margin(pipe)))) {  			while (pipe) {  				num_pipes++;  				pipe = pipe->bottom_pipe; @@ -853,6 +880,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc  	int16_t stretched_drr_us = 0;  	int16_t drr_stretched_vblank_us = 0;  	int16_t max_vblank_mallregion = 0; +	const struct dc_config *config = &dc->config; + +	if (config->disable_subvp_drr) +		return false;  	// Find SubVP pipe  	for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -953,10 +984,12 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)  		if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)  			subvp_pipe = pipe;  	} -	// Use ignore_msa_timing_param flag to identify as DRR -	if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) { -		// SUBVP + DRR case -		schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); +	// Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On +	if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param && +			(context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync || +			context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) { +		// SUBVP + DRR case -- only allowed if run through DRR validation path +		schedulable = false;  	} else if (found) {  		main_timing = &subvp_pipe->stream->timing;  		phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; @@ -1060,12 +1093,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  {  	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;  	unsigned int dc_pipe_idx = 0; +	int i = 0;  	bool found_supported_config = false;  	struct pipe_ctx *pipe = NULL;  	uint32_t non_subvp_pipes = 0;  	bool drr_pipe_found = false;  	uint32_t drr_pipe_index = 0; -	uint32_t i = 0;  	dc_assert_fp_enabled(); @@ -1128,7 +1161,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  				context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==  					dm_prefetch_support_uclk_fclk_and_stutter) {  				context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = -								dm_prefetch_support_stutter; +								dm_prefetch_support_fclk_and_stutter;  				/* There are params (such as FabricClock) that need to be recalculated  				 * after validation fails (otherwise it will be 0). 
Calculation for  				 * phantom vactive requires call into DML, so we must ensure all the @@ -1145,15 +1178,25 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  			pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);  			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); +			/* Check that vlevel requested supports pstate or not +			 * if not, select the lowest vlevel that supports it +			 */ +			for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { +				if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { +					*vlevel = i; +					break; +				} +			} +  			if (*vlevel < context->bw_ctx.dml.soc.num_states &&  			    vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported  			    && subvp_validate_static_schedulability(dc, context, *vlevel)) {  				found_supported_config = true; -			} else if (*vlevel < context->bw_ctx.dml.soc.num_states && -					vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { -				/* Case where 1 SubVP is added, and DML reports MCLK unsupported. This handles -				 * the case for SubVP + DRR, where the DRR display does not support MCLK switch -				 * at it's native refresh rate / timing. +			} else if (*vlevel < context->bw_ctx.dml.soc.num_states) { +				/* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed. +				 * This handles the case for SubVP + DRR, where the DRR display does not support MCLK +				 * switch at it's native refresh rate / timing, or DRR is allowed for the non-subvp +				 * display.  				 */  				for (i = 0; i < dc->res_pool->pipe_count; i++) {  					pipe = &context->res_ctx.pipe_ctx[i]; @@ -1161,7 +1204,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  					    pipe->stream->mall_stream_config.type == SUBVP_NONE) {  						non_subvp_pipes++;  						// Use ignore_msa_timing_param flag to identify as DRR -						if (pipe->stream->ignore_msa_timing_param) { +						if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {  							drr_pipe_found = true;  							drr_pipe_index = i;  						} @@ -1170,6 +1213,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  				// If there is only 1 remaining non SubVP pipe that is DRR, check static  				// schedulability for SubVP + DRR.  				
if (non_subvp_pipes == 1 && drr_pipe_found) { +					/* find lowest vlevel that supports the config */ +					for (i = *vlevel; i >= 0; i--) { +						if (vba->ModeSupport[i][vba->maxMpcComb]) { +							*vlevel = i; +						} else { +							break; +						} +					} +  					found_supported_config = subvp_drr_schedulable(dc, context,  										       &context->res_ctx.pipe_ctx[drr_pipe_index]);  				} @@ -1179,7 +1231,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)  		// remove phantom pipes and repopulate dml pipes  		if (!found_supported_config) { -			dc->res_pool->funcs->remove_phantom_pipes(dc, context); +			dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);  			vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;  			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); @@ -1191,9 +1243,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,  			}  		} else {  			// Most populate phantom DLG params before programming hardware / timing for phantom pipe -			DC_FP_START();  			dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt); -			DC_FP_END();  			/* Call validate_apply_pipe_split flags after calling DML getters for  			 * phantom dlg params, or some of the VBA params indicating pipe split @@ -1220,17 +1270,49 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		if (!context->res_ctx.pipe_ctx[i].stream)  			continue; -		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) +		if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))  			return true;  	}  	return false;  } +static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ +	struct dc_crtc_timing patched_crtc_timing; +	uint32_t asic_blank_end   = 0; +	uint32_t asic_blank_start = 0; +	uint32_t newVstartup	  = 0; + +	patched_crtc_timing = *dc_crtc_timing; + +	if (patched_crtc_timing.flags.INTERLACE == 1) { +		if (patched_crtc_timing.v_front_porch < 2) +			patched_crtc_timing.v_front_porch = 2; +	} else { +		if (patched_crtc_timing.v_front_porch < 1) +			patched_crtc_timing.v_front_porch = 1; +	} + +	/* blank_start = frame end - front porch */ +	asic_blank_start = patched_crtc_timing.v_total - +					patched_crtc_timing.v_front_porch; + +	/* blank_end = blank_start - active */ +	asic_blank_end = asic_blank_start - +					patched_crtc_timing.v_border_bottom - +					patched_crtc_timing.v_addressable - +					patched_crtc_timing.v_border_top; + +	newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + +	*vstartup_start = ((newVstartup > *vstartup_start) ? 
newVstartup : *vstartup_start); +} +  static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,  				       display_e2e_pipe_params_st *pipes,  				       int pipe_cnt, int vlevel)  { -	int i, pipe_idx; +	int i, pipe_idx, active_hubp_count = 0;  	bool usr_retraining_support = false;  	bool unbounded_req_enabled = false; @@ -1248,7 +1330,6 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,  	context->bw_ctx.bw.dcn.clk.p_state_change_support =  			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]  					!= dm_dram_clock_change_unsupported; -	context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);  	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;  	context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); @@ -1272,9 +1353,15 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,  		unbounded_req_enabled = false;  	} +	context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; +	context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; +	context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; +  	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {  		if (!context->res_ctx.pipe_ctx[i].stream)  			continue; +		if (context->res_ctx.pipe_ctx[i].plane_state) +			active_hubp_count++;  		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,  				pipe_idx);  		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, @@ -1296,10 +1383,51 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,  		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)  			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; -		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; +		if (context->res_ctx.pipe_ctx[i].plane_state) +			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; +		else +			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;  		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + +		context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + +		/* MALL Allocation Sizes */ +		/* count from active, top pipes per plane only */ +		if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && +				(context->res_ctx.pipe_ctx[i].top_pipe == NULL || +				context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && +				context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { +			/* SS: all active surfaces stored in MALL */ +			if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { +				context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + +				if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { +					/* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */ +					context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; +				} +			} else { +				/* SUBVP: phantom surfaces only stored in MALL */ +				context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; +			} +		} + 
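The MALL bookkeeping added just above keeps three running totals per state: mall_ss_size_bytes for every non-phantom surface, mall_ss_psr_active_size_bytes for the subset of those surfaces whose link reports no PSR support, and mall_subvp_size_bytes for SubVP phantom surfaces, counting each plane once via its active top pipe. The sketch below restates that classification with simplified, hypothetical types; struct mall_budget, struct surface_info and mall_account_surface are illustration-only names, not the driver's dc_state/pipe_ctx layout.

/*
 * Minimal sketch of the MALL accounting rule above: each active top pipe
 * contributes its surface size to exactly one bucket -- phantom (SubVP)
 * surfaces to the SubVP bucket, everything else to the static-screen
 * bucket, plus the "PSR active" bucket when the link has no PSR.
 */
#include <stdbool.h>
#include <stdint.h>

struct mall_budget {
	uint64_t ss_bytes;            /* all non-phantom surfaces           */
	uint64_t ss_psr_active_bytes; /* non-phantom surfaces w/o PSR link  */
	uint64_t subvp_bytes;         /* SubVP phantom surfaces only        */
};

struct surface_info {
	uint64_t size_in_mall_bytes;
	bool is_subvp_phantom;
	bool link_supports_psr;
	bool is_top_pipe_for_plane;   /* count each plane exactly once      */
};

static void mall_account_surface(struct mall_budget *b,
				 const struct surface_info *s)
{
	if (!s->is_top_pipe_for_plane)
		return;

	if (s->is_subvp_phantom) {
		b->subvp_bytes += s->size_in_mall_bytes;
		return;
	}

	b->ss_bytes += s->size_in_mall_bytes;
	if (!s->link_supports_psr)
		b->ss_psr_active_bytes += s->size_in_mall_bytes;
}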
+		if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) +			dcn20_adjust_freesync_v_startup( +				&context->res_ctx.pipe_ctx[i].stream->timing, +				&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); +  		pipe_idx++;  	} +	/* If DCN isn't making memory requests we can allow pstate change and lower clocks */ +	if (!active_hubp_count) { +		context->bw_ctx.bw.dcn.clk.socclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0; +		context->bw_ctx.bw.dcn.clk.dramclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.fclk_khz = 0; +		context->bw_ctx.bw.dcn.clk.p_state_change_support = true; +	}  	/*save a original dppclock copy*/  	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;  	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; @@ -1308,6 +1436,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,  	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz  			* 1000; +	context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); +  	context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;  	for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1481,7 +1611,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,  		return false;  	// For each full update, remove all existing phantom pipes first -	dc->res_pool->funcs->remove_phantom_pipes(dc, context); +	dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);  	dc->res_pool->funcs->update_soc_for_wm_a(dc, context); @@ -1493,12 +1623,10 @@ bool dcn32_internal_validate_bw(struct dc *dc,  	}  	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); +	context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); -	if (!fast_validate) { -		DC_FP_START(); +	if (!fast_validate)  		dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); -		DC_FP_END(); -	}  	if (fast_validate ||  			(dc->debug.dml_disallow_alternate_prefetch_modes && @@ -1515,16 +1643,12 @@ bool dcn32_internal_validate_bw(struct dc *dc,  		 * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)  		 */  		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = -			dm_prefetch_support_fclk_and_stutter; +			dm_prefetch_support_none; +		context->bw_ctx.dml.validate_max_state = fast_validate;  		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); -		/* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */ -		if (vlevel == context->bw_ctx.dml.soc.num_states) { -			context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = -				dm_prefetch_support_stutter; -			vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); -		} +		context->bw_ctx.dml.validate_max_state = false;  		if (vlevel < context->bw_ctx.dml.soc.num_states) {  			memset(split, 0, sizeof(split)); @@ -1611,6 +1735,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,  				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);  			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));  			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); +			memset(&pipe->link_res, 0, sizeof(pipe->link_res));  			repopulate_pipes = true;  		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == 
pipe->plane_state) {  			struct pipe_ctx *top_pipe = pipe->top_pipe; @@ -1626,6 +1751,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,  			pipe->stream = NULL;  			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));  			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); +			memset(&pipe->link_res, 0, sizeof(pipe->link_res));  			repopulate_pipes = true;  		} else  			ASSERT(0); /* Should never try to merge master pipe */ @@ -1734,6 +1860,10 @@ bool dcn32_internal_validate_bw(struct dc *dc,  	}  	if (repopulate_pipes) { +		int flag_max_mpc_comb = vba->maxMpcComb; +		int flag_vlevel = vlevel; +		int i; +  		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);  		/* repopulate_pipes = 1 means the pipes were either split or merged. In this case @@ -1741,10 +1871,28 @@ bool dcn32_internal_validate_bw(struct dc *dc,  		 * ensure all the params are calculated correctly. We do not need to run the  		 * pipe split check again after this call (pipes are already split / merged).  		 * */ -		if (!fast_validate) { -			context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = -						dm_prefetch_support_uclk_fclk_and_stutter_if_possible; -			vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); +		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = +					dm_prefetch_support_uclk_fclk_and_stutter_if_possible; +		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); +		if (vlevel == context->bw_ctx.dml.soc.num_states) { +			/* failed after DET size changes */ +			goto validate_fail; +		} else if (flag_max_mpc_comb == 0 && +				flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) { +			/* check the context constructed with pipe split flags is still valid*/ +			bool flags_valid = false; +			for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { +				if (vba->ModeSupport[i][flag_max_mpc_comb]) { +					vba->maxMpcComb = flag_max_mpc_comb; +					vba->VoltageLevel = i; +					vlevel = i; +					flags_valid = true; +				} +			} + +			/* this should never happen */ +			if (!flags_valid) +				goto validate_fail;  		}  	}  	*vlevel_out = vlevel; @@ -1775,14 +1923,40 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  	unsigned int dummy_latency_index = 0;  	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;  	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; +	bool subvp_in_use = dcn32_subvp_in_use(dc, context);  	unsigned int min_dram_speed_mts_margin; +	bool need_fclk_lat_as_dummy = false; +	bool is_subvp_p_drr = false;  	dc_assert_fp_enabled(); -	// Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK -	if (!pstate_en && dcn32_subvp_in_use(dc, context)) { -		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; -		pstate_en = true; +	/* need to find dummy latency index for subvp */ +	if (subvp_in_use) { +		/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */ +		if (!pstate_en) { +			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; +			context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter; +			pstate_en = true; +			is_subvp_p_drr = true; +		} +		dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc, +	
					context, pipes, pipe_cnt, vlevel); + +		/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is +		 * scheduled correctly to account for dummy pstate. +		 */ +		if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) { +			need_fclk_lat_as_dummy = true; +			context->bw_ctx.dml.soc.fclk_change_latency_us = +					dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; +		} +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = +							dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); +		maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; +		if (is_subvp_p_drr) { +			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; +		}  	}  	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; @@ -1806,9 +1980,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  			/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so  			 * prefetch is scheduled correctly to account for dummy pstate.  			 */ -			if (dummy_latency_index == 0) +			if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) { +				need_fclk_lat_as_dummy = true;  				context->bw_ctx.dml.soc.fclk_change_latency_us =  						dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; +			}  			dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);  			maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;  			dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; @@ -1916,7 +2092,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  				dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;  		} -		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { +		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {  			/* find largest table entry that is lower than dram speed,  			 * but lower than DPM0 still uses DPM0  			 */ @@ -1956,6 +2132,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  		 */  		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;  		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; +		/* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case +		 * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported +		 */ +		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	} else {  		/* Set A:  		 * All clocks min. 
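A few lines above, dcn32_calculate_wm_and_dlg_fpu derives min_dram_speed_mts by taking the largest clock-table entry that does not exceed the current DRAM speed, with anything below DPM0 clamped to DPM0. A minimal sketch of that lookup, assuming an ascending table and using hypothetical names (pick_dram_table_index, entry_mts) rather than the driver's clk_table types:

/* Largest entry not above the current DRAM speed, clamped to DPM0.
 * Entries are assumed sorted ascending; index 0 is DPM0. */
static unsigned int pick_dram_table_index(const unsigned int *entry_mts,
					  unsigned int num_entries,
					  unsigned int dram_speed_mts)
{
	unsigned int i, idx = 0;   /* anything below DPM0 still uses DPM0 */

	for (i = 0; i < num_entries; i++) {
		if (entry_mts[i] <= dram_speed_mts)
			idx = i;   /* largest entry not exceeding the speed */
	}
	return idx;
}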
@@ -1996,7 +2176,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  	context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; -	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0) +	/* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */ +	if (need_fclk_lat_as_dummy)  		context->bw_ctx.dml.soc.fclk_change_latency_us =  				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; @@ -2009,10 +2190,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,  	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {  		dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context); -		if (dummy_latency_index == 0) -			context->bw_ctx.dml.soc.fclk_change_latency_us = -					dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;  	} + +	/* revert fclk lat changes if required */ +	if (need_fclk_lat_as_dummy) +		context->bw_ctx.dml.soc.fclk_change_latency_us = +				dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;  }  static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, @@ -2159,9 +2342,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,  		entry.fabricclk_mhz = 0;  		entry.dram_speed_mts = 0; -		DC_FP_START();  		insert_entry_into_table_sorted(table, num_entries, &entry); -		DC_FP_END();  	}  	// Insert the max DCFCLK @@ -2169,9 +2350,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,  	entry.fabricclk_mhz = 0;  	entry.dram_speed_mts = 0; -	DC_FP_START();  	insert_entry_into_table_sorted(table, num_entries, &entry); -	DC_FP_END();  	// Insert the UCLK DPMS  	for (i = 0; i < num_uclk_dpms; i++) { @@ -2179,9 +2358,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,  		entry.fabricclk_mhz = 0;  		entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; -		DC_FP_START();  		insert_entry_into_table_sorted(table, num_entries, &entry); -		DC_FP_END();  	}  	// If FCLK is coarse grained, insert individual DPMs. 
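build_synthetic_soc_states, touched in the hunks above, assembles its candidate clock states by repeatedly calling insert_entry_into_table_sorted() for the DCFCLK, UCLK and FCLK DPM points; the DC_FP_START/END guards around those calls are dropped, presumably because the callers already run under an FPU-enabled section. Below is a minimal sketch of such a sorted insert into a fixed-size table, assuming ascending order with DCFCLK as the only key and a hypothetical clk_entry layout rather than the driver's real entry type. Keeping the table ordered as it is built lets the later passes over the accumulated "points of interest" stay simple linear walks.

#include <string.h>

#define MAX_STATES 40

struct clk_entry {
	unsigned int dcfclk_mhz;
	unsigned int fabricclk_mhz;
	unsigned int dram_speed_mts;
};

static void insert_sorted(struct clk_entry *table, unsigned int *num_entries,
			  const struct clk_entry *entry)
{
	unsigned int i = 0;

	if (*num_entries >= MAX_STATES)
		return;                       /* table full: drop the entry  */

	while (i < *num_entries && table[i].dcfclk_mhz < entry->dcfclk_mhz)
		i++;                          /* first slot not smaller      */

	/* shift the tail up by one and place the new entry */
	memmove(&table[i + 1], &table[i],
		(*num_entries - i) * sizeof(*table));
	table[i] = *entry;
	(*num_entries)++;
}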
@@ -2191,9 +2368,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,  			entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;  			entry.dram_speed_mts = 0; -			DC_FP_START();  			insert_entry_into_table_sorted(table, num_entries, &entry); -			DC_FP_END();  		}  	}  	// If FCLK fine grained, only insert max @@ -2202,9 +2377,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,  		entry.fabricclk_mhz = max_fclk_mhz;  		entry.dram_speed_mts = 0; -		DC_FP_START();  		insert_entry_into_table_sorted(table, num_entries, &entry); -		DC_FP_END();  	}  	// At this point, the table contains all "points of interest" based on @@ -2368,8 +2541,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa  		}  		/* Override from VBIOS for num_chan */ -		if (dc->ctx->dc_bios->vram_info.num_chans) +		if (dc->ctx->dc_bios->vram_info.num_chans) {  			dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; +			dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, +				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); +		}  		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)  			dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; @@ -2539,3 +2715,68 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa  	}  } +void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, +				  int pipe_cnt) +{ +	dc_assert_fp_enabled(); + +	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; +	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; +} + +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) +{ +	bool allow = false; +	uint32_t refresh_rate = 0; + +	/* Allow subvp on displays that have active margin for 2560x1440@60hz displays +	 * only for now. There must be no scaling as well. +	 * +	 * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs +	 * for p-state switching. 
+	 */ +	if (pipe->stream && pipe->plane_state) { +		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + +						pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) +						/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); +		if (pipe->stream->timing.v_addressable == 1440 && +				pipe->stream->timing.h_addressable == 2560 && +				refresh_rate >= 55 && refresh_rate <= 65 && +				pipe->plane_state->src_rect.height == 1440 && +				pipe->plane_state->src_rect.width == 2560 && +				pipe->plane_state->dst_rect.height == 1440 && +				pipe->plane_state->dst_rect.width == 2560) +			allow = true; +	} +	return allow; +} + +/** + * ******************************************************************************************* + * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * + * @return: Max vratio for prefetch + * + * ******************************************************************************************* + */ +double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) +{ +	double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4 +	int i; + +	/* For single display MPO configs, allow the max vratio to be 8 +	 * if any plane is YUV420 format +	 */ +	if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) { +		for (i = 0; i < context->stream_status[0].plane_count; i++) { +			if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr || +					context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) { +				max_vratio_pre = __DML_MAX_VRATIO_PRE__; +			} +		} +	} +	return max_vratio_pre; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 3a3dc2ce4c73..ab010e7e840b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -73,4 +73,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,  void dcn32_patch_dpm_table(struct clk_bw_params *bw_params); +void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, +				  int pipe_cnt); +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 9afd9ba23fb2..3b2a014ccf8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -387,6 +387,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  				mode_lib->vba.NumberOfActiveSurfaces,  				mode_lib->vba.MALLAllocatedForDCNFinal,  				mode_lib->vba.UseMALLForStaticScreen, +				mode_lib->vba.UsesMALLForPStateChange,  				mode_lib->vba.DCCEnable,  				mode_lib->vba.ViewportStationary,  				mode_lib->vba.ViewportXStartY, @@ -411,6 +412,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  				v->BlockWidthC,  				v->BlockHeightY,  				v->BlockHeightC, +				mode_lib->vba.DCCMetaPitchY, +				mode_lib->vba.DCCMetaPitchC,  				/* Output */  				v->SurfaceSizeInMALL, @@ -670,6 +673,25 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		v->cursor_bw[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / 8 / 
(mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];  	} +	v->NotEnoughDETSwathFillLatencyHiding = dml32_CalculateDETSwathFillLatencyHiding( +						mode_lib->vba.NumberOfActiveSurfaces, +						mode_lib->vba.ReturnBW, +						v->UrgentLatency, +						mode_lib->vba.SwathHeightY, +						mode_lib->vba.SwathHeightC, +						v->swath_width_luma_ub, +						v->swath_width_chroma_ub, +						v->BytePerPixelDETY, +						v->BytePerPixelDETC, +						mode_lib->vba.DETBufferSizeY, +						mode_lib->vba.DETBufferSizeC, +						mode_lib->vba.DPPPerPlane, +						mode_lib->vba.HTotal, +						mode_lib->vba.PixelClock, +						mode_lib->vba.VRatio, +						mode_lib->vba.VRatioChroma, +						mode_lib->vba.UsesMALLForPStateChange); +  	for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {  		v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&  				!mode_lib->vba.ProgressiveToInterlaceUnitInOPP) ? @@ -874,8 +896,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  			if (v->DestinationLinesForPrefetch[k] < 2)  				DestinationLineTimesForPrefetchLessThan2 = true; -			if (v->VRatioPrefetchY[k] > __DML_MAX_VRATIO_PRE__ -					|| v->VRatioPrefetchC[k] > __DML_MAX_VRATIO_PRE__) +			if (v->VRatioPrefetchY[k] > v->MaxVRatioPre +					|| v->VRatioPrefetchC[k] > v->MaxVRatioPre)  				VRatioPrefetchMoreThanMax = true;  			//bool DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = false; @@ -920,6 +942,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  					v->UrgBurstFactorLumaPre,  					v->UrgBurstFactorChromaPre,  					v->UrgBurstFactorCursorPre, +					v->PrefetchBandwidth, +					v->VRatio, +					v->MaxVRatioPre,  					/* output */  					&MaxTotalRDBandwidth, @@ -950,6 +975,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  					v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,  					v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,  					v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector, +					v->PrefetchBandwidth, +					v->VRatio, +					v->MaxVRatioPre,  					/* output */  					&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0], @@ -1617,9 +1645,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  static void mode_support_configuration(struct vba_vars_st *v,  				  struct display_mode_lib *mode_lib)  { -	int i, j; +	int i, j, start_state; -	for (i = v->soc.num_states - 1; i >= 0; i--) { +	if (mode_lib->validate_max_state) +		start_state = v->soc.num_states - 1; +	else +		start_state = 0; + +	for (i = v->soc.num_states - 1; i >= start_state; i--) {  		for (j = 0; j < 2; j++) {  			if (mode_lib->vba.ScaleRatioAndTapsSupport == true  				&& mode_lib->vba.SourceFormatPixelAndScanSupport == true @@ -1665,6 +1698,8 @@ static void mode_support_configuration(struct vba_vars_st *v,  				&& mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] == true  				&& mode_lib->vba.NonsupportedDSCInputBPC == false  				&& !mode_lib->vba.ExceededMALLSize +				&& (mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] == false +				|| i == v->soc.num_states - 1)  				&& ((mode_lib->vba.HostVMEnable == false  				&& !mode_lib->vba.ImmediateFlipRequiredFinal)  				|| 
mode_lib->vba.ImmediateFlipSupportedForState[i][j]) @@ -1686,7 +1721,7 @@ static void mode_support_configuration(struct vba_vars_st *v,  void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)  {  	struct vba_vars_st *v = &mode_lib->vba; -	int i, j; +	int i, j, start_state;  	unsigned int k, m;  	unsigned int MaximumMPCCombine;  	unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth; @@ -1699,6 +1734,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  #endif  	/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ +	if (mode_lib->validate_max_state) +		start_state = v->soc.num_states - 1; +	else +		start_state = 0;  	/*Scale Ratio, taps Support Check*/ @@ -1988,7 +2027,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage  			&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible; -	for (i = 0; i < v->soc.num_states; i++) { +	for (i = start_state; i < v->soc.num_states; i++) {  		for (j = 0; j < 2; j++) {  			mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;  			mode_lib->vba.TotalAvailablePipesSupport[i][j] = true; @@ -2265,7 +2304,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	for (i = 0; i < v->soc.num_states; ++i) { +	for (i = start_state; i < v->soc.num_states; ++i) {  		mode_lib->vba.ExceededMultistreamSlots[i] = false;  		for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {  			if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) { @@ -2314,8 +2353,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.ForcedOutputLinkBPP[k] != 0)  				mode_lib->vba.DSCOnlyIfNecessaryWithBPP = true; -			if ((mode_lib->vba.DSCEnable[k] || mode_lib->vba.DSCEnable[k]) -					&& mode_lib->vba.OutputFormat[k] == dm_n422 +			if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.OutputFormat[k] == dm_n422  					&& !mode_lib->vba.DSC422NativeSupport)  				mode_lib->vba.DSC422NativeNotSupported = true; @@ -2365,7 +2403,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	for (i = 0; i < v->soc.num_states; ++i) { +	for (i = start_state; i < v->soc.num_states; ++i) {  		mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;  		for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {  			if (mode_lib->vba.BlendingAndTiming[k] == k @@ -2382,7 +2420,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	for (i = 0; i < v->soc.num_states; ++i) { +	for (i = start_state; i < v->soc.num_states; ++i) {  		mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;  		mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;  		for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { @@ -2400,7 +2438,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	for (i = 0; i < v->soc.num_states; i++) { +	for (i = start_state; i < v->soc.num_states; i++) {  		mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;  		for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {  			if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2437,7 +2475,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	/* Check 
DSC Unit and Slices Support */  	v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; -	for (i = 0; i < v->soc.num_states; ++i) { +	for (i = start_state; i < v->soc.num_states; ++i) {  		mode_lib->vba.NotEnoughDSCUnits[i] = false;  		mode_lib->vba.NotEnoughDSCSlices[i] = false;  		v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; @@ -2472,7 +2510,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	}  	/*DSC Delay per state*/ -	for (i = 0; i < v->soc.num_states; ++i) { +	for (i = start_state; i < v->soc.num_states; ++i) {  		for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {  			mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(  					mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k], @@ -2499,7 +2537,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	//Calculate Swath, DET Configuration, DCFCLKDeepSleep  	// -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {  				mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k]; @@ -2605,6 +2643,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			mode_lib->vba.NumberOfActiveSurfaces,  			mode_lib->vba.MALLAllocatedForDCNFinal,  			mode_lib->vba.UseMALLForStaticScreen, +			mode_lib->vba.UsesMALLForPStateChange,  			mode_lib->vba.DCCEnable,  			mode_lib->vba.ViewportStationary,  			mode_lib->vba.ViewportXStartY, @@ -2629,12 +2668,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			mode_lib->vba.MacroTileWidthC,  			mode_lib->vba.MacroTileHeightY,  			mode_lib->vba.MacroTileHeightC, +			mode_lib->vba.DCCMetaPitchY, +			mode_lib->vba.DCCMetaPitchC,  			/* Output */  			mode_lib->vba.SurfaceSizeInMALL,  			&mode_lib->vba.ExceededMALLSize); -	for (i = 0; i < v->soc.num_states; i++) { +	for (i = start_state; i < v->soc.num_states; i++) {  		for (j = 0; j < 2; j++) {  			for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {  				mode_lib->vba.swath_width_luma_ub_this_state[k] = @@ -2861,7 +2902,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	}  	//Calculate Return BW -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {  				if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2940,7 +2981,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			&mode_lib->vba.MinPrefetchMode,  			&mode_lib->vba.MaxPrefetchMode); -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j)  			mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];  	} @@ -3062,7 +3103,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.DCFCLKState);  	} // UseMinimumRequiredDCFCLK == true -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,  					mode_lib->vba.HostVMEnable, 
mode_lib->vba.DCFCLKState[i][j], @@ -3071,7 +3112,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	}  	//Re-ordering Buffer Support Check -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024  					/ mode_lib->vba.ReturnBWPerState[i][j] @@ -3093,7 +3134,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				+ mode_lib->vba.ReadBandwidthChroma[k];  	} -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] =  				dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j] @@ -3117,7 +3158,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	/* Prefetch Check */ -	for (i = 0; i < (int) v->soc.num_states; ++i) { +	for (i = start_state; i < (int) v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j]; @@ -3158,6 +3199,25 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.UrgentBurstFactorChroma,  					mode_lib->vba.UrgentBurstFactorCursor); +			mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] = dml32_CalculateDETSwathFillLatencyHiding( +					mode_lib->vba.NumberOfActiveSurfaces, +					mode_lib->vba.ReturnBWPerState[i][j], +					mode_lib->vba.UrgLatency[i], +					mode_lib->vba.SwathHeightYThisState, +					mode_lib->vba.SwathHeightCThisState, +					mode_lib->vba.swath_width_luma_ub_this_state, +					mode_lib->vba.swath_width_chroma_ub_this_state, +					mode_lib->vba.BytePerPixelInDETY, +					mode_lib->vba.BytePerPixelInDETC, +					mode_lib->vba.DETBufferSizeYThisState, +					mode_lib->vba.DETBufferSizeCThisState, +					mode_lib->vba.NoOfDPPThisState, +					mode_lib->vba.HTotal, +					mode_lib->vba.PixelClock, +					mode_lib->vba.VRatio, +					mode_lib->vba.VRatioChroma, +					mode_lib->vba.UsesMALLForPStateChange); +  			v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,  					mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],  					mode_lib->vba.DRAMSpeedPerState[i]); @@ -3318,6 +3378,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							mode_lib->vba.UrgentBurstFactorLumaPre,  							mode_lib->vba.UrgentBurstFactorChromaPre,  							mode_lib->vba.UrgentBurstFactorCursorPre, +							v->PrefetchBW, +							v->VRatio, +							v->MaxVRatioPre,  							/* output */  							&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0],   // Single  *PrefetchBandwidth @@ -3342,8 +3405,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.VRatioInPrefetchSupported[i][j] = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { -					if (mode_lib->vba.VRatioPreY[i][j][k] > __DML_MAX_VRATIO_PRE__ -							|| mode_lib->vba.VRatioPreC[i][j][k] > __DML_MAX_VRATIO_PRE__ +					if (mode_lib->vba.VRatioPreY[i][j][k] > mode_lib->vba.MaxVRatioPre +							|| mode_lib->vba.VRatioPreC[i][j][k] > mode_lib->vba.MaxVRatioPre  							|| mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) {  						
mode_lib->vba.VRatioInPrefetchSupported[i][j] = false;  					} @@ -3599,7 +3662,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64  					&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32  					&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16 -					&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16  					&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8  					&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe) {  				if (mode_lib->vba.ViewportWidthChroma[k] > mode_lib->vba.SurfaceWidthC[k] @@ -3616,7 +3678,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	MaximumMPCCombine = 0; -	for (i = v->soc.num_states; i >= 0; i--) { +	for (i = v->soc.num_states; i >= start_state; i--) {  		if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true ||  				mode_lib->vba.ModeSupport[i][1] == true) {  			mode_lib->vba.VoltageLevel = i; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index c8b28c83ddf4..500b3dd6052d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -44,7 +44,8 @@  #define __DML_MIN_DCFCLK_FACTOR__   1.15  // Prefetch schedule max vratio -#define __DML_MAX_VRATIO_PRE__ 4.0 +#define __DML_MAX_VRATIO_PRE__ 7.9 +#define __DML_MAX_BW_RATIO_PRE__ 4.0  #define __DML_VBA_MAX_DST_Y_PRE__    63.75 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index debe46b24a3e..d1000aa4c481 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1772,6 +1772,7 @@ void dml32_CalculateSurfaceSizeInMall(  		unsigned int NumberOfActiveSurfaces,  		unsigned int MALLAllocatedForDCN,  		enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], +		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],  		bool DCCEnable[],  		bool ViewportStationary[],  		unsigned int ViewportXStartY[], @@ -1796,13 +1797,17 @@ void dml32_CalculateSurfaceSizeInMall(  		unsigned int ReadBlockWidthC[],  		unsigned int ReadBlockHeightY[],  		unsigned int ReadBlockHeightC[], +		unsigned int DCCMetaPitchY[], +		unsigned int DCCMetaPitchC[],  		/* Output */  		unsigned int    SurfaceSizeInMALL[],  		bool *ExceededMALLSize)  { -	unsigned int TotalSurfaceSizeInMALL  = 0;  	unsigned int k; +	unsigned int TotalSurfaceSizeInMALLForSS = 0; +	unsigned int TotalSurfaceSizeInMALLForSubVP = 0; +	unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;  	for (k = 0; k < NumberOfActiveSurfaces; ++k) {  		if (ViewportStationary[k]) { @@ -1828,18 +1833,18 @@ void dml32_CalculateSurfaceSizeInMall(  			}  			if (DCCEnable[k] == true) {  				SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + -						dml_min(dml_ceil(SurfaceWidthY[k], 8 * Read256BytesBlockWidthY[k]), +						(dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]),  							dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 *  							Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k])  							- dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k]))  							* dml_min(dml_ceil(SurfaceHeightY[k], 8 *  							Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] +  							ViewportHeightY[k] + 8 * 
Read256BytesBlockHeightY[k] - 1, 8 * -							Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 -							* Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256; +							Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 * +							Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);  				if (Read256BytesBlockWidthC[k] > 0) {  					SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + -							dml_min(dml_ceil(SurfaceWidthC[k], 8 * +							dml_min(dml_ceil(DCCMetaPitchC[k], 8 *  								Read256BytesBlockWidthC[k]),  								dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8  								* Read256BytesBlockWidthC[k] - 1, 8 * @@ -1872,16 +1877,16 @@ void dml32_CalculateSurfaceSizeInMall(  			}  			if (DCCEnable[k] == true) {  				SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + -						dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] + 8 * +						(dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 *  								Read256BytesBlockWidthY[k] - 1), 8 *  								Read256BytesBlockWidthY[k]) *  						dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 *  								Read256BytesBlockHeightY[k] - 1), 8 * -								Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256; +								Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);  				if (Read256BytesBlockWidthC[k] > 0) {  					SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + -							dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] + 8 * +							dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 *  									Read256BytesBlockWidthC[k] - 1), 8 *  									Read256BytesBlockWidthC[k]) *  							dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 * @@ -1894,10 +1899,14 @@ void dml32_CalculateSurfaceSizeInMall(  	}  	for (k = 0; k < NumberOfActiveSurfaces; ++k) { -		if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) -			TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k]; +		/* SS and Subvp counted separate as they are never used at the same time */ +		if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) +			TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k]; +		else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) +			TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k];  	} -	*ExceededMALLSize =  (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024); +	*ExceededMALLSize =  (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) || +							(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);  } // CalculateSurfaceSizeInMall  void dml32_CalculateVMRowAndSwath( @@ -3471,7 +3480,7 @@ bool dml32_CalculatePrefetchSchedule(  	double  prefetch_sw_bytes;  	double  bytes_pp;  	double  dep_bytes; -	unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__; +	unsigned int max_vratio_pre = v->MaxVRatioPre;  	double  min_Lsw;  	double  Tsw_est1 = 0;  	double  Tsw_est3 = 0; @@ -6134,29 +6143,46 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,  		double UrgentBurstFactorLumaPre[],  		double UrgentBurstFactorChromaPre[],  		double UrgentBurstFactorCursorPre[], +		double PrefetchBW[], +		double VRatio[], +		double MaxVRatioPre,  		/* output */ -		double  *PrefetchBandwidth, +		double  *MaxPrefetchBandwidth,  		double  *FractionOfUrgentBandwidth,  		bool *PrefetchBandwidthSupport)  {  	unsigned int k; +	double ActiveBandwidthPerSurface;  	bool NotEnoughUrgentLatencyHiding = false; +	double TotalActiveBandwidth = 0; +	
double TotalPrefetchBandwidth = 0; +  	for (k = 0; k < NumberOfActiveSurfaces; ++k) {  		if (NotUrgentLatencyHiding[k]) {  			NotEnoughUrgentLatencyHiding = true;  		}  	} -	*PrefetchBandwidth = 0; +	*MaxPrefetchBandwidth = 0;  	for (k = 0; k < NumberOfActiveSurfaces; ++k) { -		*PrefetchBandwidth = *PrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k], -				ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]), +		ActiveBandwidthPerSurface = ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]); + +		TotalActiveBandwidth += ActiveBandwidthPerSurface; + +		TotalPrefetchBandwidth = TotalPrefetchBandwidth + PrefetchBW[k] * VRatio[k]; + +		*MaxPrefetchBandwidth = *MaxPrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k], +				ActiveBandwidthPerSurface,  				NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k] + PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k]) + cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);  	} -	*PrefetchBandwidthSupport = (*PrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding; -	*FractionOfUrgentBandwidth = *PrefetchBandwidth / ReturnBW; +	if (MaxVRatioPre == __DML_MAX_VRATIO_PRE__) +		*PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && (TotalPrefetchBandwidth <= TotalActiveBandwidth * __DML_MAX_BW_RATIO_PRE__) && !NotEnoughUrgentLatencyHiding; +	else +		*PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding; + +	*FractionOfUrgentBandwidth = *MaxPrefetchBandwidth / ReturnBW;  }  double dml32_CalculateBandwidthAvailableForImmediateFlip(unsigned int NumberOfActiveSurfaces, @@ -6228,3 +6254,72 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf  	*ImmediateFlipBandwidthSupport = (*TotalBandwidth <= ReturnBW);  	*FractionOfUrgentBandwidth = *TotalBandwidth / ReturnBW;  } + +bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces, +		double ReturnBW, +		double UrgentLatency, +		unsigned int SwathHeightY[], +		unsigned int SwathHeightC[], +		unsigned int SwathWidthY[], +		unsigned int SwathWidthC[], +		double  BytePerPixelInDETY[], +		double  BytePerPixelInDETC[], +		unsigned int    DETBufferSizeY[], +		unsigned int    DETBufferSizeC[], +		unsigned int	NumOfDPP[], +		unsigned int	HTotal[], +		double	PixelClock[], +		double	VRatioY[], +		double	VRatioC[], +		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]) +{ +	int k; +	double SwathSizeAllSurfaces = 0; +	double SwathSizeAllSurfacesInFetchTimeUs; +	double DETSwathLatencyHidingUs; +	double DETSwathLatencyHidingYUs; +	double DETSwathLatencyHidingCUs; +	double SwathSizePerSurfaceY[DC__NUM_DPP__MAX]; +	double SwathSizePerSurfaceC[DC__NUM_DPP__MAX]; +	bool NotEnoughDETSwathFillLatencyHiding = false; + +	/* calculate sum of single swath size for all pipes in bytes */ +	for (k = 0; k < NumberOfActiveSurfaces; k++) { +		SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; + +		if (SwathHeightC[k] != 0) +			SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; +		else +			SwathSizePerSurfaceC[k] = 0; + +		SwathSizeAllSurfaces += 
SwathSizePerSurfaceY[k] + SwathSizePerSurfaceC[k]; +	} + +	SwathSizeAllSurfacesInFetchTimeUs = SwathSizeAllSurfaces / ReturnBW + UrgentLatency; + +	/* ensure all DET - 1 swath can hide a fetch for all surfaces */ +	for (k = 0; k < NumberOfActiveSurfaces; k++) { +		double LineTime = HTotal[k] / PixelClock[k]; + +		/* only care if surface is not phantom */ +		if (UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) { +			DETSwathLatencyHidingYUs = (dml_floor(DETBufferSizeY[k] / BytePerPixelInDETY[k] / SwathWidthY[k], 1.0) - SwathHeightY[k]) / VRatioY[k] * LineTime; + +			if (SwathHeightC[k] != 0) { +				DETSwathLatencyHidingCUs = (dml_floor(DETBufferSizeC[k] / BytePerPixelInDETC[k] / SwathWidthC[k], 1.0) - SwathHeightC[k]) / VRatioC[k] * LineTime; + +				DETSwathLatencyHidingUs = dml_min(DETSwathLatencyHidingYUs, DETSwathLatencyHidingCUs); +			} else { +				DETSwathLatencyHidingUs = DETSwathLatencyHidingYUs; +			} + +			/* DET must be able to hide time to fetch 1 swath for each surface */ +			if (DETSwathLatencyHidingUs < SwathSizeAllSurfacesInFetchTimeUs) { +				NotEnoughDETSwathFillLatencyHiding = true; +				break; +			} +		} +	} + +	return NotEnoughDETSwathFillLatencyHiding; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 3989c2a28fae..9ba792c633a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -334,6 +334,7 @@ void dml32_CalculateSurfaceSizeInMall(  		unsigned int NumberOfActiveSurfaces,  		unsigned int MALLAllocatedForDCN,  		enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], +		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],  		bool DCCEnable[],  		bool ViewportStationary[],  		unsigned int ViewportXStartY[], @@ -358,6 +359,8 @@ void dml32_CalculateSurfaceSizeInMall(  		unsigned int ReadBlockWidthC[],  		unsigned int ReadBlockHeightY[],  		unsigned int ReadBlockHeightC[], +		unsigned int DCCMetaPitchY[], +		unsigned int DCCMetaPitchC[],  		/* Output */  		unsigned int    SurfaceSizeInMALL[], @@ -1093,9 +1096,12 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,  		double UrgentBurstFactorLumaPre[],  		double UrgentBurstFactorChromaPre[],  		double UrgentBurstFactorCursorPre[], +		double PrefetchBW[], +		double VRatio[], +		double MaxVRatioPre,  		/* output */ -		double  *PrefetchBandwidth, +		double  *MaxPrefetchBandwidth,  		double  *FractionOfUrgentBandwidth,  		bool *PrefetchBandwidthSupport); @@ -1141,4 +1147,22 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf  		double  *FractionOfUrgentBandwidth,  		bool *ImmediateFlipBandwidthSupport); +bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces, +		double ReturnBW, +		double UrgentLatency, +		unsigned int SwathHeightY[], +		unsigned int SwathHeightC[], +		unsigned int SwathWidthY[], +		unsigned int SwathWidthC[], +		double  BytePerPixelInDETY[], +		double  BytePerPixelInDETC[], +		unsigned int    DETBufferSizeY[], +		unsigned int    DETBufferSizeC[], +		unsigned int	NumOfDPP[], +		unsigned int	HTotal[], +		double	PixelClock[], +		double	VRatioY[], +		double	VRatioC[], +		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]); +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c 
index f4b176599be7..b80cef70fa60 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {  	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,  	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,  	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, -	.pct_ideal_sdp_bw_after_urgent = 100.0, +	.pct_ideal_sdp_bw_after_urgent = 90.0,  	.pct_ideal_fabric_bw_after_urgent = 67.0,  	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,  	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented @@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p  		}  		/* Override from VBIOS for num_chan */ -		if (dc->ctx->dc_bios->vram_info.num_chans) +		if (dc->ctx->dc_bios->vram_info.num_chans) {  			dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; +			dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, +				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); +		}  		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)  			dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index f394b3f3922a..0bffae95f3a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -105,14 +105,39 @@ enum source_macro_tile_size {  enum cursor_bpp {  	dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2  }; + +/** + * @enum clock_change_support - It represents possible reasons to change the DRAM clock. + * + * DC may change the DRAM clock during its execution, and this enum tracks all + * the available methods. Note that every ASIC has its own way of dealing + * with these clock switches. 
+ */  enum clock_change_support { +	/** +	 * @dm_dram_clock_change_uninitialized: If you see this, we might have +	 * a code initialization issue +	 */  	dm_dram_clock_change_uninitialized = 0, + +	/** +	 * @dm_dram_clock_change_vactive: Support DRAM switch in VActive +	 */  	dm_dram_clock_change_vactive, + +	/** +	 * @dm_dram_clock_change_vblank: Support DRAM switch in VBlank +	 */  	dm_dram_clock_change_vblank, +  	dm_dram_clock_change_vactive_w_mall_full_frame,  	dm_dram_clock_change_vactive_w_mall_sub_vp,  	dm_dram_clock_change_vblank_w_mall_full_frame,  	dm_dram_clock_change_vblank_w_mall_sub_vp, + +	/** +	 * @dm_dram_clock_change_unsupported: Do not support DRAM switch +	 */  	dm_dram_clock_change_unsupported  }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 3d643d50c3eb..a9d49ef58fb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -91,6 +91,7 @@ struct display_mode_lib {  	struct dal_logger *logger;  	struct dml_funcs funcs;  	struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; +	bool validate_max_state;  };  void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 64d602e6412f..3c077164f362 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -246,6 +246,7 @@ struct _vcs_dpi_soc_bounding_box_st {  	bool disable_dram_clock_change_vactive_support;  	bool allow_dram_clock_one_display_vactive;  	enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank; +	double max_vratio_pre;  };  /** diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 8e6585dab20e..f9653f511baa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -202,6 +202,7 @@ dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes);  dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear);  dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE);  dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL); +dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL)  double get_total_immediate_flip_bytes(  		struct display_mode_lib *mode_lib, @@ -411,6 +412,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)  		soc->urgent_latency_adjustment_fabric_clock_component_us;  	mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference =  		soc->urgent_latency_adjustment_fabric_clock_reference_mhz; +	mode_lib->vba.MaxVRatioPre = soc->max_vratio_pre;  }  static void fetch_ip_params(struct display_mode_lib *mode_lib) diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 630f3395e90a..07993741f5e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -143,6 +143,7 @@ dml_get_pipe_attr_decl(vready_at_or_after_vsync);  dml_get_pipe_attr_decl(min_dst_y_next_start);  dml_get_pipe_attr_decl(vstartup_calculated);  dml_get_pipe_attr_decl(subviewport_lines_needed_in_mall); 
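The validate_max_state flag added to struct display_mode_lib above is what feeds the start_state computation introduced in mode_support_configuration() and dml32_ModeSupportAndSystemConfigurationFull(). A minimal sketch of that selection, assuming only the fields visible in this diff (the helper name is illustrative; the patch open-codes the same ternary at each loop):

	static int dml32_first_state_to_validate(const struct display_mode_lib *mode_lib)
	{
		const struct vba_vars_st *v = &mode_lib->vba;

		/* When asked to validate only the maximum state, skip every DPM
		 * state below the highest one; otherwise walk them all.
		 */
		return mode_lib->validate_max_state ? (int)v->soc.num_states - 1 : 0;
	}

Each per-state loop in the hunks above then runs from start_state up to v->soc.num_states - 1 (or counts down to start_state), so a max-state-only validation evaluates a single DPM level instead of the full table.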
+dml_get_pipe_attr_decl(surface_size_in_mall);  double get_total_immediate_flip_bytes(  		struct display_mode_lib *mode_lib, @@ -262,6 +263,7 @@ struct vba_vars_st {  	int maxMpcComb;  	bool UseMaximumVStartup; +	double MaxVRatioPre;  	double WritebackDISPCLK;  	double DPPCLKUsingSingleDPPLuma;  	double DPPCLKUsingSingleDPPChroma; @@ -419,6 +421,15 @@ struct vba_vars_st {  	double MinPixelChunkSizeBytes;  	unsigned int DCCMetaBufferSizeBytes;  	// Pipe/Plane Parameters + +	/** @VoltageLevel: +	 * Every ASIC has a fixed number of DPM states, and some devices might +	 * have some particular voltage configuration that does not map +	 * directly to the DPM states. This field tells how many states the +	 * target device supports; even though this field combines the DPM and +	 * special SOC voltages, it mostly matches the total number of DPM +	 * states. +	 */  	int VoltageLevel;  	double FabricClock;  	double DRAMSpeed; @@ -1041,6 +1052,7 @@ struct vba_vars_st {  	double MinFullDETBufferingTime;  	double AverageReadBandwidthGBytePerSecond;  	bool   FirstMainPlane; +	bool NotEnoughDETSwathFillLatencyHiding;  	unsigned int ViewportWidthChroma[DC__NUM_DPP__MAX];  	unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX]; @@ -1153,7 +1165,7 @@ struct vba_vars_st {  	double UrgBurstFactorLumaPre[DC__NUM_DPP__MAX];  	double UrgBurstFactorChromaPre[DC__NUM_DPP__MAX];  	bool NotUrgentLatencyHidingPre[DC__NUM_DPP__MAX]; -	bool LinkCapacitySupport[DC__NUM_DPP__MAX]; +	bool LinkCapacitySupport[DC__VOLTAGE_STATES];  	bool VREADY_AT_OR_AFTER_VSYNC[DC__NUM_DPP__MAX];  	unsigned int MIN_DST_Y_NEXT_START[DC__NUM_DPP__MAX];  	unsigned int VFrontPorch[DC__NUM_DPP__MAX]; @@ -1224,6 +1236,7 @@ struct vba_vars_st {  	unsigned int BlockWidthC[DC__NUM_DPP__MAX];  	unsigned int SubViewportLinesNeededInMALL[DC__NUM_DPP__MAX];  	bool VActiveBandwithSupport[DC__VOLTAGE_STATES][2]; +	bool NotEnoughDETSwathFillLatencyHidingPerState[DC__VOLTAGE_STATES][2];  	struct dummy_vars dummy_vars;  }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h index e5fac9f4181d..dcff0dd2b6a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h @@ -25,7 +25,7 @@   */ -const qp_table   qp_table_422_10bpc_min = { +static const qp_table   qp_table_422_10bpc_min = {  	{   6, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} },  	{ 6.5, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} },  	{   7, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 7, 9, 9, 9, 11, 15} }, @@ -58,7 +58,7 @@ const qp_table   qp_table_422_10bpc_min = {  }; -const qp_table   qp_table_444_8bpc_max = { +static const qp_table   qp_table_444_8bpc_max = {  	{   6, { 4, 6, 8, 8, 9, 9, 9, 10, 11, 12, 12, 12, 12, 13, 15} },  	{ 6.5, { 4, 6, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 12, 13, 15} },  	{   7, { 4, 5, 7, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 13, 14} }, @@ -99,7 +99,7 @@ const qp_table   qp_table_444_8bpc_max = {  }; -const qp_table   qp_table_420_12bpc_max = { +static const qp_table   qp_table_420_12bpc_max = {  	{   4, {11, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 21, 22} },  	{ 4.5, {10, 11, 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },  	{   5, { 9, 11, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 19, 20, 21} }, @@ -132,7 +132,7 @@ const qp_table   qp_table_420_12bpc_max = {  }; -const qp_table   qp_table_444_10bpc_min = { +static const qp_table   qp_table_444_10bpc_min = {  	{   6, { 0, 4, 7, 7, 9, 9, 9, 9, 9, 10, 10, 10, 10, 12, 18} },  	{ 6.5, { 0, 4, 6, 
7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 12, 18} },  	{   7, { 0, 4, 6, 6, 8, 8, 8, 8, 8, 9, 9, 10, 10, 12, 17} }, @@ -185,7 +185,7 @@ const qp_table   qp_table_444_10bpc_min = {  }; -const qp_table   qp_table_420_8bpc_max = { +static const qp_table   qp_table_420_8bpc_max = {  	{   4, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 13, 14} },  	{ 4.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} },  	{   5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 12, 13} }, @@ -206,7 +206,7 @@ const qp_table   qp_table_420_8bpc_max = {  }; -const qp_table   qp_table_444_8bpc_min = { +static const qp_table   qp_table_444_8bpc_min = {  	{   6, { 0, 1, 3, 3, 5, 5, 5, 5, 5, 6, 6, 6, 6, 9, 14} },  	{ 6.5, { 0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 9, 14} },  	{   7, { 0, 0, 2, 2, 4, 4, 4, 4, 4, 5, 5, 6, 6, 9, 13} }, @@ -247,7 +247,7 @@ const qp_table   qp_table_444_8bpc_min = {  }; -const qp_table   qp_table_444_12bpc_min = { +static const qp_table   qp_table_444_12bpc_min = {  	{   6, { 0, 5, 11, 11, 13, 13, 13, 13, 13, 14, 14, 14, 14, 17, 22} },  	{ 6.5, { 0, 5, 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 17, 22} },  	{   7, { 0, 5, 10, 10, 12, 12, 12, 12, 12, 13, 13, 14, 14, 17, 21} }, @@ -312,7 +312,7 @@ const qp_table   qp_table_444_12bpc_min = {  }; -const qp_table   qp_table_420_12bpc_min = { +static const qp_table   qp_table_420_12bpc_min = {  	{   4, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21} },  	{ 4.5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} },  	{   5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, @@ -345,7 +345,7 @@ const qp_table   qp_table_420_12bpc_min = {  }; -const qp_table   qp_table_422_12bpc_min = { +static const qp_table   qp_table_422_12bpc_min = {  	{   6, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} },  	{ 6.5, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} },  	{   7, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, @@ -386,7 +386,7 @@ const qp_table   qp_table_422_12bpc_min = {  }; -const qp_table   qp_table_422_12bpc_max = { +static const qp_table   qp_table_422_12bpc_max = {  	{   6, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },  	{ 6.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },  	{   7, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} }, @@ -427,7 +427,7 @@ const qp_table   qp_table_422_12bpc_max = {  }; -const qp_table   qp_table_444_12bpc_max = { +static const qp_table   qp_table_444_12bpc_max = {  	{   6, {12, 14, 16, 16, 17, 17, 17, 18, 19, 20, 20, 20, 20, 21, 23} },  	{ 6.5, {12, 14, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 20, 21, 23} },  	{   7, {12, 13, 15, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 21, 22} }, @@ -492,7 +492,7 @@ const qp_table   qp_table_444_12bpc_max = {  }; -const qp_table   qp_table_420_8bpc_min = { +static const qp_table   qp_table_420_8bpc_min = {  	{   4, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 13} },  	{ 4.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },  	{   5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, @@ -513,7 +513,7 @@ const qp_table   qp_table_420_8bpc_min = {  }; -const qp_table   qp_table_422_8bpc_min = { +static const qp_table   qp_table_422_8bpc_min = {  	{   6, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },  	{ 6.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },  	{   7, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, @@ -538,7 +538,7 @@ const qp_table   qp_table_422_8bpc_min = {  }; -const qp_table   qp_table_422_10bpc_max = { +static const qp_table   qp_table_422_10bpc_max = 
{  	{   6, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },  	{ 6.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },  	{   7, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, @@ -571,7 +571,7 @@ const qp_table   qp_table_422_10bpc_max = {  }; -const qp_table qp_table_420_10bpc_max = { +static const qp_table qp_table_420_10bpc_max = {  	{   4, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 17, 18} },  	{ 4.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },  	{   5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 16, 17} }, @@ -598,7 +598,7 @@ const qp_table qp_table_420_10bpc_max = {  }; -const qp_table   qp_table_420_10bpc_min = { +static const qp_table   qp_table_420_10bpc_min = {  	{   4, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 17} },  	{ 4.5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} },  	{   5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, @@ -625,7 +625,7 @@ const qp_table   qp_table_420_10bpc_min = {  }; -const qp_table   qp_table_444_10bpc_max = { +static const qp_table   qp_table_444_10bpc_max = {  	{   6, { 8, 10, 12, 12, 13, 13, 13, 14, 15, 16, 16, 16, 16, 17, 19} },  	{ 6.5, { 8, 10, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 16, 17, 19} },  	{   7, { 8, 9, 11, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 17, 18} }, @@ -678,7 +678,7 @@ const qp_table   qp_table_444_10bpc_max = {  }; -const qp_table   qp_table_422_8bpc_max = { +static const qp_table   qp_table_422_8bpc_max = {  	{   6, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} },  	{ 6.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} },  	{   7, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index ad80bde9bc0f..31574940ccc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -46,7 +46,10 @@ struct dsc_parameters {  	uint32_t rc_buffer_model_size;  }; -int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params); +struct rc_params; +int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, +		const struct rc_params *rc, +		struct dsc_parameters *dsc_params);  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index f0aea988fef0..36d6c1646a51 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -95,19 +95,19 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param  		dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i];  } -int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params) +int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, +		const struct rc_params *rc, +		struct dsc_parameters *dsc_params)  {  	int              ret; -	struct rc_params rc;  	struct drm_dsc_config   dsc_cfg;  	unsigned long long tmp; -	calc_rc_params(&rc, pps);  	dsc_params->pps = *pps; -	dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset); +	dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);  	copy_pps_fields(&dsc_cfg, &dsc_params->pps); -	copy_rc_to_cfg(&dsc_cfg, &rc); +	copy_rc_to_cfg(&dsc_cfg, rc);  	dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 
48 : 64; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 9b63c6c0cc84..e0bd0c722e00 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = {  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), -	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) +	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)  };  static const struct ddc_sh_mask ddc_mask[] = { @@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = {  	DDC_MASK_SH_LIST_DCN2(_MASK, 3),  	DDC_MASK_SH_LIST_DCN2(_MASK, 4),  	DDC_MASK_SH_LIST_DCN2(_MASK, 5), -	DDC_MASK_SH_LIST_DCN2(_MASK, 6) +	DDC_MASK_SH_LIST_DCN2(_MASK, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)  };  #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c index 687d4f128480..36a5736c58c9 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c @@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = {  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), -	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) +	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)  };  static const struct ddc_sh_mask ddc_mask[] = { @@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = {  	DDC_MASK_SH_LIST_DCN2(_MASK, 3),  	DDC_MASK_SH_LIST_DCN2(_MASK, 4),  	DDC_MASK_SH_LIST_DCN2(_MASK, 5), -	DDC_MASK_SH_LIST_DCN2(_MASK, 6) +	DDC_MASK_SH_LIST_DCN2(_MASK, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)  };  #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index 0ea52ba5ac82..985f10b39750 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = {  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),  	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), -	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) +	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)  };  static const struct ddc_sh_mask ddc_mask[] = { @@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = {  	DDC_MASK_SH_LIST_DCN2(_MASK, 3),  	DDC_MASK_SH_LIST_DCN2(_MASK, 4),  	DDC_MASK_SH_LIST_DCN2(_MASK, 5), -	DDC_MASK_SH_LIST_DCN2(_MASK, 6) +	DDC_MASK_SH_LIST_DCN2(_MASK, 6), +	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)  };  #include "../generic_regs.h" @@ -256,8 +258,8 @@ static const struct hw_factory_funcs funcs = {   */  void dal_hw_factory_dcn32_init(struct hw_factory *factory)  { -	factory->number_of_pins[GPIO_ID_DDC_DATA] = 6; -	factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 6; +	factory->number_of_pins[GPIO_ID_DDC_DATA] = 8; +	factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;  	factory->number_of_pins[GPIO_ID_GENERIC] = 4;  	factory->number_of_pins[GPIO_ID_HPD] = 5;  	factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 308a543178a5..59884ef651b3 100644 --- 
a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -113,6 +113,13 @@  	(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\  	(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} +#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \ +	{DDC_MASK_SH_LIST_COMMON(mask_sh),\ +	0,\ +	0,\ +	0,\ +	0} +  struct ddc_registers {  	struct gpio_registers gpio;  	uint32_t ddc_setup; diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 4233955e3c47..e1422e5e86c9 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -28,12 +28,11 @@  #include "dm_services.h"  #include "dm_helpers.h"  #include "include/hdcp_types.h" -#include "include/i2caux_interface.h"  #include "include/signal_types.h"  #include "core_types.h" -#include "dc_link_ddc.h" +#include "link.h"  #include "link_hwss.h" -#include "inc/link_dpcd.h" +#include "link/protocols/link_dpcd.h"  #define DC_LOGGER \  	link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 9498105c98ab..ed3c03108da6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -56,33 +56,6 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,  #endif  #include "link_hwss.h" -/************ link *****************/ -struct link_init_data { -	const struct dc *dc; -	struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */ -	uint32_t connector_index; /* this will be mapped to the HPD pins */ -	uint32_t link_index; /* this is mapped to DAL display_index -				TODO: remove it when DC is complete. */ -	bool is_dpia_link; -}; - -struct dc_link *link_create(const struct link_init_data *init_params); -void link_destroy(struct dc_link **link); - -enum dc_status dc_link_validate_mode_timing( -		const struct dc_stream_state *stream, -		struct dc_link *link, -		const struct dc_crtc_timing *timing); - -void core_link_resume(struct dc_link *link); - -void core_link_enable_stream( -		struct dc_state *state, -		struct pipe_ctx *pipe_ctx); - -void core_link_disable_stream(struct pipe_ctx *pipe_ctx); - -void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);  /********** DAL Core*********************/  #include "transform.h"  #include "dpp.h" @@ -115,6 +88,13 @@ struct resource_funcs {  				int vlevel);  	void (*update_soc_for_wm_a)(  				struct dc *dc, struct dc_state *context); + +	/** +	 * @populate_dml_pipes - Populate pipe data struct +	 * +	 * Returns: +	 * Total of pipes available in the specific ASIC. 
+	 */  	int (*populate_dml_pipes)(  		struct dc *dc,  		struct dc_state *context, @@ -233,8 +213,11 @@ struct resource_funcs {  			unsigned int pipe_cnt,              unsigned int index); -	bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context); +	bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update); +	void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);  	void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); +	void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); +	void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);  };  struct audio_support{ @@ -413,7 +396,10 @@ struct pipe_ctx {  	struct pll_settings pll_settings; -	/* link config records software decision for what link config should be +	/** +	 * @link_config: +	 * +	 * link config records software decision for what link config should be  	 * enabled given current link capability and stream during hw resource  	 * mapping. This is to decouple the dependency on link capability during  	 * dc commit or update. @@ -437,10 +423,11 @@ struct pipe_ctx {  	struct _vcs_dpi_display_e2e_pipe_params_st dml_input;  	int det_buffer_size_kb;  	bool unbounded_req; +	unsigned int surface_size_in_mall_bytes; -	union pipe_update_flags update_flags;  	struct dwbc *dwbc;  	struct mcif_wb *mcif_wb; +	union pipe_update_flags update_flags;  };  /* Data used for dynamic link encoder assignment. @@ -494,6 +481,9 @@ struct dcn_bw_output {  	struct dcn_watermark_set watermarks;  	struct dcn_bw_writeback bw_writeback;  	int compbuf_size_kb; +	unsigned int mall_ss_size_bytes; +	unsigned int mall_ss_psr_active_size_bytes; +	unsigned int mall_subvp_size_bytes;  	unsigned int legacy_svp_drr_stream_index;  	bool legacy_svp_drr_stream_index_valid;  }; @@ -507,33 +497,62 @@ struct bw_context {  	union bw_output bw;  	struct display_mode_lib dml;  }; +  /** - * struct dc_state - The full description of a state requested by a user - * - * @streams: Stream properties - * @stream_status: The planes on a given stream - * @res_ctx: Persistent state of resources - * @bw_ctx: The output from bandwidth and watermark calculations and the DML - * @pp_display_cfg: PowerPlay clocks and settings - * @dcn_bw_vars: non-stack memory to support bandwidth calculations - * + * struct dc_state - The full description of a state requested by users   */  struct dc_state { +	/** +	 * @streams: Stream state properties +	 */  	struct dc_stream_state *streams[MAX_PIPES]; + +	/** +	 * @stream_status: Planes status on a given stream +	 */  	struct dc_stream_status stream_status[MAX_PIPES]; + +	/** +	 * @stream_count: Total of streams in use +	 */  	uint8_t stream_count;  	uint8_t stream_mask; +	/** +	 * @res_ctx: Persistent state of resources +	 */  	struct resource_context res_ctx; -	struct bw_context bw_ctx; - -	/* Note: these are big structures, do *not* put on stack! */ +	/** +	 * @pp_display_cfg: PowerPlay clocks and settings +	 * Note: this is a big struct, do *not* put on stack! +	 */  	struct dm_pp_display_configuration pp_display_cfg; + +	/** +	 * @dcn_bw_vars: non-stack memory to support bandwidth calculations +	 * Note: this is a big struct, do *not* put on stack! 
+	 */  	struct dcn_bw_internal_vars dcn_bw_vars;  	struct clk_mgr *clk_mgr; +	/** +	 * @bw_ctx: The output from bandwidth and watermark calculations and the DML +	 * +	 * Each context must have its own instance of VBA, and in order to +	 * initialize and obtain IP and SOC, the base DML instance from DC is +	 * initially copied into every context. +	 */ +	struct bw_context bw_ctx; + +	/** +	 * @refcount: refcount reference +	 * +	 * Notice that dc_state is used around the code to capture the current +	 * context, so we need to pass it everywhere. That's why we want to use +	 * kref in this struct. +	 */  	struct kref refcount;  	struct { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h deleted file mode 100644 index 95fb61d62778..000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_DDC_SERVICE_H__ -#define __DAL_DDC_SERVICE_H__ - -#include "include/ddc_service_types.h" -#include "include/i2caux_interface.h" - -#define EDID_SEGMENT_SIZE 256 - -/* Address range from 0x00 to 0x1F.*/ -#define DP_ADAPTOR_TYPE2_SIZE 0x20 -#define DP_ADAPTOR_TYPE2_REG_ID 0x10 -#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D -/* Identifies adaptor as Dual-mode adaptor */ -#define DP_ADAPTOR_TYPE2_ID 0xA0 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 -/* kHZ*/ -#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 -/* kHZ*/ -#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 - -#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW - -struct ddc_service; -struct graphics_object_id; -enum ddc_result; -struct av_sync_data; -struct dp_receiver_id_info; - -struct i2c_payloads; -struct aux_payloads; -enum aux_return_code_type; - -void dal_ddc_i2c_payloads_add( -		struct i2c_payloads *payloads, -		uint32_t address, -		uint32_t len, -		uint8_t *data, -		bool write); - -struct ddc_service_init_data { -	struct graphics_object_id id; -	struct dc_context *ctx; -	struct dc_link *link; -	bool is_dpia_link; -}; - -struct ddc_service *dal_ddc_service_create( -		struct ddc_service_init_data *ddc_init_data); - -void dal_ddc_service_destroy(struct ddc_service **ddc); - -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc); - -void dal_ddc_service_set_transaction_type( -		struct ddc_service *ddc, -		enum ddc_transaction_type type); - -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc); - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( -		struct ddc_service *ddc, -		struct display_sink_capability *sink_cap); - -bool dal_ddc_service_query_ddc_data( -		struct ddc_service *ddc, -		uint32_t address, -		uint8_t *write_buf, -		uint32_t write_size, -		uint8_t *read_buf, -		uint32_t read_size); - -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, -		struct aux_payload *payload); - -int dc_link_aux_transfer_raw(struct ddc_service *ddc, -		struct aux_payload *payload, -		enum aux_return_code_type *operation_result); - -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, -		struct aux_payload *payload); - -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, -		uint32_t timeout); - -void dal_ddc_service_write_scdc_data( -		struct ddc_service *ddc_service, -		uint32_t pix_clk, -		bool lte_340_scramble); - -void dal_ddc_service_read_scdc_data( -		struct ddc_service *ddc_service); - -void ddc_service_set_dongle_type(struct ddc_service *ddc, -		enum display_dongle_type dongle_type); - -void dal_ddc_service_set_ddc_pin( -		struct ddc_service *ddc_service, -		struct ddc *ddc); - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service); - -uint32_t get_defer_delay(struct ddc_service *ddc); - -#endif /* __DAL_DDC_SERVICE_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h deleted file mode 100644 index b304d450b038..000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_DP_H__ -#define __DC_LINK_DP_H__ - -#define LINK_TRAINING_ATTEMPTS 4 -#define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ -#define MAX_MTP_SLOT_COUNT 64 -#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 -#define TRAINING_AUX_RD_INTERVAL 100 //us -#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX. - -struct dc_link; -struct dc_stream_state; -struct dc_link_settings; - -enum { -	LINK_TRAINING_MAX_RETRY_COUNT = 5, -	/* to avoid infinite loop where-in the receiver -	 * switches between different VS -	 */ -	LINK_TRAINING_MAX_CR_RETRY = 100, -	/* -	 * Some receivers fail to train on first try and are good -	 * on subsequent tries. 2 retries should be plenty. If we -	 * don't have a successful training then we don't expect to -	 * ever get one. 
-	 */ -	LINK_TRAINING_MAX_VERIFY_RETRY = 2, -	PEAK_FACTOR_X1000 = 1006, -}; - -struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); - -bool dp_verify_link_cap_with_retries( -	struct dc_link *link, -	struct dc_link_settings *known_limit_link_setting, -	int attempts); - -bool dp_validate_mode_timing( -	struct dc_link *link, -	const struct dc_crtc_timing *timing); - -bool decide_edp_link_settings(struct dc_link *link, -		struct dc_link_settings *link_setting, -		uint32_t req_bw); - -bool decide_link_settings( -	struct dc_stream_state *stream, -	struct dc_link_settings *link_setting); - -bool perform_link_training_with_retries( -	const struct dc_link_settings *link_setting, -	bool skip_video_pattern, -	int attempts, -	struct pipe_ctx *pipe_ctx, -	enum signal_type signal, -	bool do_fallback); - -bool hpd_rx_irq_check_link_loss_status( -	struct dc_link *link, -	union hpd_irq_data *hpd_irq_dpcd_data); - -bool is_mst_supported(struct dc_link *link); - -bool detect_dp_sink_caps(struct dc_link *link); - -void detect_edp_sink_caps(struct dc_link *link); - -bool is_dp_active_dongle(const struct dc_link *link); - -bool is_dp_branch_device(const struct dc_link *link); - -bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); - -void dp_enable_mst_on_sink(struct dc_link *link, bool enable); - -enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); -void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); - -bool dp_overwrite_extended_receiver_cap(struct dc_link *link); - -void dpcd_set_source_specific_data(struct dc_link *link); - -void dpcd_write_cable_id_to_dprx(struct dc_link *link); - -/* Write DPCD link configuration data. */ -enum dc_status dpcd_set_link_settings( -	struct dc_link *link, -	const struct link_training_settings *lt_settings); -/* Write DPCD drive settings. */ -enum dc_status dpcd_set_lane_settings( -	struct dc_link *link, -	const struct link_training_settings *link_training_setting, -	uint32_t offset); -/* Read training status and adjustment requests from DPCD. 
*/ -enum dc_status dp_get_lane_status_and_lane_adjust( -	struct dc_link *link, -	const struct link_training_settings *link_training_setting, -	union lane_status ln_status[LANE_COUNT_DP_MAX], -	union lane_align_status_updated *ln_align, -	union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], -	uint32_t offset); - -void dp_wait_for_training_aux_rd_interval( -	struct dc_link *link, -	uint32_t wait_in_micro_secs); - -bool dp_is_cr_done(enum dc_lane_count ln_count, -	union lane_status *dpcd_lane_status); - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, -	union lane_status *dpcd_lane_status); - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, -	union lane_status *dpcd_lane_status); -bool dp_is_symbol_locked(enum dc_lane_count ln_count, -	union lane_status *dpcd_lane_status); -bool dp_is_interlane_aligned(union lane_align_status_updated align_status); - -bool dp_is_max_vs_reached( -	const struct link_training_settings *lt_settings); -void dp_hw_to_dpcd_lane_settings( -	const struct link_training_settings *lt_settings, -	const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], -	union dpcd_training_lane dpcd_lane_settings[]); -void dp_decide_lane_settings( -	const struct link_training_settings *lt_settings, -	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], -	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], -	union dpcd_training_lane dpcd_lane_settings[]); - -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval); - -enum dpcd_training_patterns -	dc_dp_training_pattern_to_dpcd_training_pattern( -	struct dc_link *link, -	enum dc_dp_training_pattern pattern); - -uint8_t dc_dp_initialize_scrambling_data_symbols( -	struct dc_link *link, -	enum dc_dp_training_pattern pattern); - -enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); -void dp_set_fec_enable(struct dc_link *link, bool enable); -bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); -void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); - -/* Initialize output parameter lt_settings. */ -void dp_decide_training_settings( -	struct dc_link *link, -	const struct dc_link_settings *link_setting, -	struct link_training_settings *lt_settings); - -/* Convert PHY repeater count read from DPCD uint8_t. */ -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count); - -/* Check DPCD training status registers to detect link loss. 
*/ -enum link_training_result dp_check_link_loss_status( -		struct dc_link *link, -		const struct link_training_settings *link_training_setting); - -enum dc_status dpcd_configure_lttpr_mode( -		struct dc_link *link, -		struct link_training_settings *lt_settings); - -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); -bool dp_retrieve_lttpr_cap(struct dc_link *link); -bool dp_is_lttpr_present(struct dc_link *link); -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting); -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override); -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); -bool dpcd_write_128b_132b_sst_payload_allocation_table( -		const struct dc_stream_state *stream, -		struct dc_link *link, -		struct link_mst_stream_allocation_table *proposed_table, -		bool allocate); - -enum dc_status dpcd_configure_channel_coding( -		struct dc_link *link, -		struct link_training_settings *lt_settings); - -bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); - -struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( -		const struct dc_stream_state *stream, -		const struct dc_link *link); -void enable_dp_hpo_output(struct dc_link *link, -		const struct link_resource *link_res, -		const struct dc_link_settings *link_settings); -void disable_dp_hpo_output(struct dc_link *link, -		const struct link_resource *link_res, -		enum signal_type signal); - -void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); -void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); -void dp_enable_link_phy( -	struct dc_link *link, -	const struct link_resource *link_res, -	enum signal_type signal, -	enum clock_source_id clock_source, -	const struct dc_link_settings *link_settings); -void edp_add_delay_for_T9(struct dc_link *link); -bool edp_receiver_ready_T9(struct dc_link *link); -bool edp_receiver_ready_T7(struct dc_link *link); - -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, -		enum signal_type signal); - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, -		enum signal_type signal); - -bool dp_set_hw_training_pattern( -		struct dc_link *link, -		const struct link_resource *link_res, -		enum dc_dp_training_pattern pattern, -		uint32_t offset); - -void dp_set_hw_lane_settings( -		struct dc_link *link, -		const struct link_resource *link_res, -		const struct link_training_settings *link_settings, -		uint32_t offset); - -void dp_set_hw_test_pattern( -		struct dc_link *link, -		const struct link_resource *link_res, -		enum dp_test_pattern test_pattern, -		uint8_t *custom_pattern, -		uint32_t custom_pattern_size); - -void dp_retrain_link_dp_test(struct dc_link *link, -		struct dc_link_settings *link_setting, -		bool skip_video_pattern); -#endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h deleted file mode 100644 index 39c1d1d07357..000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_DPIA_H__ -#define __DC_LINK_DPIA_H__ - -/* This module implements functionality for training DPIA links. */ - -struct dc_link; -struct dc_link_settings; - -/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ -#define DPIA_CLK_SYNC_DELAY 16000 - -/* Extend interval between training status checks for manual testing. */ -#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 - -/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ -/* DPCD DP Tunneling over USB4 */ -#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d -#define DP_IN_ADAPTER_INFO                0xe000e -#define DP_USB4_DRIVER_ID                 0xe000f -#define DP_USB4_ROUTER_TOPOLOGY_ID        0xe001b - -/* SET_CONFIG message types sent by driver. */ -enum dpia_set_config_type { -	DPIA_SET_CFG_SET_LINK = 0x01, -	DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, -	DPIA_SET_CFG_SET_TRAINING = 0x18, -	DPIA_SET_CFG_SET_VSPE = 0x19 -}; - -/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ -enum dpia_set_config_ts { -	DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ -	DPIA_TS_TPS1 = 0x01, -	DPIA_TS_TPS2 = 0x02, -	DPIA_TS_TPS3 = 0x03, -	DPIA_TS_TPS4 = 0x07, -	DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ -}; - -/* SET_CONFIG message data associated with messages sent by driver. */ -union dpia_set_config_data { -	struct { -		uint8_t mode : 1; -		uint8_t reserved : 7; -	} set_link; -	struct { -		uint8_t stage; -	} set_training; -	struct { -		uint8_t swing : 2; -		uint8_t max_swing_reached : 1; -		uint8_t pre_emph : 2; -		uint8_t max_pre_emph_reached : 1; -		uint8_t reserved : 2; -	} set_vspe; -	uint8_t raw; -}; - -/* Read tunneling device capability from DPCD and update link capability - * accordingly. - */ -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); - -/* Query hot plug status of USB4 DP tunnel. - * Returns true if HPD high. - */ -bool dc_link_dpia_query_hpd_status(struct dc_link *link); - -/* Train DP tunneling link for USB4 DPIA display endpoint. - * DPIA equivalent of dc_link_dp_perfrorm_link_training. - * Aborts link training upon detection of sink unplug. 
- */ -enum link_training_result dc_link_dpia_perform_link_training( -	struct dc_link *link, -	const struct link_resource *link_res, -	const struct dc_link_settings *link_setting, -	bool skip_video_pattern); - -#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 2ae630bf2aee..7254182b7c72 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -27,7 +27,6 @@  #define __DAL_AUX_ENGINE_H__  #include "dc_ddc_types.h" -#include "include/i2caux_interface.h"  enum aux_return_code_type; @@ -81,7 +80,12 @@ enum i2c_default_speed {  	I2CAUX_DEFAULT_I2C_SW_SPEED = 50  }; -union aux_config; +union aux_config { +	struct { +		uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; +	} bits; +	uint32_t raw; +};  struct aux_engine {  	uint32_t inst; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index e7571c6f5ead..beb26dc8a07f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -46,7 +46,7 @@ struct dcn_hubbub_wm_set {  	uint32_t pte_meta_urgent;  	uint32_t sr_enter;  	uint32_t sr_exit; -	uint32_t dram_clk_chanage; +	uint32_t dram_clk_change;  	uint32_t usr_retrain;  	uint32_t fclk_pstate_change;  }; @@ -167,10 +167,27 @@ struct hubbub_funcs {  	void (*force_pstate_change_control)(struct hubbub *hubbub, bool force, bool allow);  	void (*init_watermarks)(struct hubbub *hubbub); + +	/** +	 * @program_det_size: +	 * +	 * DE-Tile buffers (DET) is a memory that is used to convert the tiled +	 * data into linear, which the rest of the display can use to generate +	 * the graphics output. One of the main features of this component is +	 * that each pipe has a configurable DET buffer which means that when a +	 * pipe is not enabled, the device can assign the memory to other +	 * enabled pipes to try to be more efficient. +	 * +	 * DET logic is handled by dchubbub. Some ASICs provide a feature named +	 * Configurable Return Buffer (CRB) segments which can be allocated to +	 * compressed or detiled buffers. 
+	 */  	void (*program_det_size)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_in_kbyte);  	void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase);  	void (*init_crb)(struct hubbub *hubbub);  	void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow); +	void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel); +	void (*dchubbub_init)(struct hubbub *hubbub);  };  struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index 8df2765cce78..de3113ecbc77 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -56,20 +56,6 @@ struct dmcu {  	bool auto_load_dmcu;  }; -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -struct crc_region { -	uint16_t x_start; -	uint16_t y_start; -	uint16_t x_end; -	uint16_t y_end; -}; - -struct otg_phy_mux { -	uint8_t phy_output_num; -	uint8_t otg_output_num; -}; -#endif -  struct dmcu_funcs {  	bool (*dmcu_init)(struct dmcu *dmcu);  	bool (*load_iram)(struct dmcu *dmcu, @@ -100,7 +86,7 @@ struct dmcu_funcs {  	bool (*recv_edid_cea_ack)(struct dmcu *dmcu, int *offset);  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)  	void (*forward_crc_window)(struct dmcu *dmcu, -			struct crc_region *crc_win, +			struct rect *rect,  			struct otg_phy_mux *mux_mapping);  	void (*stop_crc_win_update)(struct dmcu *dmcu,  			struct otg_phy_mux *mux_mapping); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index dcb80c4747b0..131fcfa28bca 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -83,10 +83,15 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] =  	{COLOR_SPACE_YCBCR709,  		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,  						0x2000, 0x3b61, 0xe24f} }, -  	{COLOR_SPACE_YCBCR709_LIMITED,  		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, -						0x2568, 0x43ee, 0xdbb2} } +						0x2568, 0x43ee, 0xdbb2} }, +	{COLOR_SPACE_2020_YCBCR, +		{0x2F30, 0x2000, 0, 0xE869, 0xEDB7, 0x2000, 0xFABC, 0xBC6, 0, +						0x2000, 0x3C34, 0xE1E6} }, +	{COLOR_SPACE_2020_RGB_LIMITEDRANGE, +		{0x35E0, 0x255F, 0, 0xE2B3, 0xEB20, 0x255F, 0xF9FD, 0xB1E, 0, +						0x255F, 0x44BD, 0xDB43} }  };  struct dpp_grph_csc_adjustment { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index cd2be729846b..a819f0f97c5f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -35,6 +35,13 @@   ******************************************************************************/  #define MAX_AUDIOS 7 + +/** + * @MAX_PIPES: + * + * Every ASIC support a fixed number of pipes; MAX_PIPES defines a large number + * to be used inside loops and for determining array sizes. 
+ */  #define MAX_PIPES 6  #define MAX_DIG_LINK_ENCODERS 7  #define MAX_DWB_PIPES	1 diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 42afa1952890..bb5ad70d4266 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -72,6 +72,12 @@ enum dynamic_metadata_mode {  	dmdata_dolby_vision  }; +struct enc_sdp_line_num { +	/* Adaptive Sync SDP */ +	bool adaptive_sync_line_num_valid; +	uint32_t adaptive_sync_line_num; +}; +  struct encoder_info_frame {  	/* auxiliary video information */  	struct dc_info_packet avi; @@ -85,6 +91,9 @@ struct encoder_info_frame {  	struct dc_info_packet vsc;  	/* HDR Static MetaData */  	struct dc_info_packet hdrsmd; +	/* Adaptive Sync SDP*/ +	struct dc_info_packet adaptive_sync; +	struct enc_sdp_line_num sdp_line_num;  };  struct encoder_unblank_param { @@ -154,6 +163,10 @@ struct stream_encoder_funcs {  	void (*stop_hdmi_info_packets)(  		struct stream_encoder *enc); +	void (*update_dp_info_packets_sdp_line_num)( +		struct stream_encoder *enc, +		struct encoder_info_frame *info_frame); +  	void (*update_dp_info_packets)(  		struct stream_encoder *enc,  		const struct encoder_info_frame *info_frame); @@ -243,6 +256,9 @@ struct stream_encoder_funcs {  			uint32_t hubp_requestor_id,  			enum dynamic_metadata_mode dmdata_mode); +	/** +	 * @dp_set_odm_combine: Sets up DP stream encoder for ODM. +	 */  	void (*dp_set_odm_combine)(  		struct stream_encoder *enc,  		bool odm_combine); @@ -299,6 +315,10 @@ struct hpo_dp_stream_encoder_funcs {  		bool compressed_format,  		bool double_buffer_en); +	void (*update_dp_info_packets_sdp_line_num)( +		struct hpo_dp_stream_encoder *enc, +		struct encoder_info_frame *info_frame); +  	void (*update_dp_info_packets)(  		struct hpo_dp_stream_encoder *enc,  		const struct encoder_info_frame *info_frame); @@ -317,9 +337,6 @@ struct hpo_dp_stream_encoder_funcs {  			uint32_t stream_enc_inst,  			uint32_t link_enc_inst); -	void (*audio_mute_control)( -			struct hpo_dp_stream_encoder *enc, bool mute); -  	void (*dp_audio_setup)(  			struct hpo_dp_stream_encoder *enc,  			unsigned int az_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 25a1df45b264..1d9f9c53d2bd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -185,6 +185,7 @@ struct timing_generator_funcs {  #ifdef CONFIG_DRM_AMD_DC_DCN  	void (*phantom_crtc_post_enable)(struct timing_generator *tg);  #endif +	void (*disable_phantom_crtc)(struct timing_generator *tg);  	bool (*immediate_disable_crtc)(struct timing_generator *tg);  	bool (*is_counter_moving)(struct timing_generator *tg);  	void (*get_position)(struct timing_generator *tg, @@ -301,6 +302,11 @@ struct timing_generator_funcs {  	void (*get_dsc_status)(struct timing_generator *optc,  					uint32_t *dsc_mode);  	void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); + +	/** +	 * @set_odm_combine: Set up the ODM block to read from the correct +	 * OPP(s) and turn on/off ODM memory. 
+	 */  	void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,  			struct dc_crtc_timing *timing);  	void (*set_h_timing_div_manual_mode)(struct timing_generator *optc, bool manual_mode); @@ -325,6 +331,7 @@ struct timing_generator_funcs {  			uint32_t vtotal_change_limit);  	void (*init_odm)(struct timing_generator *tg); +	void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);  };  #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d04b68dad413..88ac723d10aa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -263,8 +263,10 @@ struct hw_sequencer_funcs {  	void (*update_phantom_vp_position)(struct dc *dc,  			struct dc_state *context,  			struct pipe_ctx *phantom_pipe); +	void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);  	void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); +	void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);  	void (*subvp_pipe_control_lock)(struct dc *dc,  			struct dc_state *context,  			bool lock, diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h new file mode 100644 index 000000000000..e70fa0059223 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -0,0 +1,157 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_H__ +#define __DC_LINK_H__ + +/* FILE POLICY AND INTENDED USAGE: + * + * This header declares link functions exposed to dc. All functions must have + * "link_" as prefix. For example link_run_my_function. This header is strictly + * private in dc and should never be included in other header files. dc + * components should include this header in their .c files in order to access + * functions in link folder. This file should never include any header files in + * link folder. If there is a need to expose a function declared in one of + * header files in side link folder, you need to move the function declaration + * into this file and prefix it with "link_". + */ +#include "core_types.h" +#include "dc_link.h" + +struct link_init_data { +	const struct dc *dc; +	struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. 
*/ +	uint32_t connector_index; /* this will be mapped to the HPD pins */ +	uint32_t link_index; /* this is mapped to DAL display_index +				TODO: remove it when DC is complete. */ +	bool is_dpia_link; +}; + +struct dc_link *link_create(const struct link_init_data *init_params); +void link_destroy(struct dc_link **link); + +// TODO - convert any function declarations below to function pointers +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, +		struct graphics_object_id link_id, +		struct gpio_service *gpio_service); + +struct ddc_service_init_data { +	struct graphics_object_id id; +	struct dc_context *ctx; +	struct dc_link *link; +	bool is_dpia_link; +}; + +struct ddc_service *link_create_ddc_service( +		struct ddc_service_init_data *ddc_init_data); + +void link_destroy_ddc_service(struct ddc_service **ddc); + +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc); + +bool link_query_ddc_data( +		struct ddc_service *ddc, +		uint32_t address, +		uint8_t *write_buf, +		uint32_t write_size, +		uint8_t *read_buf, +		uint32_t read_size); + + +/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy + * states as outlined in the DP spec.  Returns true if the request was + * successful. + * + * NOTE: The function requires explicit mutex on DM side in order to prevent + * potential race condition. DC components should call the dpcd read/write + * function in dm_helpers in order to access dpcd safely + */ +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, +		struct aux_payload *payload); + +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc); + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); + +enum dp_link_encoding link_dp_get_encoding_format( +		const struct dc_link_settings *link_settings); + +bool link_decide_link_settings( +	struct dc_stream_state *stream, +	struct dc_link_settings *link_setting); + +void link_dp_trace_set_edp_power_timestamp(struct dc_link *link, +		bool power_up); +uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link); +uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link); + +bool link_is_edp_ilr_optimization_required(struct dc_link *link, +		struct dc_crtc_timing *crtc_timing); + +bool link_backlight_enable_aux(struct dc_link *link, bool enable); +void link_edp_add_delay_for_T9(struct dc_link *link); +bool link_edp_receiver_ready_T9(struct dc_link *link); +bool link_edp_receiver_ready_T7(struct dc_link *link); +bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable); +bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, +		uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); +void link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +void link_blank_all_dp_displays(struct dc *dc); +void link_blank_all_edp_displays(struct dc *dc); +void link_blank_dp_stream(struct dc_link *link, bool hw_init); +void link_resume(struct dc_link *link); +void link_set_dpms_on( +		struct dc_state *state, +		struct pipe_ctx *pipe_ctx); +void link_set_dpms_off(struct pipe_ctx *pipe_ctx); +void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx); +enum dc_status 
link_validate_mode_timing( +		const struct dc_stream_state *stream, +		struct dc_link *link, +		const struct dc_crtc_timing *timing); +bool link_detect(struct dc_link *link, enum dc_detect_reason reason); +bool link_detect_connection_type(struct dc_link *link, +		enum dc_connection_type *type); +const struct dc_link_status *link_get_status(const struct dc_link *link); +#ifdef CONFIG_DRM_AMD_DC_HDCP +/* return true if the connected receiver supports the hdcp version */ +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal); +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal); +#endif +void link_clear_dprx_states(struct dc_link *link); +bool link_reset_cur_dp_mst_topology(struct dc_link *link); +uint32_t dp_link_bandwidth_kbps( +	const struct dc_link *link, +	const struct dc_link_settings *link_settings); +uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing); +void link_get_cur_res_map(const struct dc *dc, uint32_t *map); +void link_restore_res_map(const struct dc *dc, uint32_t *map); + +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 89964c980b87..0f69946cce9f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -38,6 +38,7 @@ struct link_resource;  struct pipe_ctx;  struct encoder_set_dp_phy_pattern_param;  struct link_mst_stream_allocation_table; +struct audio_output;  struct link_hwss_ext {  	/* function pointers below may require to check for NULL if caller @@ -79,6 +80,10 @@ struct link_hwss {  	void (*disable_link_output)(struct dc_link *link,  			const struct link_resource *link_res,  			enum signal_type signal); +	void (*setup_audio_output)(struct pipe_ctx *pipe_ctx, +			struct audio_output *audio_output, uint32_t audio_inst); +	void (*enable_audio_packet)(struct pipe_ctx *pipe_ctx); +	void (*disable_audio_packet)(struct pipe_ctx *pipe_ctx);  };  #endif /* __DC_LINK_HWSS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 5040836f404d..fa6da93caa88 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -165,10 +165,6 @@ bool resource_validate_attach_surfaces(  		struct dc_state *context,  		const struct resource_pool *pool); -void resource_validate_ctx_update_pointer_after_copy( -		const struct dc_state *src_ctx, -		struct dc_state *dst_ctx); -  enum dc_status resource_map_clock_resources(  		const struct dc *dc,  		struct dc_state *context, @@ -236,4 +232,13 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(  		struct pipe_ctx *pri_pipe,  		struct pipe_ctx *sec_pipe,  		bool odm); + +/* A test harness interface that modifies dp encoder resources in the given dc + * state and bypasses the need to revalidate. The interface assumes that the + * test harness interface is called with pre-validated link config stored in the + * pipe_ctx and updates dp encoder resources according to the link config. 
+ */ +enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, +		struct dc_state *context, +		struct pipe_ctx *pipe_ctx);  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 45f99351a0ab..3c7cb3dc046b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -28,20 +28,19 @@  #include "include/logger_interface.h"  #include "../dce110/irq_service_dce110.h" +#include "irq_service_dcn201.h"  #include "dcn/dcn_2_0_3_offset.h"  #include "dcn/dcn_2_0_3_sh_mask.h"  #include "cyan_skillfish_ip_offset.h"  #include "soc15_hw_ip.h" - -#include "irq_service_dcn201.h" -  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service, -						   uint32_t src_id, -						   uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn201( +		struct irq_service *irq_service, +		uint32_t src_id, +		uint32_t ext_id)  {  	switch (src_id) {  	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: @@ -79,7 +78,6 @@ static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_servi  	default:  		return DC_IRQ_SOURCE_INVALID;  	} -	return DC_IRQ_SOURCE_INVALID;  }  static bool hpd_ack( diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h index 8e27c5e219a3..0cfd2f2d62e8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h @@ -1,5 +1,5 @@  /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc.   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 7bad39bba86b..d100edaedbbb 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -112,8 +112,15 @@ bool dal_irq_service_set(  	dal_irq_service_ack(irq_service, source); -	if (info->funcs && info->funcs->set) +	if (info->funcs && info->funcs->set) { +		if (info->funcs->set == dal_irq_service_dummy_set) { +			DC_LOG_WARNING("%s: src: %d, st: %d\n", __func__, +				       source, enable); +			ASSERT(0); +		} +  		return info->funcs->set(irq_service, info, enable); +	}  	dal_irq_service_set_generic(irq_service, info, enable); @@ -146,8 +153,14 @@ bool dal_irq_service_ack(  		return false;  	} -	if (info->funcs && info->funcs->ack) +	if (info->funcs && info->funcs->ack) { +		if (info->funcs->ack == dal_irq_service_dummy_ack) { +			DC_LOG_WARNING("%s: src: %d\n", __func__, source); +			ASSERT(0); +		} +  		return info->funcs->ack(irq_service, info); +	}  	dal_irq_service_ack_generic(irq_service, info); diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index 054c2a727eb2..40352d8d7648 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -23,8 +23,41 @@  # It abstracts the control and status of back end pipe such as DIO, HPO, DPIA,  # PHY, HPD, DDC and etc). 
-LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o +LINK = link_detection.o link_dpms.o link_factory.o link_resource.o \ +link_validation.o -AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK)) +AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/, \ +$(LINK))  AMD_DISPLAY_FILES += $(AMD_DAL_LINK) +############################################################################### +# accessories +############################################################################### +LINK_ACCESSORIES = link_dp_trace.o link_dp_cts.o link_fpga.o + +AMD_DAL_LINK_ACCESSORIES = $(addprefix $(AMDDALPATH)/dc/link/accessories/, \ +$(LINK_ACCESSORIES)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES) +############################################################################### +# hwss +############################################################################### +LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o + +AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ +$(LINK_HWSS)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_HWSS) +############################################################################### +# protocols +############################################################################### +LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \ +link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \ +link_dp_training_dpia.o link_dp_training_auxless.o \ +link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \ +link_edp_panel_control.o link_dp_irq_handler.o + +AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \ +$(LINK_PROTOCOLS)) + +AMD_DISPLAY_FILES += $(AMD_DAL_LINK_PROTOCOLS)
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c new file mode 100644 index 000000000000..942300e0bd92 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -0,0 +1,1046 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_dp_cts.h" +#include "link/link_resource.h" +#include "link/protocols/link_dpcd.h" +#include "link/protocols/link_dp_training.h" +#include "link/protocols/link_dp_phy.h" +#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h" +#include "link/link_dpms.h" +#include "resource.h" +#include "dm_helpers.h" +#include "dc_dmub_srv.h" +#include "dce/dmub_hw_lock_mgr.h" + +#define DC_LOGGER \ +	link->ctx->logger + +static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) +{ +	switch (test_rate) { +	case DP_TEST_LINK_RATE_RBR: +		return LINK_RATE_LOW; +	case DP_TEST_LINK_RATE_HBR: +		return LINK_RATE_HIGH; +	case DP_TEST_LINK_RATE_HBR2: +		return LINK_RATE_HIGH2; +	case DP_TEST_LINK_RATE_HBR3: +		return LINK_RATE_HIGH3; +	case DP_TEST_LINK_RATE_UHBR10: +		return LINK_RATE_UHBR10; +	case DP_TEST_LINK_RATE_UHBR20: +		return LINK_RATE_UHBR20; +	case DP_TEST_LINK_RATE_UHBR13_5: +		return LINK_RATE_UHBR13_5; +	default: +		return LINK_RATE_UNKNOWN; +	} +} + +static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) +{ +	return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && +			test_pattern <= DP_TEST_PATTERN_SQUARE_END); +} + +static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) +{ +	if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && +			test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || +			test_pattern == DP_TEST_PATTERN_VIDEO_MODE) +		return true; +	else +		return false; +} + +void dp_retrain_link_dp_test(struct dc_link *link, +			struct dc_link_settings *link_setting, +			bool skip_video_pattern) +{ +	struct pipe_ctx *pipes[MAX_PIPES]; +	struct dc_state *state = link->dc->current_state; +	uint8_t count; +	int i; + +	udelay(100); + +	link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + +	for (i = 0; i < count; i++) { +		link_set_dpms_off(pipes[i]); +		pipes[i]->link_config.dp_link_settings = *link_setting; +		update_dp_encoder_resources_for_test_harness( +				link->dc, +				state, +				pipes[i]); +	} + +	for (i = count-1; i >= 0; i--) +	
	link_set_dpms_on(state, pipes[i]); +} + +static void dp_test_send_link_training(struct dc_link *link) +{ +	struct dc_link_settings link_settings = {0}; +	uint8_t test_rate = 0; + +	core_link_read_dpcd( +			link, +			DP_TEST_LANE_COUNT, +			(unsigned char *)(&link_settings.lane_count), +			1); +	core_link_read_dpcd( +			link, +			DP_TEST_LINK_RATE, +			&test_rate, +			1); +	link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); + +	/* Set preferred link settings */ +	link->verified_link_cap.lane_count = link_settings.lane_count; +	link->verified_link_cap.link_rate = link_settings.link_rate; + +	dp_retrain_link_dp_test(link, &link_settings, false); +} + +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ +	union audio_test_mode            dpcd_test_mode = {0}; +	struct audio_test_pattern_type   dpcd_pattern_type = {0}; +	union audio_test_pattern_period  dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; +	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + +	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; +	struct pipe_ctx *pipe_ctx = &pipes[0]; +	unsigned int channel_count; +	unsigned int channel = 0; +	unsigned int modes = 0; +	unsigned int sampling_rate_in_hz = 0; + +	// get audio test mode and test pattern parameters +	core_link_read_dpcd( +		link, +		DP_TEST_AUDIO_MODE, +		&dpcd_test_mode.raw, +		sizeof(dpcd_test_mode)); + +	core_link_read_dpcd( +		link, +		DP_TEST_AUDIO_PATTERN_TYPE, +		&dpcd_pattern_type.value, +		sizeof(dpcd_pattern_type)); + +	channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); + +	// read pattern periods for requested channels when sawTooth pattern is requested +	if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || +			dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + +		test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
+				DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; +		// read period for each channel +		for (channel = 0; channel < channel_count; channel++) { +			core_link_read_dpcd( +							link, +							DP_TEST_AUDIO_PERIOD_CH1 + channel, +							&dpcd_pattern_period[channel].raw, +							sizeof(dpcd_pattern_period[channel])); +		} +	} + +	// translate sampling rate +	switch (dpcd_test_mode.bits.sampling_rate) { +	case AUDIO_SAMPLING_RATE_32KHZ: +		sampling_rate_in_hz = 32000; +		break; +	case AUDIO_SAMPLING_RATE_44_1KHZ: +		sampling_rate_in_hz = 44100; +		break; +	case AUDIO_SAMPLING_RATE_48KHZ: +		sampling_rate_in_hz = 48000; +		break; +	case AUDIO_SAMPLING_RATE_88_2KHZ: +		sampling_rate_in_hz = 88200; +		break; +	case AUDIO_SAMPLING_RATE_96KHZ: +		sampling_rate_in_hz = 96000; +		break; +	case AUDIO_SAMPLING_RATE_176_4KHZ: +		sampling_rate_in_hz = 176400; +		break; +	case AUDIO_SAMPLING_RATE_192KHZ: +		sampling_rate_in_hz = 192000; +		break; +	default: +		sampling_rate_in_hz = 0; +		break; +	} + +	link->audio_test_data.flags.test_requested = 1; +	link->audio_test_data.flags.disable_video = disable_video; +	link->audio_test_data.sampling_rate = sampling_rate_in_hz; +	link->audio_test_data.channel_count = channel_count; +	link->audio_test_data.pattern_type = test_pattern; + +	if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { +		for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { +			link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; +		} +	} +} + +/* TODO Raven hbr2 compliance eye output is unstable + * (toggling on and off) with debugger break + * This caueses intermittent PHY automation failure + * Need to look into the root cause */ +static void dp_test_send_phy_test_pattern(struct dc_link *link) +{ +	union phy_test_pattern dpcd_test_pattern; +	union lane_adjust dpcd_lane_adjustment[2]; +	unsigned char dpcd_post_cursor_2_adjustment = 0; +	unsigned char test_pattern_buffer[ +			(DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - +			DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; +	unsigned int test_pattern_size = 0; +	enum dp_test_pattern test_pattern; +	union lane_adjust dpcd_lane_adjust; +	unsigned int lane; +	struct link_training_settings link_training_settings; +	unsigned char no_preshoot = 0; +	unsigned char no_deemphasis = 0; + +	dpcd_test_pattern.raw = 0; +	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); +	memset(&link_training_settings, 0, sizeof(link_training_settings)); + +	/* get phy test pattern and pattern parameters from DP receiver */ +	core_link_read_dpcd( +			link, +			DP_PHY_TEST_PATTERN, +			&dpcd_test_pattern.raw, +			sizeof(dpcd_test_pattern)); +	core_link_read_dpcd( +			link, +			DP_ADJUST_REQUEST_LANE0_1, +			&dpcd_lane_adjustment[0].raw, +			sizeof(dpcd_lane_adjustment)); + +	/* prepare link training settings */ +	link_training_settings.link_settings = link->cur_link_settings; + +	link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings); + +	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT) +		dp_fixed_vs_pe_read_lane_adjust( +				link, +				link_training_settings.dpcd_lane_settings); + +	/*get post cursor 2 parameters +	 * For DP 1.1a or eariler, this DPCD register's value is 0 +	 * For DP 1.2 or later: +	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1 +	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3 +	 */ +	core_link_read_dpcd( 
+			link, +			DP_ADJUST_REQUEST_POST_CURSOR2, +			&dpcd_post_cursor_2_adjustment, +			sizeof(dpcd_post_cursor_2_adjustment)); + +	/* translate request */ +	switch (dpcd_test_pattern.bits.PATTERN) { +	case PHY_TEST_PATTERN_D10_2: +		test_pattern = DP_TEST_PATTERN_D102; +		break; +	case PHY_TEST_PATTERN_SYMBOL_ERROR: +		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR; +		break; +	case PHY_TEST_PATTERN_PRBS7: +		test_pattern = DP_TEST_PATTERN_PRBS7; +		break; +	case PHY_TEST_PATTERN_80BIT_CUSTOM: +		test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM; +		break; +	case PHY_TEST_PATTERN_CP2520_1: +		/* CP2520 pattern is unstable, temporarily use TPS4 instead */ +		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? +				DP_TEST_PATTERN_TRAINING_PATTERN4 : +				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; +		break; +	case PHY_TEST_PATTERN_CP2520_2: +		/* CP2520 pattern is unstable, temporarily use TPS4 instead */ +		test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? +				DP_TEST_PATTERN_TRAINING_PATTERN4 : +				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; +		break; +	case PHY_TEST_PATTERN_CP2520_3: +		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; +		break; +	case PHY_TEST_PATTERN_128b_132b_TPS1: +		test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; +		break; +	case PHY_TEST_PATTERN_128b_132b_TPS2: +		test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; +		break; +	case PHY_TEST_PATTERN_PRBS9: +		test_pattern = DP_TEST_PATTERN_PRBS9; +		break; +	case PHY_TEST_PATTERN_PRBS11: +		test_pattern = DP_TEST_PATTERN_PRBS11; +		break; +	case PHY_TEST_PATTERN_PRBS15: +		test_pattern = DP_TEST_PATTERN_PRBS15; +		break; +	case PHY_TEST_PATTERN_PRBS23: +		test_pattern = DP_TEST_PATTERN_PRBS23; +		break; +	case PHY_TEST_PATTERN_PRBS31: +		test_pattern = DP_TEST_PATTERN_PRBS31; +		break; +	case PHY_TEST_PATTERN_264BIT_CUSTOM: +		test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; +		break; +	case PHY_TEST_PATTERN_SQUARE: +		test_pattern = DP_TEST_PATTERN_SQUARE; +		break; +	case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: +		test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; +		no_preshoot = 1; +		break; +	case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: +		test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; +		no_deemphasis = 1; +		break; +	case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: +		test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; +		no_preshoot = 1; +		no_deemphasis = 1; +		break; +	default: +		test_pattern = DP_TEST_PATTERN_VIDEO_MODE; +	break; +	} + +	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { +		test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - +				DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; +		core_link_read_dpcd( +				link, +				DP_TEST_80BIT_CUSTOM_PATTERN_7_0, +				test_pattern_buffer, +				test_pattern_size); +	} + +	if (is_dp_phy_sqaure_pattern(test_pattern)) { +		test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) +		core_link_read_dpcd( +				link, +				DP_PHY_SQUARE_PATTERN, +				test_pattern_buffer, +				test_pattern_size); +	} + +	if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { +		test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- +				DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; +		core_link_read_dpcd( +				link, +				DP_TEST_264BIT_CUSTOM_PATTERN_7_0, +				test_pattern_buffer, +				test_pattern_size); +	} + +	for (lane = 0; lane < +		(unsigned int)(link->cur_link_settings.lane_count); +		lane++) { +		dpcd_lane_adjust.raw = +			dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); +		if 
(link_dp_get_encoding_format(&link->cur_link_settings) ==
+				DP_8b_10b_ENCODING) {
+			link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
+				(enum dc_voltage_swing)
+				(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+			link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
+				(enum dc_pre_emphasis)
+				(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+			link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
+				(enum dc_post_cursor2)
+				((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+		} else if (link_dp_get_encoding_format(&link->cur_link_settings) ==
+				DP_128b_132b_ENCODING) {
+			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
+					dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot;
+			link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis;
+		}
+	}
+
+	dp_hw_to_dpcd_lane_settings(&link_training_settings,
+			link_training_settings.hw_lane_settings,
+			link_training_settings.dpcd_lane_settings);
+	/*Usage: Measure DP physical lane signal
+	 * by DP SI test equipment automatically.
+	 * PHY test pattern request is generated by equipment via HPD interrupt.
+	 * HPD needs to be active all the time. HPD should be active
+	 * all the time. Do not touch it.
+	 * forward request to DS
+	 */
+	dc_link_dp_set_test_pattern(
+		link,
+		test_pattern,
+		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
+		&link_training_settings,
+		test_pattern_buffer,
+		test_pattern_size);
+}
+
+static void set_crtc_test_pattern(struct dc_link *link,
+				struct pipe_ctx *pipe_ctx,
+				enum dp_test_pattern test_pattern,
+				enum dp_test_pattern_color_space test_pattern_color_space)
+{
+	enum controller_dp_test_pattern controller_test_pattern;
+	enum dc_color_depth color_depth = pipe_ctx->
+		stream->timing.display_color_depth;
+	struct bit_depth_reduction_params params;
+	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+	int width = pipe_ctx->stream->timing.h_addressable +
+		pipe_ctx->stream->timing.h_border_left +
+		pipe_ctx->stream->timing.h_border_right;
+	int height = pipe_ctx->stream->timing.v_addressable +
+		pipe_ctx->stream->timing.v_border_bottom +
+		pipe_ctx->stream->timing.v_border_top;
+
+	memset(&params, 0, sizeof(params));
+
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+	break;
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+	break;
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+	break;
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+	break;
+	case DP_TEST_PATTERN_COLOR_RAMP:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+	break;
+	default:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+	break;
+	}
+
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+	case DP_TEST_PATTERN_COLOR_RAMP:
+	{
+		/* disable bit depth reduction */
+		pipe_ctx->stream->bit_depth_params = params;
+		opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+				controller_test_pattern, color_depth);
+		else if (link->dc->hwss.set_disp_pattern_generator) {
+			struct pipe_ctx *odm_pipe;
+			enum controller_dp_color_space controller_color_space;
+			int opp_cnt = 1;
+			int offset = 0;
+			int dpg_width = width;
+
+			switch (test_pattern_color_space) {
+			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+			default:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+				ASSERT(0);
+				break;
+			}
+
+			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+				opp_cnt++;
+			dpg_width = width / opp_cnt;
+			offset = dpg_width;
+
+			link->dc->hwss.set_disp_pattern_generator(link->dc,
+					pipe_ctx,
+					controller_test_pattern,
+					controller_color_space,
+					color_depth,
+					NULL,
+					dpg_width,
+					height,
+					0);
+
+			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+				link->dc->hwss.set_disp_pattern_generator(link->dc,
+						odm_pipe,
+						controller_test_pattern,
+						controller_color_space,
+						color_depth,
+						NULL,
+						dpg_width,
+						height,
+						offset);
+				offset += offset;
+			}
+		}
+	}
+	break;
+	case DP_TEST_PATTERN_VIDEO_MODE:
+	{
+		/* restore bitdepth reduction */
+		resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
+		pipe_ctx->stream->bit_depth_params = params;
+		opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+				color_depth);
+		else if (link->dc->hwss.set_disp_pattern_generator) {
+			struct pipe_ctx *odm_pipe;
+			int opp_cnt = 1;
+			int dpg_width;
+
+			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+				opp_cnt++;
+
+			dpg_width = width / opp_cnt;
+			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+				link->dc->hwss.set_disp_pattern_generator(link->dc,
+						odm_pipe,
+						CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+						CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+						color_depth,
+						NULL,
+						dpg_width,
+						height,
+						0);
+			}
+			link->dc->hwss.set_disp_pattern_generator(link->dc,
+					pipe_ctx,
+					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+					CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+					color_depth,
+					NULL,
+					dpg_width,
+					height,
+					0);
+		}
+	}
+	break;
+
+	default:
+	break;
+	}
+}
+
+void dc_link_dp_handle_automated_test(struct dc_link *link)
+{
+	union test_request test_request;
+	union test_response test_response;
+
+	memset(&test_request, 0, sizeof(test_request));
+	memset(&test_response, 0,
sizeof(test_response)); + +	core_link_read_dpcd( +		link, +		DP_TEST_REQUEST, +		&test_request.raw, +		sizeof(union test_request)); +	if (test_request.bits.LINK_TRAINING) { +		/* ACK first to let DP RX test box monitor LT sequence */ +		test_response.bits.ACK = 1; +		core_link_write_dpcd( +			link, +			DP_TEST_RESPONSE, +			&test_response.raw, +			sizeof(test_response)); +		dp_test_send_link_training(link); +		/* no acknowledge request is needed again */ +		test_response.bits.ACK = 0; +	} +	if (test_request.bits.LINK_TEST_PATTRN) { +		union test_misc dpcd_test_params; +		union link_test_pattern dpcd_test_pattern; + +		memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); +		memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); + +		/* get link test pattern and pattern parameters */ +		core_link_read_dpcd( +				link, +				DP_TEST_PATTERN, +				&dpcd_test_pattern.raw, +				sizeof(dpcd_test_pattern)); +		core_link_read_dpcd( +				link, +				DP_TEST_MISC0, +				&dpcd_test_params.raw, +				sizeof(dpcd_test_params)); +		test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link, +				dpcd_test_pattern, dpcd_test_params) ? 1 : 0; +	} + +	if (test_request.bits.AUDIO_TEST_PATTERN) { +		dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); +		test_response.bits.ACK = 1; +	} + +	if (test_request.bits.PHY_TEST_PATTERN) { +		dp_test_send_phy_test_pattern(link); +		test_response.bits.ACK = 1; +	} + +	/* send request acknowledgment */ +	if (test_response.bits.ACK) +		core_link_write_dpcd( +			link, +			DP_TEST_RESPONSE, +			&test_response.raw, +			sizeof(test_response)); +} + +bool dc_link_dp_set_test_pattern( +	struct dc_link *link, +	enum dp_test_pattern test_pattern, +	enum dp_test_pattern_color_space test_pattern_color_space, +	const struct link_training_settings *p_link_settings, +	const unsigned char *p_custom_pattern, +	unsigned int cust_pattern_size) +{ +	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; +	struct pipe_ctx *pipe_ctx = NULL; +	unsigned int lane; +	unsigned int i; +	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0}; +	union dpcd_training_pattern training_pattern; +	enum dpcd_phy_test_patterns pattern; + +	memset(&training_pattern, 0, sizeof(training_pattern)); + +	for (i = 0; i < MAX_PIPES; i++) { +		if (pipes[i].stream == NULL) +			continue; + +		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { +			pipe_ctx = &pipes[i]; +			break; +		} +	} + +	if (pipe_ctx == NULL) +		return false; + +	/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ +	if (link->test_pattern_enabled && test_pattern == +			DP_TEST_PATTERN_VIDEO_MODE) { +		/* Set CRTC Test Pattern */ +		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); +		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, +				(uint8_t *)p_custom_pattern, +				(uint32_t)cust_pattern_size); + +		/* Unblank Stream */ +		link->dc->hwss.unblank_stream( +			pipe_ctx, +			&link->verified_link_cap); +		/* TODO:m_pHwss->MuteAudioEndpoint +		 * (pPathMode->pDisplayPath, false); +		 */ + +		/* Reset Test Pattern state */ +		link->test_pattern_enabled = false; + +		return true; +	} + +	/* Check for PHY Test Patterns */ +	if (is_dp_phy_pattern(test_pattern)) { +		/* Set DPCD Lane Settings before running test pattern */ +		if (p_link_settings != NULL) { +			if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +					p_link_settings->lttpr_mode == 
LTTPR_MODE_TRANSPARENT) { +				dp_fixed_vs_pe_set_retimer_lane_settings( +						link, +						p_link_settings->dpcd_lane_settings, +						p_link_settings->link_settings.lane_count); +			} else { +				dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); +			} +			dpcd_set_lane_settings(link, p_link_settings, DPRX); +		} + +		/* Blank stream if running test pattern */ +		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { +			/*TODO: +			 * m_pHwss-> +			 * MuteAudioEndpoint(pPathMode->pDisplayPath, true); +			 */ +			/* Blank stream */ +			link->dc->hwss.blank_stream(pipe_ctx); +		} + +		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, +				(uint8_t *)p_custom_pattern, +				(uint32_t)cust_pattern_size); + +		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { +			/* Set Test Pattern state */ +			link->test_pattern_enabled = true; +			if (p_link_settings != NULL) +				dpcd_set_link_settings(link, +						p_link_settings); +		} + +		switch (test_pattern) { +		case DP_TEST_PATTERN_VIDEO_MODE: +			pattern = PHY_TEST_PATTERN_NONE; +			break; +		case DP_TEST_PATTERN_D102: +			pattern = PHY_TEST_PATTERN_D10_2; +			break; +		case DP_TEST_PATTERN_SYMBOL_ERROR: +			pattern = PHY_TEST_PATTERN_SYMBOL_ERROR; +			break; +		case DP_TEST_PATTERN_PRBS7: +			pattern = PHY_TEST_PATTERN_PRBS7; +			break; +		case DP_TEST_PATTERN_80BIT_CUSTOM: +			pattern = PHY_TEST_PATTERN_80BIT_CUSTOM; +			break; +		case DP_TEST_PATTERN_CP2520_1: +			pattern = PHY_TEST_PATTERN_CP2520_1; +			break; +		case DP_TEST_PATTERN_CP2520_2: +			pattern = PHY_TEST_PATTERN_CP2520_2; +			break; +		case DP_TEST_PATTERN_CP2520_3: +			pattern = PHY_TEST_PATTERN_CP2520_3; +			break; +		case DP_TEST_PATTERN_128b_132b_TPS1: +			pattern = PHY_TEST_PATTERN_128b_132b_TPS1; +			break; +		case DP_TEST_PATTERN_128b_132b_TPS2: +			pattern = PHY_TEST_PATTERN_128b_132b_TPS2; +			break; +		case DP_TEST_PATTERN_PRBS9: +			pattern = PHY_TEST_PATTERN_PRBS9; +			break; +		case DP_TEST_PATTERN_PRBS11: +			pattern = PHY_TEST_PATTERN_PRBS11; +			break; +		case DP_TEST_PATTERN_PRBS15: +			pattern = PHY_TEST_PATTERN_PRBS15; +			break; +		case DP_TEST_PATTERN_PRBS23: +			pattern = PHY_TEST_PATTERN_PRBS23; +			break; +		case DP_TEST_PATTERN_PRBS31: +			pattern = PHY_TEST_PATTERN_PRBS31; +			break; +		case DP_TEST_PATTERN_264BIT_CUSTOM: +			pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; +			break; +		case DP_TEST_PATTERN_SQUARE: +			pattern = PHY_TEST_PATTERN_SQUARE; +			break; +		case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: +			pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; +			break; +		case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: +			pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; +			break; +		case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: +			pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; +			break; +		default: +			return false; +		} + +		if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE +		/*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/) +			return false; + +		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +			if (is_dp_phy_sqaure_pattern(test_pattern)) +				core_link_write_dpcd(link, +						DP_LINK_SQUARE_PATTERN, +						p_custom_pattern, +						1); + +			/* tell receiver that we are sending qualification +			 * pattern DP 1.2 or later - DP receiver's link quality +			 * pattern is set using DPCD LINK_QUAL_LANEx_SET +			 * register (0x10B~0x10E)\ +			 */ +			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) +				link_qual_pattern[lane] = +						(unsigned char)(pattern); + +			
core_link_write_dpcd(link, +					DP_LINK_QUAL_LANE0_SET, +					link_qual_pattern, +					sizeof(link_qual_pattern)); +		} else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 || +			   link->dpcd_caps.dpcd_rev.raw == 0) { +			/* tell receiver that we are sending qualification +			 * pattern DP 1.1a or earlier - DP receiver's link +			 * quality pattern is set using +			 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET +			 * register (0x102). We will use v_1.3 when we are +			 * setting test pattern for DP 1.1. +			 */ +			core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET, +					    &training_pattern.raw, +					    sizeof(training_pattern)); +			training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern; +			core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET, +					     &training_pattern.raw, +					     sizeof(training_pattern)); +		} +	} else { +		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + +		switch (test_pattern_color_space) { +		case DP_TEST_PATTERN_COLOR_SPACE_RGB: +			color_space = COLOR_SPACE_SRGB; +			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) +				color_space = COLOR_SPACE_SRGB_LIMITED; +			break; + +		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: +			color_space = COLOR_SPACE_YCBCR601; +			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) +				color_space = COLOR_SPACE_YCBCR601_LIMITED; +			break; +		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: +			color_space = COLOR_SPACE_YCBCR709; +			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) +				color_space = COLOR_SPACE_YCBCR709_LIMITED; +			break; +		default: +			break; +		} + +		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { +			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { +				union dmub_hw_lock_flags hw_locks = { 0 }; +				struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + +				hw_locks.bits.lock_dig = 1; +				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + +				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, +							true, +							&hw_locks, +							&inst_flags); +			} else +				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( +						pipe_ctx->stream_res.tg); +		} + +		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); +		/* update MSA to requested color space */ +		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, +				&pipe_ctx->stream->timing, +				color_space, +				pipe_ctx->stream->use_vsc_sdp_for_colorimetry, +				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + +		if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { +			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) +				pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range +			else +				pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); +			resource_build_info_frame(pipe_ctx); +			link->dc->hwss.update_info_frame(pipe_ctx); +		} + +		/* CRTC Patterns */ +		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); +		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); +		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, +				CRTC_STATE_VACTIVE); +		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, +				CRTC_STATE_VBLANK); +		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, +				CRTC_STATE_VACTIVE); + +		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { +			if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { +				union 
dmub_hw_lock_flags hw_locks = { 0 }; +				struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + +				hw_locks.bits.lock_dig = 1; +				inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + +				dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, +							false, +							&hw_locks, +							&inst_flags); +			} else +				pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( +						pipe_ctx->stream_res.tg); +		} + +		/* Set Test Pattern state */ +		link->test_pattern_enabled = true; +	} + +	return true; +} + +void dc_link_set_drive_settings(struct dc *dc, +				struct link_training_settings *lt_settings, +				const struct dc_link *link) +{ + +	int i; +	struct link_resource link_res; + +	for (i = 0; i < dc->link_count; i++) +		if (dc->links[i] == link) +			break; + +	if (i >= dc->link_count) +		ASSERT_CRITICAL(false); + +	link_get_cur_link_res(link, &link_res); +	dp_set_drive_settings(dc->links[i], &link_res, lt_settings); +} + +void dc_link_set_preferred_link_settings(struct dc *dc, +					 struct dc_link_settings *link_setting, +					 struct dc_link *link) +{ +	int i; +	struct pipe_ctx *pipe; +	struct dc_stream_state *link_stream; +	struct dc_link_settings store_settings = *link_setting; + +	link->preferred_link_setting = store_settings; + +	/* Retrain with preferred link settings only relevant for +	 * DP signal type +	 * Check for non-DP signal or if passive dongle present +	 */ +	if (!dc_is_dp_signal(link->connector_signal) || +		link->dongle_max_pix_clk > 0) +		return; + +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe->stream && pipe->stream->link) { +			if (pipe->stream->link == link) { +				link_stream = pipe->stream; +				break; +			} +		} +	} + +	/* Stream not found */ +	if (i == MAX_PIPES) +		return; + +	/* Cannot retrain link if backend is off */ +	if (link_stream->dpms_off) +		return; + +	if (link_decide_link_settings(link_stream, &store_settings)) +		dp_retrain_link_dp_test(link, &store_settings, false); +} + +void dc_link_set_preferred_training_settings(struct dc *dc, +						 struct dc_link_settings *link_setting, +						 struct dc_link_training_overrides *lt_overrides, +						 struct dc_link *link, +						 bool skip_immediate_retrain) +{ +	if (lt_overrides != NULL) +		link->preferred_training_settings = *lt_overrides; +	else +		memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); + +	if (link_setting != NULL) { +		link->preferred_link_setting = *link_setting; +	} else { +		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; +		link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; +	} + +	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && +			link->type == dc_connection_mst_branch) +		dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link); + +	/* Retrain now, or wait until next stream update to apply */ +	if (skip_immediate_retrain == false) +		dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); +} + +void dc_link_set_test_pattern(struct dc_link *link, +		enum dp_test_pattern test_pattern, +		enum dp_test_pattern_color_space test_pattern_color_space, +		const struct link_training_settings *p_link_settings, +		const unsigned char *p_custom_pattern, +		unsigned int cust_pattern_size) +{ +	if (link != NULL) +		dc_link_dp_set_test_pattern( +			link, +			test_pattern, +			test_pattern_color_space, +			p_link_settings, +			p_custom_pattern, +			cust_pattern_size); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h 
b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h new file mode 100644 index 000000000000..7f17838b653b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_DP_CTS_H__ +#define __LINK_DP_CTS_H__ +#include "link.h" + +void dp_retrain_link_dp_test(struct dc_link *link, +		struct dc_link_settings *link_setting, +		bool skip_video_pattern); + +#endif /* __LINK_DP_CTS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c index 2c1a3bfcdb50..459b362ed374 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -22,8 +22,9 @@   * Authors: AMD   *   */ -#include "dc_link.h"  #include "link_dp_trace.h" +#include "link/protocols/link_dpcd.h" +#include "link.h"  void dp_trace_init(struct dc_link *link)  { @@ -145,7 +146,7 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)  	return link->dp_trace.link_loss_count;  } -void dp_trace_set_edp_power_timestamp(struct dc_link *link, +void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,  		bool power_up)  {  	if (!power_up) @@ -155,12 +156,19 @@ void dp_trace_set_edp_power_timestamp(struct dc_link *link,  		link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);  } -uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link) +uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link)  {  	return link->dp_trace.edp_trace_power_timestamps.poweron;  } -uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link) +uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)  {  	return link->dp_trace.edp_trace_power_timestamps.poweroff; -}
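Editorial note (not part of the patch): the hunk above renames the eDP power-trace getters to link_dp_trace_get_edp_poweron_timestamp()/link_dp_trace_get_edp_poweroff_timestamp(). A minimal sketch of how a caller might consume them to wait out a panel power-cycle interval, assuming only helpers visible elsewhere in this series (dm_get_timestamp(), dm_get_elapse_time_in_ns(), div_u64(), msleep()); the 500 ms bound is illustrative, not taken from the patch:

/* Editorial sketch: derive elapsed time since the last eDP power-off and
 * sleep for the remainder of an assumed 500 ms power-cycle interval.
 */
static void example_wait_for_edp_power_cycle(struct dc_link *link)
{
	uint64_t poweroff_ts = link_dp_trace_get_edp_poweroff_timestamp(link);
	uint64_t now;
	unsigned long long elapsed_ms;

	if (poweroff_ts == 0)
		return; /* panel was never powered off; nothing to wait for */

	now = dm_get_timestamp(link->dc->ctx);
	elapsed_ms = div_u64(dm_get_elapse_time_in_ns(link->dc->ctx, now, poweroff_ts),
			1000000);
	if (elapsed_ms < 500)
		msleep(500 - elapsed_ms);
}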
\ No newline at end of file +} + +void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) +{ +	if (link != NULL && link->dc->debug.enable_driver_sequence_debug) +		core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, +					&dp_test_mode, sizeof(dp_test_mode)); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h index 26700e3cd65e..89feea1b2692 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h @@ -24,6 +24,7 @@   */  #ifndef __LINK_DP_TRACE_H__  #define __LINK_DP_TRACE_H__ +#include "link.h"  void dp_trace_init(struct dc_link *link);  void dp_trace_reset(struct dc_link *link); @@ -54,9 +55,4 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,  		bool in_detection);  unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); -void dp_trace_set_edp_power_timestamp(struct dc_link *link, -		bool power_up); -uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link); -uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link); -  #endif /* __LINK_DP_TRACE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c new file mode 100644 index 000000000000..d3cc604eed67 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c @@ -0,0 +1,95 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_fpga.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" +#include "link_hwss.h" +#include "dccg.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) +{ +	struct dc *dc = pipe_ctx->stream->ctx->dc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct link_mst_stream_allocation_table proposed_table = {0}; +	struct fixed31_32 avg_time_slots_per_mtp; +	uint8_t req_slot_count = 0; +	uint8_t vc_id = 1; /// VC ID always 1 for SST +	struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings; +	const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	stream->link->cur_link_settings = link_settings; + +	if (link_hwss->ext.enable_dp_link_output) +		link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, +				stream->signal, pipe_ctx->clock_source->id, +				&link_settings); + +	/* Enable DP_STREAM_ENC */ +	dc->hwss.enable_stream(pipe_ctx); + +	/* Set DPS PPS SDP (AKA "info frames") */ +	if (pipe_ctx->stream->timing.flags.DSC) { +		link_set_dsc_pps_packet(pipe_ctx, true, true); +	} + +	/* Allocate Payload */ +	if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { +		// MST case +		uint8_t i; + +		proposed_table.stream_count = state->stream_count; +		for (i = 0; i < state->stream_count; i++) { +			avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); +			req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); +			proposed_table.stream_allocations[i].slot_count = req_slot_count; +			proposed_table.stream_allocations[i].vcp_id = i+1; +			/* NOTE: This makes assumption that pipe_ctx index is same as stream index */ +			proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; +		} +	} else { +		// SST case +		avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, stream->link); +		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); +		proposed_table.stream_count = 1; /// Always 1 stream for SST +		proposed_table.stream_allocations[0].slot_count = req_slot_count; +		proposed_table.stream_allocations[0].vcp_id = vc_id; +		proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; +	} + +	link_hwss->ext.update_stream_allocation_table(stream->link, +			&pipe_ctx->link_res, +			&proposed_table); + +	if (link_hwss->ext.set_throttled_vcp_size) +		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + +	dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); +	dc->hwss.enable_audio_stream(pipe_ctx); +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h new file mode 100644 index 000000000000..3a80f5595943 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FPGA_H__ +#define __LINK_FPGA_H__ +#include "link.h" +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, +		struct pipe_ctx *pipe_ctx); +#endif /* __LINK_FPGA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index 4227adbc646a..b092b00b3599 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -24,7 +24,6 @@   */  #include "link_hwss_dio.h"  #include "core_types.h" -#include "dc_link_dp.h"  #include "link_enc_cfg.h"  void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, @@ -45,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)  	link_enc->funcs->connect_dig_be_to_fe(link_enc,  			pipe_ctx->stream_res.stream_enc->id, true);  	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(pipe_ctx->stream->link, +		link_dp_source_sequence_trace(pipe_ctx->stream->link,  				DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);  	if (stream_enc->funcs->enable_fifo)  		stream_enc->funcs->enable_fifo(stream_enc); @@ -64,7 +63,7 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)  			pipe_ctx->stream_res.stream_enc->id,  			false);  	if (dc_is_dp_signal(pipe_ctx->stream->signal)) -		dp_source_sequence_trace(pipe_ctx->stream->link, +		link_dp_source_sequence_trace(pipe_ctx->stream->link,  				DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);  } @@ -106,7 +105,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)  				&stream->timing);  	if (dc_is_dp_signal(stream->signal)) -		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); +		link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);  }  void enable_dio_dp_link_output(struct dc_link *link, @@ -127,7 +126,7 @@ void enable_dio_dp_link_output(struct dc_link *link,  				link_enc,  				link_settings,  				clock_source); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);  }  void disable_dio_link_output(struct dc_link *link, @@ -137,7 +136,7 @@ void disable_dio_link_output(struct dc_link *link,  	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);  	link_enc->funcs->disable_output(link_enc, signal); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +	
link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);  }  void set_dio_dp_link_test_pattern(struct dc_link *link, @@ -147,7 +146,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,  	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);  	link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);  }  void set_dio_dp_lane_settings(struct dc_link *link, @@ -170,11 +169,63 @@ static void update_dio_stream_allocation_table(struct dc_link *link,  	link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);  } +void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, +		struct audio_output *audio_output, uint32_t audio_inst) +{ +	if (dc_is_dp_signal(pipe_ctx->stream->signal)) +		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( +				pipe_ctx->stream_res.stream_enc, +				audio_inst, +				&pipe_ctx->stream->audio_info); +	else +		pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( +				pipe_ctx->stream_res.stream_enc, +				audio_inst, +				&pipe_ctx->stream->audio_info, +				&audio_output->crtc_info); +} + +void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx) +{ +	if (dc_is_dp_signal(pipe_ctx->stream->signal)) +		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable( +				pipe_ctx->stream_res.stream_enc); + +	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( +			pipe_ctx->stream_res.stream_enc, false); + +	if (dc_is_dp_signal(pipe_ctx->stream->signal)) +		link_dp_source_sequence_trace(pipe_ctx->stream->link, +				DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM); +} + +void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx) +{ +	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( +			pipe_ctx->stream_res.stream_enc, true); + +	if (pipe_ctx->stream_res.audio) { +		if (dc_is_dp_signal(pipe_ctx->stream->signal)) +			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( +					pipe_ctx->stream_res.stream_enc); +		else +			pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( +					pipe_ctx->stream_res.stream_enc); +	} + +	if (dc_is_dp_signal(pipe_ctx->stream->signal)) +		link_dp_source_sequence_trace(pipe_ctx->stream->link, +				DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM); +} +  static const struct link_hwss dio_link_hwss = {  	.setup_stream_encoder = setup_dio_stream_encoder,  	.reset_stream_encoder = reset_dio_stream_encoder,  	.setup_stream_attribute = setup_dio_stream_attribute,  	.disable_link_output = disable_dio_link_output, +	.setup_audio_output = setup_dio_audio_output, +	.enable_audio_packet = enable_dio_audio_packet, +	.disable_audio_packet = disable_dio_audio_packet,  	.ext = {  		.set_throttled_vcp_size = set_dio_throttled_vcp_size,  		.enable_dp_link_output = enable_dio_dp_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 126d37f847a1..8b8a099feeb0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -26,6 +26,7 @@  #define __LINK_HWSS_DIO_H__  #include "link_hwss.h" +#include "link.h"  const struct link_hwss *get_dio_link_hwss(void);  bool can_use_dio_link_hwss(const struct dc_link *link, @@ -50,5 +51,9 @@ void set_dio_dp_lane_settings(struct dc_link *link,  		const struct link_resource *link_res,  		const struct dc_link_settings *link_settings,  		const struct dc_lane_settings 
lane_settings[LANE_COUNT_DP_MAX]); +void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, +		struct audio_output *audio_output, uint32_t audio_inst); +void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx); +void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx);  #endif /* __LINK_HWSS_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c index 64f7ea6a9aa3..861f3cd5b356 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c @@ -57,6 +57,9 @@ static const struct link_hwss dpia_link_hwss = {  	.reset_stream_encoder = reset_dio_stream_encoder,  	.setup_stream_attribute = setup_dio_stream_attribute,  	.disable_link_output = disable_dio_link_output, +	.setup_audio_output = setup_dio_audio_output, +	.enable_audio_packet = enable_dio_audio_packet, +	.disable_audio_packet = disable_dio_audio_packet,  	.ext = {  		.set_throttled_vcp_size = set_dio_throttled_vcp_size,  		.enable_dp_link_output = enable_dio_dp_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h index ad16ec5d9bb7..ad16ec5d9bb7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index 153a88381f2c..aa1c5e253b43 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -26,7 +26,6 @@  #include "dm_helpers.h"  #include "core_types.h"  #include "dccg.h" -#include "dc_link_dp.h"  #include "clk_mgr.h"  static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) @@ -87,57 +86,20 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,  			hblank_min_symbol_width);  } -static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) -{ -	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -	int count = 1; - -	while (odm_pipe != NULL) { -		count++; -		odm_pipe = odm_pipe->next_odm_pipe; -	} - -	return count; -} -  static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)  { -	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;  	struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; -	struct dccg *dccg = dc->res_pool->dccg; -	struct timing_generator *tg = pipe_ctx->stream_res.tg; -	struct dtbclk_dto_params dto_params = {0}; -	enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); - -	dto_params.otg_inst = tg->inst; -	dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; -	dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); -	dto_params.timing = &pipe_ctx->stream->timing; -	dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); -	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst); -	dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); -	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);  	stream_enc->funcs->enable_stream(stream_enc);  	stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);  }  static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)  { -	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct hpo_dp_stream_encoder 
*stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; -	struct dccg *dccg = dc->res_pool->dccg; -	struct timing_generator *tg = pipe_ctx->stream_res.tg; -	struct dtbclk_dto_params dto_params = {0}; - -	dto_params.otg_inst = tg->inst; -	dto_params.timing = &pipe_ctx->stream->timing;  	stream_enc->funcs->disable(stream_enc); -	dccg->funcs->set_dtbclk_dto(dccg, &dto_params); -	dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); -	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);  }  static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) @@ -153,7 +115,7 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)  			stream->use_vsc_sdp_for_colorimetry,  			stream->timing.flags.DSC,  			false); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);  }  static void enable_hpo_dp_fpga_link_output(struct dc_link *link, @@ -239,7 +201,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link,  {  	link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(  			link_res->hpo_dp_link_enc, tp_params); -	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +	link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);  }  static void set_hpo_dp_lane_settings(struct dc_link *link, @@ -262,11 +224,36 @@ static void update_hpo_dp_stream_allocation_table(struct dc_link *link,  			table);  } +static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, +		struct audio_output *audio_output, uint32_t audio_inst) +{ +	pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( +			pipe_ctx->stream_res.hpo_dp_stream_enc, +			audio_inst, +			&pipe_ctx->stream->audio_info); +} + +static void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +{ +	pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable( +			pipe_ctx->stream_res.hpo_dp_stream_enc); +} + +static void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +{ +	if (pipe_ctx->stream_res.audio) +		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( +				pipe_ctx->stream_res.hpo_dp_stream_enc); +} +  static const struct link_hwss hpo_dp_link_hwss = {  	.setup_stream_encoder = setup_hpo_dp_stream_encoder,  	.reset_stream_encoder = reset_hpo_dp_stream_encoder,  	.setup_stream_attribute = setup_hpo_dp_stream_attribute,  	.disable_link_output = disable_hpo_dp_link_output, +	.setup_audio_output = setup_hpo_dp_audio_output, +	.enable_audio_packet = enable_hpo_dp_audio_packet, +	.disable_audio_packet = disable_hpo_dp_audio_packet,  	.ext = {  		.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,  		.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 57d447ec27b8..3cbb94b41a23 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -26,6 +26,7 @@  #define __LINK_HWSS_HPO_DP_H__  #include "link_hwss.h" +#include "link.h"  bool can_use_hpo_dp_link_hwss(const struct dc_link *link,  		const struct link_resource *link_res); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c new file mode 100644 index 000000000000..38216c789d77 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -0,0 +1,1323 @@ +/* + * Copyright 2022 
Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file manages link detection states and receiver states by using various + * link protocols. It also provides helper functions to interpret certain + * capabilities or status based on the states it manages or retrieve them + * directly from connected receivers. + */ + +#include "link_dpms.h" +#include "link_detection.h" +#include "link_hwss.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_dpia.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_training.h" +#include "accessories/link_dp_trace.h" + +#include "link_enc_cfg.h" +#include "dm_helpers.h" +#include "clk_mgr.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ +	DC_LOG_HW_HOTPLUG(  \ +		__VA_ARGS__) +/* + * Some receivers fail to train on first try and are good + * on subsequent tries. 2 retries should be plenty. If we + * don't have a successful training then we don't expect to + * ever get one. + */ +#define LINK_TRAINING_MAX_VERIFY_RETRY 2 + +static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) +{ +	enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; + +	switch (sink_signal) { +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +	case SIGNAL_TYPE_HDMI_TYPE_A: +	case SIGNAL_TYPE_LVDS: +	case SIGNAL_TYPE_RGB: +		transaction_type = DDC_TRANSACTION_TYPE_I2C; +		break; + +	case SIGNAL_TYPE_DISPLAY_PORT: +	case SIGNAL_TYPE_EDP: +		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; +		break; + +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +		/* MST does not use I2COverAux, but there is the +		 * SPECIAL use case for "immediate dwnstrm device +		 * access" (EPR#370830). 
+		 */ +		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; +		break; + +	default: +		break; +	} + +	return transaction_type; +} + +static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, +					      struct graphics_object_id downstream) +{ +	if (downstream.type == OBJECT_TYPE_CONNECTOR) { +		switch (downstream.id) { +		case CONNECTOR_ID_SINGLE_LINK_DVII: +			switch (encoder.id) { +			case ENCODER_ID_INTERNAL_DAC1: +			case ENCODER_ID_INTERNAL_KLDSCP_DAC1: +			case ENCODER_ID_INTERNAL_DAC2: +			case ENCODER_ID_INTERNAL_KLDSCP_DAC2: +				return SIGNAL_TYPE_RGB; +			default: +				return SIGNAL_TYPE_DVI_SINGLE_LINK; +			} +		break; +		case CONNECTOR_ID_DUAL_LINK_DVII: +		{ +			switch (encoder.id) { +			case ENCODER_ID_INTERNAL_DAC1: +			case ENCODER_ID_INTERNAL_KLDSCP_DAC1: +			case ENCODER_ID_INTERNAL_DAC2: +			case ENCODER_ID_INTERNAL_KLDSCP_DAC2: +				return SIGNAL_TYPE_RGB; +			default: +				return SIGNAL_TYPE_DVI_DUAL_LINK; +			} +		} +		break; +		case CONNECTOR_ID_SINGLE_LINK_DVID: +			return SIGNAL_TYPE_DVI_SINGLE_LINK; +		case CONNECTOR_ID_DUAL_LINK_DVID: +			return SIGNAL_TYPE_DVI_DUAL_LINK; +		case CONNECTOR_ID_VGA: +			return SIGNAL_TYPE_RGB; +		case CONNECTOR_ID_HDMI_TYPE_A: +			return SIGNAL_TYPE_HDMI_TYPE_A; +		case CONNECTOR_ID_LVDS: +			return SIGNAL_TYPE_LVDS; +		case CONNECTOR_ID_DISPLAY_PORT: +		case CONNECTOR_ID_USBC: +			return SIGNAL_TYPE_DISPLAY_PORT; +		case CONNECTOR_ID_EDP: +			return SIGNAL_TYPE_EDP; +		default: +			return SIGNAL_TYPE_NONE; +		} +	} else if (downstream.type == OBJECT_TYPE_ENCODER) { +		switch (downstream.id) { +		case ENCODER_ID_EXTERNAL_NUTMEG: +		case ENCODER_ID_EXTERNAL_TRAVIS: +			return SIGNAL_TYPE_DISPLAY_PORT; +		default: +			return SIGNAL_TYPE_NONE; +		} +	} + +	return SIGNAL_TYPE_NONE; +} + +/* + * @brief + * Detect output sink type + */ +static enum signal_type link_detect_sink_signal_type(struct dc_link *link, +					 enum dc_detect_reason reason) +{ +	enum signal_type result; +	struct graphics_object_id enc_id; + +	if (link->is_dig_mapping_flexible) +		enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; +	else +		enc_id = link->link_enc->id; +	result = get_basic_signal_type(enc_id, link->link_id); + +	/* Use basic signal type for link without physical connector. */ +	if (link->ep_type != DISPLAY_ENDPOINT_PHY) +		return result; + +	/* Internal digital encoder will detect only dongles +	 * that require digital signal +	 */ + +	/* Detection mechanism is different +	 * for different native connectors. +	 * LVDS connector supports only LVDS signal; +	 * PCIE is a bus slot, the actual connector needs to be detected first; +	 * eDP connector supports only eDP signal; +	 * HDMI should check straps for audio +	 */ + +	/* PCIE detects the actual connector on add-on board */ +	if (link->link_id.id == CONNECTOR_ID_PCIE) { +		/* ZAZTODO implement PCIE add-on card detection */ +	} + +	switch (link->link_id.id) { +	case CONNECTOR_ID_HDMI_TYPE_A: { +		/* check audio support: +		 * if native HDMI is not supported, switch to DVI +		 */ +		struct audio_support *aud_support = +					&link->dc->res_pool->audio_support; + +		if (!aud_support->hdmi_audio_native) +			if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) +				result = SIGNAL_TYPE_DVI_SINGLE_LINK; +	} +	break; +	case CONNECTOR_ID_DISPLAY_PORT: +	case CONNECTOR_ID_USBC: { +		/* DP HPD short pulse. 
Passive DP dongle will not +		 * have short pulse +		 */ +		if (reason != DETECT_REASON_HPDRX) { +			/* Check whether DP signal detected: if not - +			 * we assume signal is DVI; it could be corrected +			 * to HDMI after dongle detection +			 */ +			if (!dm_helpers_is_dp_sink_present(link)) +				result = SIGNAL_TYPE_DVI_SINGLE_LINK; +		} +	} +	break; +	default: +	break; +	} + +	return result; +} + +static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, +								 struct audio_support *audio_support) +{ +	enum signal_type signal = SIGNAL_TYPE_NONE; + +	switch (dongle_type) { +	case DISPLAY_DONGLE_DP_HDMI_DONGLE: +		if (audio_support->hdmi_audio_on_dongle) +			signal = SIGNAL_TYPE_HDMI_TYPE_A; +		else +			signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +		break; +	case DISPLAY_DONGLE_DP_DVI_DONGLE: +		signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +		break; +	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: +		if (audio_support->hdmi_audio_native) +			signal =  SIGNAL_TYPE_HDMI_TYPE_A; +		else +			signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +		break; +	default: +		signal = SIGNAL_TYPE_NONE; +		break; +	} + +	return signal; +} + +static void read_scdc_caps(struct ddc_service *ddc_service, +		struct dc_sink *sink) +{ +	uint8_t slave_address = HDMI_SCDC_ADDRESS; +	uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + +	link_query_ddc_data(ddc_service, slave_address, &offset, +			sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, +			sizeof(sink->scdc_caps.manufacturer_OUI.byte)); + +	offset = HDMI_SCDC_DEVICE_ID; + +	link_query_ddc_data(ddc_service, slave_address, &offset, +			sizeof(offset), &(sink->scdc_caps.device_id.byte), +			sizeof(sink->scdc_caps.device_id.byte)); +} + +static bool i2c_read( +	struct ddc_service *ddc, +	uint32_t address, +	uint8_t *buffer, +	uint32_t len) +{ +	uint8_t offs_data = 0; +	struct i2c_payload payloads[2] = { +		{ +		.write = true, +		.address = address, +		.length = 1, +		.data = &offs_data }, +		{ +		.write = false, +		.address = address, +		.length = len, +		.data = buffer } }; + +	struct i2c_command command = { +		.payloads = payloads, +		.number_of_payloads = 2, +		.engine = DDC_I2C_COMMAND_ENGINE, +		.speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + +	return dm_helpers_submit_i2c( +			ddc->ctx, +			ddc->link, +			&command); +} + +enum { +	DP_SINK_CAP_SIZE = +		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 +}; + +static void query_dp_dual_mode_adaptor( +	struct ddc_service *ddc, +	struct display_sink_capability *sink_cap) +{ +	uint8_t i; +	bool is_valid_hdmi_signature; +	enum display_dongle_type *dongle = &sink_cap->dongle_type; +	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; +	bool is_type2_dongle = false; +	int retry_count = 2; +	struct dp_hdmi_dongle_signature_data *dongle_signature; + +	/* Assume we have no valid DP passive dongle connected */ +	*dongle = DISPLAY_DONGLE_NONE; +	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; + +	/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ +	if (!i2c_read( +		ddc, +		DP_HDMI_DONGLE_ADDRESS, +		type2_dongle_buf, +		sizeof(type2_dongle_buf))) { +		/* Passive HDMI dongles can sometimes fail here without retrying*/ +		while (retry_count > 0) { +			if (i2c_read(ddc, +				DP_HDMI_DONGLE_ADDRESS, +				type2_dongle_buf, +				sizeof(type2_dongle_buf))) +				break; +			retry_count--; +		} +		if (retry_count == 0) { +			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; +			sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + +			CONN_DATA_DETECT(ddc->link, 
type2_dongle_buf, sizeof(type2_dongle_buf), +					"DP-DVI passive dongle %dMhz: ", +					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); +			return; +		} +	} + +	/* Check if Type 2 dongle.*/ +	if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) +		is_type2_dongle = true; + +	dongle_signature = +		(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + +	is_valid_hdmi_signature = true; + +	/* Check EOT */ +	if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { +		is_valid_hdmi_signature = false; +	} + +	/* Check signature */ +	for (i = 0; i < sizeof(dongle_signature->id); ++i) { +		/* If its not the right signature, +		 * skip mismatch in subversion byte.*/ +		if (dongle_signature->id[i] != +			dp_hdmi_dongle_signature_str[i] && i != 3) { + +			if (is_type2_dongle) { +				is_valid_hdmi_signature = false; +				break; +			} + +		} +	} + +	if (is_type2_dongle) { +		uint32_t max_tmds_clk = +			type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; + +		max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; + +		if (0 == max_tmds_clk || +				max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || +				max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { +			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + +			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, +					sizeof(type2_dongle_buf), +					"DP-DVI passive dongle %dMhz: ", +					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); +		} else { +			if (is_valid_hdmi_signature == true) { +				*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + +				CONN_DATA_DETECT(ddc->link, type2_dongle_buf, +						sizeof(type2_dongle_buf), +						"Type 2 DP-HDMI passive dongle %dMhz: ", +						max_tmds_clk); +			} else { +				*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + +				CONN_DATA_DETECT(ddc->link, type2_dongle_buf, +						sizeof(type2_dongle_buf), +						"Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", +						max_tmds_clk); + +			} + +			/* Multiply by 1000 to convert to kHz. 
*/ +			sink_cap->max_hdmi_pixel_clock = +				max_tmds_clk * 1000; +		} +		sink_cap->is_dongle_type_one = false; + +	} else { +		if (is_valid_hdmi_signature == true) { +			*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + +			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, +					sizeof(type2_dongle_buf), +					"Type 1 DP-HDMI passive dongle %dMhz: ", +					sink_cap->max_hdmi_pixel_clock / 1000); +		} else { +			*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + +			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, +					sizeof(type2_dongle_buf), +					"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", +					sink_cap->max_hdmi_pixel_clock / 1000); +		} +		sink_cap->is_dongle_type_one = true; +	} + +	return; +} + +static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, +						    struct display_sink_capability *sink_cap, +						    struct audio_support *audio_support) +{ +	query_dp_dual_mode_adaptor(ddc, sink_cap); + +	return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, +							audio_support); +} + +static void link_disconnect_sink(struct dc_link *link) +{ +	if (link->local_sink) { +		dc_sink_release(link->local_sink); +		link->local_sink = NULL; +	} + +	link->dpcd_sink_count = 0; +	//link->dpcd_caps.dpcd_rev.raw = 0; +} + +static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) +{ +	dc_sink_release(link->local_sink); +	link->local_sink = prev_sink; +} + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) +{ +	struct hdcp_protection_message msg22; +	struct hdcp_protection_message msg14; + +	memset(&msg22, 0, sizeof(struct hdcp_protection_message)); +	memset(&msg14, 0, sizeof(struct hdcp_protection_message)); +	memset(link->hdcp_caps.rx_caps.raw, 0, +		sizeof(link->hdcp_caps.rx_caps.raw)); + +	if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && +			link->ddc->transaction_type == +			DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || +			link->connector_signal == SIGNAL_TYPE_EDP) { +		msg22.data = link->hdcp_caps.rx_caps.raw; +		msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); +		msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; +	} else { +		msg22.data = &link->hdcp_caps.rx_caps.fields.version; +		msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); +		msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; +	} +	msg22.version = HDCP_VERSION_22; +	msg22.link = HDCP_LINK_PRIMARY; +	msg22.max_retries = 5; +	dc_process_hdcp_msg(signal, link, &msg22); + +	if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED; + +		msg14.data = &link->hdcp_caps.bcaps.raw; +		msg14.length = sizeof(link->hdcp_caps.bcaps.raw); +		msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; +		msg14.version = HDCP_VERSION_14; +		msg14.link = HDCP_LINK_PRIMARY; +		msg14.max_retries = 5; + +		status = dc_process_hdcp_msg(signal, link, &msg14); +	} + +} +#endif // CONFIG_DRM_AMD_DC_HDCP +static void read_current_link_settings_on_detect(struct dc_link *link) +{ +	union lane_count_set lane_count_set = {0}; +	uint8_t link_bw_set; +	uint8_t link_rate_set; +	uint32_t read_dpcd_retry_cnt = 10; +	enum dc_status status = DC_ERROR_UNEXPECTED; +	int i; +	union max_down_spread max_down_spread = {0}; + +	// Read DPCD 00101h to find out the number of lanes currently set +	for (i = 0; i < read_dpcd_retry_cnt; i++) { +		status = core_link_read_dpcd(link, +					     DP_LANE_COUNT_SET, +					     &lane_count_set.raw, +					     sizeof(lane_count_set)); 
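Editorial note (not part of the patch): read_current_link_settings_on_detect() wraps this DP_LANE_COUNT_SET read in a bounded retry, up to 10 attempts spaced 8 ms apart (about 80 ms worst case), because the first AUX read after VDD on can fail. A minimal sketch of the same bounded-retry pattern factored into a standalone helper, assuming only core_link_read_dpcd() and the DC_OK/DC_ERROR_UNEXPECTED status codes already used by the surrounding code:

/* Editorial sketch: retry a DPCD read up to 10 times, 8 ms apart,
 * mirroring the ~80 ms worst case targeted by the loop below.
 */
static enum dc_status example_read_dpcd_with_retry(struct dc_link *link,
		uint32_t address, uint8_t *data, uint32_t size)
{
	enum dc_status status = DC_ERROR_UNEXPECTED;
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		status = core_link_read_dpcd(link, address, data, size);
		if (status == DC_OK)
			break;
		msleep(8);
	}

	return status;
}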
+		/* First DPCD read after VDD ON can fail if the particular board +		 * does not have HPD pin wired correctly. So if DPCD read fails, +		 * which it should never happen, retry a few times. Target worst +		 * case scenario of 80 ms. +		 */ +		if (status == DC_OK) { +			link->cur_link_settings.lane_count = +					lane_count_set.bits.LANE_COUNT_SET; +			break; +		} + +		msleep(8); +	} + +	// Read DPCD 00100h to find if standard link rates are set +	core_link_read_dpcd(link, DP_LINK_BW_SET, +			    &link_bw_set, sizeof(link_bw_set)); + +	if (link_bw_set == 0) { +		if (link->connector_signal == SIGNAL_TYPE_EDP) { +			/* If standard link rates are not being used, +			 * Read DPCD 00115h to find the edp link rate set used +			 */ +			core_link_read_dpcd(link, DP_LINK_RATE_SET, +					    &link_rate_set, sizeof(link_rate_set)); + +			// edp_supported_link_rates_count = 0 for DP +			if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { +				link->cur_link_settings.link_rate = +					link->dpcd_caps.edp_supported_link_rates[link_rate_set]; +				link->cur_link_settings.link_rate_set = link_rate_set; +				link->cur_link_settings.use_link_rate_set = true; +			} +		} else { +			// Link Rate not found. Seamless boot may not work. +			ASSERT(false); +		} +	} else { +		link->cur_link_settings.link_rate = link_bw_set; +		link->cur_link_settings.use_link_rate_set = false; +	} +	// Read DPCD 00003h to find the max down spread. +	core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, +			    &max_down_spread.raw, sizeof(max_down_spread)); +	link->cur_link_settings.link_spread = +		max_down_spread.bits.MAX_DOWN_SPREAD ? +		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; +} + +static bool detect_dp(struct dc_link *link, +		      struct display_sink_capability *sink_caps, +		      enum dc_detect_reason reason) +{ +	struct audio_support *audio_support = &link->dc->res_pool->audio_support; + +	sink_caps->signal = link_detect_sink_signal_type(link, reason); +	sink_caps->transaction_type = +		get_ddc_transaction_type(sink_caps->signal); + +	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { +		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; +		if (!detect_dp_sink_caps(link)) +			return false; + +		if (is_dp_branch_device(link)) +			/* DP SST branch */ +			link->type = dc_connection_sst_branch; +	} else { +		/* DP passive dongles */ +		sink_caps->signal = dp_passive_dongle_detection(link->ddc, +								sink_caps, +								audio_support); +		link->dpcd_caps.dongle_type = sink_caps->dongle_type; +		link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; +		link->dpcd_caps.dpcd_rev.raw = 0; +	} + +	return true; +} + +static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) +{ +	if (old_edid->length != new_edid->length) +		return false; + +	if (new_edid->length == 0) +		return false; + +	return (memcmp(old_edid->raw_edid, +		       new_edid->raw_edid, new_edid->length) == 0); +} + +static bool wait_for_entering_dp_alt_mode(struct dc_link *link) +{ + +	/** +	 * something is terribly wrong if time out is > 200ms. 
(5Hz) +	 * 500 microseconds * 400 tries us 200 ms +	 **/ +	unsigned int sleep_time_in_microseconds = 500; +	unsigned int tries_allowed = 400; +	bool is_in_alt_mode; +	unsigned long long enter_timestamp; +	unsigned long long finish_timestamp; +	unsigned long long time_taken_in_ns; +	int tries_taken; + +	DC_LOGGER_INIT(link->ctx->logger); + +	/** +	 * this function will only exist if we are on dcn21 (is_in_alt_mode is a +	 *  function pointer, so checking to see if it is equal to 0 is the same +	 *  as checking to see if it is null +	 **/ +	if (!link->link_enc->funcs->is_in_alt_mode) +		return true; + +	is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); +	DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode); + +	if (is_in_alt_mode) +		return true; + +	enter_timestamp = dm_get_timestamp(link->ctx); + +	for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) { +		udelay(sleep_time_in_microseconds); +		/* ask the link if alt mode is enabled, if so return ok */ +		if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { +			finish_timestamp = dm_get_timestamp(link->ctx); +			time_taken_in_ns = +				dm_get_elapse_time_in_ns(link->ctx, +							 finish_timestamp, +							 enter_timestamp); +			DC_LOG_WARNING("Alt mode entered finished after %llu ms\n", +				       div_u64(time_taken_in_ns, 1000000)); +			return true; +		} +	} +	finish_timestamp = dm_get_timestamp(link->ctx); +	time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, +						    enter_timestamp); +	DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", +			div_u64(time_taken_in_ns, 1000000)); +	return false; +} + +static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ +	/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock +	 * reports DSC support. +	 */ +	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +			link->type == dc_connection_mst_branch && +			link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && +			link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && +			link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && +			!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) +		link->wa_flags.dpia_mst_dsc_always_on = true; +} + +static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ +	/* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
*/ +	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +		link->wa_flags.dpia_mst_dsc_always_on = false; +} + +static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) +{ +	DC_LOGGER_INIT(link->ctx->logger); + +	LINK_INFO("link=%d, mst branch is now Connected\n", +		  link->link_index); + +	link->type = dc_connection_mst_branch; +	apply_dpia_mst_dsc_always_on_wa(link); + +	dm_helpers_dp_update_branch_info(link->ctx, link); +	if (dm_helpers_dp_mst_start_top_mgr(link->ctx, +			link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { +		link_disconnect_sink(link); +	} else { +		link->type = dc_connection_sst_branch; +	} + +	return link->type == dc_connection_mst_branch; +} + +bool link_reset_cur_dp_mst_topology(struct dc_link *link) +{ +	DC_LOGGER_INIT(link->ctx->logger); + +	LINK_INFO("link=%d, mst branch is now Disconnected\n", +		  link->link_index); + +	revert_dpia_mst_dsc_always_on_wa(link); +	return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); +} + +static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, +		enum dc_detect_reason reason) +{ +	int i; +	bool can_apply_seamless_boot = false; + +	for (i = 0; i < dc->current_state->stream_count; i++) { +		if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { +			can_apply_seamless_boot = true; +			break; +		} +	} + +	return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; +} + +static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ +	dc_z10_restore(dc); +	clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +} + +static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ +	clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); +} + +static void verify_link_capability_destructive(struct dc_link *link, +		struct dc_sink *sink, +		enum dc_detect_reason reason) +{ +	bool should_prepare_phy_clocks = +			should_prepare_phy_clocks_for_link_verification(link->dc, reason); + +	if (should_prepare_phy_clocks) +		prepare_phy_clocks_for_destructive_link_verification(link->dc); + +	if (dc_is_dp_signal(link->local_sink->sink_signal)) { +		struct dc_link_settings known_limit_link_setting = +				dp_get_max_link_cap(link); +		link_set_all_streams_dpms_off_for_link(link); +		dp_verify_link_cap_with_retries( +				link, &known_limit_link_setting, +				LINK_TRAINING_MAX_VERIFY_RETRY); +	} else { +		ASSERT(0); +	} + +	if (should_prepare_phy_clocks) +		restore_phy_clocks_for_destructive_link_verification(link->dc); +} + +static void verify_link_capability_non_destructive(struct dc_link *link) +{ +	if (dc_is_dp_signal(link->local_sink->sink_signal)) { +		if (dc_is_embedded_signal(link->local_sink->sink_signal) || +				link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +			/* TODO - should we check link encoder's max link caps here? +			 * How do we know which link encoder to check from? 
+			 */ +			link->verified_link_cap = link->reported_link_cap; +		else +			link->verified_link_cap = dp_get_max_link_cap(link); +	} +} + +static bool should_verify_link_capability_destructively(struct dc_link *link, +		enum dc_detect_reason reason) +{ +	bool destrictive = false; +	struct dc_link_settings max_link_cap; +	bool is_link_enc_unavailable = link->link_enc && +			link->dc->res_pool->funcs->link_encs_assign && +			!link_enc_cfg_is_link_enc_avail( +					link->ctx->dc, +					link->link_enc->preferred_engine, +					link); + +	if (dc_is_dp_signal(link->local_sink->sink_signal)) { +		max_link_cap = dp_get_max_link_cap(link); +		destrictive = true; + +		if (link->dc->debug.skip_detection_link_training || +				dc_is_embedded_signal(link->local_sink->sink_signal) || +				link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { +			destrictive = false; +		} else if (link_dp_get_encoding_format(&max_link_cap) == +				DP_8b_10b_ENCODING) { +			if (link->dpcd_caps.is_mst_capable || +					is_link_enc_unavailable) { +				destrictive = false; +			} +		} +	} + +	return destrictive; +} + +static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, +		enum dc_detect_reason reason) +{ +	if (should_verify_link_capability_destructively(link, reason)) +		verify_link_capability_destructive(link, sink, reason); +	else +		verify_link_capability_non_destructive(link); +} + +/** + * detect_link_and_local_sink() - Detect if a sink is attached to a given link + * + * link->local_sink is created or destroyed as needed. + * + * This does not create remote sinks. + */ +static bool detect_link_and_local_sink(struct dc_link *link, +				  enum dc_detect_reason reason) +{ +	struct dc_sink_init_data sink_init_data = { 0 }; +	struct display_sink_capability sink_caps = { 0 }; +	uint32_t i; +	bool converter_disable_audio = false; +	struct audio_support *aud_support = &link->dc->res_pool->audio_support; +	bool same_edid = false; +	enum dc_edid_status edid_status; +	struct dc_context *dc_ctx = link->ctx; +	struct dc *dc = dc_ctx->dc; +	struct dc_sink *sink = NULL; +	struct dc_sink *prev_sink = NULL; +	struct dpcd_caps prev_dpcd_caps; +	enum dc_connection_type new_connection_type = dc_connection_none; +	const uint32_t post_oui_delay = 30; // 30ms + +	DC_LOGGER_INIT(link->ctx->logger); + +	if (dc_is_virtual_signal(link->connector_signal)) +		return false; + +	if (((link->connector_signal == SIGNAL_TYPE_LVDS || +		link->connector_signal == SIGNAL_TYPE_EDP) && +		(!link->dc->config.allow_edp_hotplug_detection)) && +		link->local_sink) { +		// need to re-write OUI and brightness in resume case +		if (link->connector_signal == SIGNAL_TYPE_EDP && +			(link->dpcd_sink_ext_caps.bits.oled == 1)) { +			dpcd_set_source_specific_data(link); +			msleep(post_oui_delay); +			set_default_brightness_aux(link); +			//TODO: use cached +		} + +		return true; +	} + +	if (!dc_link_detect_connection_type(link, &new_connection_type)) { +		BREAK_TO_DEBUGGER(); +		return false; +	} + +	prev_sink = link->local_sink; +	if (prev_sink) { +		dc_sink_retain(prev_sink); +		memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); +	} + +	link_disconnect_sink(link); +	if (new_connection_type != dc_connection_none) { +		link->type = new_connection_type; +		link->link_state_valid = false; + +		/* From Disconnected-to-Connected. 
*/ +		switch (link->connector_signal) { +		case SIGNAL_TYPE_HDMI_TYPE_A: { +			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; +			if (aud_support->hdmi_audio_native) +				sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; +			else +				sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +			break; +		} + +		case SIGNAL_TYPE_DVI_SINGLE_LINK: { +			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; +			sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +			break; +		} + +		case SIGNAL_TYPE_DVI_DUAL_LINK: { +			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; +			sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; +			break; +		} + +		case SIGNAL_TYPE_LVDS: { +			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; +			sink_caps.signal = SIGNAL_TYPE_LVDS; +			break; +		} + +		case SIGNAL_TYPE_EDP: { +			detect_edp_sink_caps(link); +			read_current_link_settings_on_detect(link); + +			/* Disable power sequence on MIPI panel + converter +			 */ +			if (dc->config.enable_mipi_converter_optimization && +				dc_ctx->dce_version == DCN_VERSION_3_01 && +				link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && +				memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, +					sizeof(link->dpcd_caps.branch_dev_name)) == 0) { +				dc->config.edp_no_power_sequencing = true; + +				if (!link->dpcd_caps.set_power_state_capable_edp) +					link->wa_flags.dp_keep_receiver_powered = true; +			} + +			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; +			sink_caps.signal = SIGNAL_TYPE_EDP; +			break; +		} + +		case SIGNAL_TYPE_DISPLAY_PORT: { + +			/* wa HPD high coming too early*/ +			if (link->ep_type == DISPLAY_ENDPOINT_PHY && +			    link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { + +				/* if alt mode times out, return false */ +				if (!wait_for_entering_dp_alt_mode(link)) +					return false; +			} + +			if (!detect_dp(link, &sink_caps, reason)) { +				if (prev_sink) +					dc_sink_release(prev_sink); +				return false; +			} + +			/* Active SST downstream branch device unplug*/ +			if (link->type == dc_connection_sst_branch && +			    link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { +				if (prev_sink) +					/* Downstream unplug */ +					dc_sink_release(prev_sink); +				return true; +			} + +			/* disable audio for non DP to HDMI active sst converter */ +			if (link->type == dc_connection_sst_branch && +					is_dp_active_dongle(link) && +					(link->dpcd_caps.dongle_type != +							DISPLAY_DONGLE_DP_HDMI_CONVERTER)) +				converter_disable_audio = true; +			break; +		} + +		default: +			DC_ERROR("Invalid connector type! 
signal:%d\n", +				 link->connector_signal); +			if (prev_sink) +				dc_sink_release(prev_sink); +			return false; +		} /* switch() */ + +		if (link->dpcd_caps.sink_count.bits.SINK_COUNT) +			link->dpcd_sink_count = +				link->dpcd_caps.sink_count.bits.SINK_COUNT; +		else +			link->dpcd_sink_count = 1; + +		set_ddc_transaction_type(link->ddc, +						     sink_caps.transaction_type); + +		link->aux_mode = +			link_is_in_aux_transaction_mode(link->ddc); + +		sink_init_data.link = link; +		sink_init_data.sink_signal = sink_caps.signal; + +		sink = dc_sink_create(&sink_init_data); +		if (!sink) { +			DC_ERROR("Failed to create sink!\n"); +			if (prev_sink) +				dc_sink_release(prev_sink); +			return false; +		} + +		sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; +		sink->converter_disable_audio = converter_disable_audio; + +		/* dc_sink_create returns a new reference */ +		link->local_sink = sink; + +		edid_status = dm_helpers_read_local_edid(link->ctx, +							 link, sink); + +		switch (edid_status) { +		case EDID_BAD_CHECKSUM: +			DC_LOG_ERROR("EDID checksum invalid.\n"); +			break; +		case EDID_PARTIAL_VALID: +			DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); +			break; +		case EDID_NO_RESPONSE: +			DC_LOG_ERROR("No EDID read.\n"); +			/* +			 * Abort detection for non-DP connectors if we have +			 * no EDID +			 * +			 * DP needs to report as connected if HDP is high +			 * even if we have no EDID in order to go to +			 * fail-safe mode +			 */ +			if (dc_is_hdmi_signal(link->connector_signal) || +			    dc_is_dvi_signal(link->connector_signal)) { +				if (prev_sink) +					dc_sink_release(prev_sink); + +				return false; +			} + +			if (link->type == dc_connection_sst_branch && +					link->dpcd_caps.dongle_type == +						DISPLAY_DONGLE_DP_VGA_CONVERTER && +					reason == DETECT_REASON_HPDRX) { +				/* Abort detection for DP-VGA adapters when EDID +				 * can't be read and detection reason is VGA-side +				 * hotplug +				 */ +				if (prev_sink) +					dc_sink_release(prev_sink); +				link_disconnect_sink(link); + +				return true; +			} + +			break; +		default: +			break; +		} + +		// Check if edid is the same +		if ((prev_sink) && +		    (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) +			same_edid = is_same_edid(&prev_sink->dc_edid, +						 &sink->dc_edid); + +		if (sink->edid_caps.panel_patch.skip_scdc_overwrite) +			link->ctx->dc->debug.hdmi20_disable = true; + +		if (dc_is_hdmi_signal(link->connector_signal)) +			read_scdc_caps(link->ddc, link->local_sink); + +		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && +		    sink_caps.transaction_type == +		    DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { +			/* +			 * TODO debug why certain monitors don't like +			 *  two link trainings +			 */ +#if defined(CONFIG_DRM_AMD_DC_HDCP) +			query_hdcp_capability(sink->sink_signal, link); +#endif +		} else { +			// If edid is the same, then discard new sink and revert back to original sink +			if (same_edid) { +				link_disconnect_remap(prev_sink, link); +				sink = prev_sink; +				prev_sink = NULL; +			} +#if defined(CONFIG_DRM_AMD_DC_HDCP) +			query_hdcp_capability(sink->sink_signal, link); +#endif +		} + +		/* HDMI-DVI Dongle */ +		if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && +		    !sink->edid_caps.edid_hdmi) +			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + +		if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) +			dp_trace_init(link); + +		/* Connectivity log: detection */ +		for (i = 0; i < sink->dc_edid.length / 
DC_EDID_BLOCK_SIZE; i++) { +			CONN_DATA_DETECT(link, +					 &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], +					 DC_EDID_BLOCK_SIZE, +					 "%s: [Block %d] ", sink->edid_caps.display_name, i); +		} + +		DC_LOG_DETECTION_EDID_PARSER("%s: " +			"manufacturer_id = %X, " +			"product_id = %X, " +			"serial_number = %X, " +			"manufacture_week = %d, " +			"manufacture_year = %d, " +			"display_name = %s, " +			"speaker_flag = %d, " +			"audio_mode_count = %d\n", +			__func__, +			sink->edid_caps.manufacturer_id, +			sink->edid_caps.product_id, +			sink->edid_caps.serial_number, +			sink->edid_caps.manufacture_week, +			sink->edid_caps.manufacture_year, +			sink->edid_caps.display_name, +			sink->edid_caps.speaker_flags, +			sink->edid_caps.audio_mode_count); + +		for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { +			DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, " +				"format_code = %d, " +				"channel_count = %d, " +				"sample_rate = %d, " +				"sample_size = %d\n", +				__func__, +				i, +				sink->edid_caps.audio_modes[i].format_code, +				sink->edid_caps.audio_modes[i].channel_count, +				sink->edid_caps.audio_modes[i].sample_rate, +				sink->edid_caps.audio_modes[i].sample_size); +		} + +		if (link->connector_signal == SIGNAL_TYPE_EDP) { +			// Init dc_panel_config by HW config +			if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults) +				dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config); +			// Pickup base DM settings +			dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); +			// Override dc_panel_config if system has specific settings +			dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); +		} + +	} else { +		/* From Connected-to-Disconnected. */ +		link->type = dc_connection_none; +		sink_caps.signal = SIGNAL_TYPE_NONE; +#if defined(CONFIG_DRM_AMD_DC_HDCP) +		memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps)); +#endif +		/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk +		 *  is not cleared. If we emulate a DP signal on this connection, it thinks +		 *  the dongle is still there and limits the number of modes we can emulate. +		 *  Clear dongle_max_pix_clk on disconnect to fix this +		 */ +		link->dongle_max_pix_clk = 0; + +		dc_link_clear_dprx_states(link); +		dp_trace_reset(link); +	} + +	LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", +		  link->link_index, sink, +		  (sink_caps.signal == +		   SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"), +		  prev_sink, same_edid); + +	if (prev_sink) +		dc_sink_release(prev_sink); + +	return true; +} + +/** + * dc_link_detect_connection_type() - Determine if there is a sink connected + * + * @type: Returned connection type + * Does not detect downstream devices, such as MST sinks + * or display connected through active dongles + */ +bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type) +{ +	uint32_t is_hpd_high = 0; + +	if (link->connector_signal == SIGNAL_TYPE_LVDS) { +		*type = dc_connection_single; +		return true; +	} + +	if (link->connector_signal == SIGNAL_TYPE_EDP) { +		/*in case it is not on*/ +		if (!link->dc->config.edp_no_power_sequencing) +			link->dc->hwss.edp_power_control(link, true); +		link->dc->hwss.edp_wait_for_hpd_ready(link, true); +	} + +	/* Link may not have physical HPD pin. 
*/ +	if (link->ep_type != DISPLAY_ENDPOINT_PHY) { +		if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link)) +			*type = dc_connection_none; +		else +			*type = dc_connection_single; + +		return true; +	} + + +	if (!query_hpd_status(link, &is_hpd_high)) +		goto hpd_gpio_failure; + +	if (is_hpd_high) { +		*type = dc_connection_single; +		/* TODO: need to do the actual detection */ +	} else { +		*type = dc_connection_none; +	} + +	return true; + +hpd_gpio_failure: +	return false; +} + +bool link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ +	bool is_local_sink_detect_success; +	bool is_delegated_to_mst_top_mgr = false; +	enum dc_connection_type pre_link_type = link->type; + +	is_local_sink_detect_success = detect_link_and_local_sink(link, reason); + +	if (is_local_sink_detect_success && link->local_sink) +		verify_link_capability(link, link->local_sink, reason); + +	if (is_local_sink_detect_success && link->local_sink && +			dc_is_dp_signal(link->local_sink->sink_signal) && +			link->dpcd_caps.is_mst_capable) +		is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); + +	if (is_local_sink_detect_success && +			pre_link_type == dc_connection_mst_branch && +			link->type != dc_connection_mst_branch) +		is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link); + +	return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; +} + +void link_clear_dprx_states(struct dc_link *link) +{ +	memset(&link->dprx_states, 0, sizeof(link->dprx_states)); +} +#if defined(CONFIG_DRM_AMD_DC_HDCP) + +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal) +{ +	bool ret = false; + +	switch (signal)	{ +	case SIGNAL_TYPE_DISPLAY_PORT: +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +		ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; +		break; +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +	case SIGNAL_TYPE_HDMI_TYPE_A: +	/* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, +	 * we can poll for bksv but some displays have an issue with this. Since its so rare +	 * for a display to not be 1.4 capable, this assumtion is ok +	 */ +		ret = true; +		break; +	default: +		break; +	} +	return ret; +} + +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal) +{ +	bool ret = false; + +	switch (signal)	{ +	case SIGNAL_TYPE_DISPLAY_PORT: +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +		ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && +				link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && +				(link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; +		break; +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +	case SIGNAL_TYPE_HDMI_TYPE_A: +		ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0; +		break; +	default: +		break; +	} + +	return ret; +} +#endif // CONFIG_DRM_AMD_DC_HDCP + +const struct dc_link_status *link_get_status(const struct dc_link *link) +{ +	return &link->link_status; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h new file mode 100644 index 000000000000..1831636516fb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DETECTION_H__ +#define __DC_LINK_DETECTION_H__ +#include "link.h" + +#endif /* __DC_LINK_DETECTION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c new file mode 100644 index 000000000000..257e1c3ba00a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -0,0 +1,2528 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the programming sequence of stream's dpms state associated + * with the link and link's enable/disable sequences as result of the stream's + * dpms state change. + * + * TODO - The reason link owns stream's dpms programming sequence is + * because dpms programming sequence is highly dependent on underlying signal + * specific link protocols. This unfortunately causes link to own a portion of + * stream state programming sequence. This creates a gray area where the + * boundary between link and stream is not clearly defined. 
+ */ + +#include "link_dpms.h" +#include "link_hwss.h" +#include "accessories/link_fpga.h" +#include "accessories/link_dp_trace.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_training.h" +#include "protocols/link_edp_panel_control.h" + +#include "dm_helpers.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dsc.h" +#include "dccg.h" +#include "clk_mgr.h" +#include "atomfirmware.h" +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ +	DC_LOG_HW_HOTPLUG(  \ +		__VA_ARGS__) + +#define RETIMER_REDRIVER_INFO(...) \ +	DC_LOG_RETIMER_REDRIVER(  \ +		__VA_ARGS__) +#include "dc/dcn30/dcn30_vpg.h" + +#define MAX_MTP_SLOT_COUNT 64 +#define LINK_TRAINING_ATTEMPTS 4 +#define PEAK_FACTOR_X1000 1006 + +void link_blank_all_dp_displays(struct dc *dc) +{ +	unsigned int i; +	uint8_t dpcd_power_state = '\0'; +	enum dc_status status = DC_ERROR_UNEXPECTED; + +	for (i = 0; i < dc->link_count; i++) { +		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || +			(dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) +			continue; + +		/* DP 2.0 spec requires that we read LTTPR caps first */ +		dp_retrieve_lttpr_cap(dc->links[i]); +		/* if any of the displays are lit up turn them off */ +		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, +							&dpcd_power_state, sizeof(dpcd_power_state)); + +		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) +			link_blank_dp_stream(dc->links[i], true); +	} + +} + +void link_blank_all_edp_displays(struct dc *dc) +{ +	unsigned int i; +	uint8_t dpcd_power_state = '\0'; +	enum dc_status status = DC_ERROR_UNEXPECTED; + +	for (i = 0; i < dc->link_count; i++) { +		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) || +			(!dc->links[i]->edp_sink_present)) +			continue; + +		/* if any of the displays are lit up turn them off */ +		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, +							&dpcd_power_state, sizeof(dpcd_power_state)); + +		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) +			link_blank_dp_stream(dc->links[i], true); +	} +} + +void link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ +	unsigned int j; +	struct dc  *dc = link->ctx->dc; +	enum signal_type signal = link->connector_signal; + +	if ((signal == SIGNAL_TYPE_EDP) || +		(signal == SIGNAL_TYPE_DISPLAY_PORT)) { +		if (link->ep_type == DISPLAY_ENDPOINT_PHY && +			link->link_enc->funcs->get_dig_frontend && +			link->link_enc->funcs->is_dig_enabled(link->link_enc)) { +			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + +			if (fe != ENGINE_ID_UNKNOWN) +				for (j = 0; j < dc->res_pool->stream_enc_count; j++) { +					if (fe == dc->res_pool->stream_enc[j]->id) { +						dc->res_pool->stream_enc[j]->funcs->dp_blank(link, +									dc->res_pool->stream_enc[j]); +						break; +					} +				} +		} + +		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) +			dc_link_dp_receiver_power_ctrl(link, false); +	} +} + +void link_set_all_streams_dpms_off_for_link(struct dc_link *link) +{ +	struct pipe_ctx *pipes[MAX_PIPES]; +	struct dc_state *state = link->dc->current_state; +	uint8_t count; +	int i; +	struct dc_stream_update stream_update; +	bool dpms_off = true; +	struct link_resource link_res = {0}; + +	memset(&stream_update, 0, sizeof(stream_update)); +	stream_update.dpms_off = &dpms_off; + +	link_get_master_pipes_with_dpms_on(link, state, 
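+			/* Illustrative note: only "master" pipes are returned here,
+			 * i.e. pipes with top_pipe == NULL and prev_odm_pipe == NULL
+			 * (see is_master_pipe_for_link() below), so a link feeding two
+			 * independent streams yields count == 2 while ODM/MPC
+			 * secondary pipes are skipped.
+			 */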
&count, pipes); + +	for (i = 0; i < count; i++) { +		stream_update.stream = pipes[i]->stream; +		dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, +				pipes[i]->stream, &stream_update, +				state); +	} + +	/* link can be also enabled by vbios. In this case it is not recorded +	 * in pipe_ctx. Disable link phy here to make sure it is completely off +	 */ +	dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +void link_resume(struct dc_link *link) +{ +	if (link->connector_signal != SIGNAL_TYPE_VIRTUAL) +		program_hpd_filter(link); +} + +/* This function returns true if the pipe is used to feed video signal directly + * to the link. + */ +static bool is_master_pipe_for_link(const struct dc_link *link, +		const struct pipe_ctx *pipe) +{ +	return (pipe->stream && +			pipe->stream->link && +			pipe->stream->link == link && +			pipe->top_pipe == NULL && +			pipe->prev_odm_pipe == NULL); +} + +/* + * This function finds all master pipes feeding to a given link with dpms set to + * on in given dc state. + */ +void link_get_master_pipes_with_dpms_on(const struct dc_link *link, +		struct dc_state *state, +		uint8_t *count, +		struct pipe_ctx *pipes[MAX_PIPES]) +{ +	int i; +	struct pipe_ctx *pipe = NULL; + +	*count = 0; +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &state->res_ctx.pipe_ctx[i]; + +		if (is_master_pipe_for_link(link, pipe) && +				pipe->stream->dpms_off == false) { +			pipes[(*count)++] = pipe; +		} +	} +} + +static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, +		enum engine_id eng_id, +		struct ext_hdmi_settings *settings) +{ +	bool result = false; +	int i = 0; +	struct integrated_info *integrated_info = +			pipe_ctx->stream->ctx->dc_bios->integrated_info; + +	if (integrated_info == NULL) +		return false; + +	/* +	 * Get retimer settings from sbios for passing SI eye test for DCE11 +	 * The setting values are varied based on board revision and port id +	 * Therefore the setting values of each ports is passed by sbios. 
+	 */ + +	// Check if current bios contains ext Hdmi settings +	if (integrated_info->gpu_cap_info & 0x20) { +		switch (eng_id) { +		case ENGINE_ID_DIGA: +			settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr; +			settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num; +			settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num; +			memmove(settings->reg_settings, +					integrated_info->dp0_ext_hdmi_reg_settings, +					sizeof(integrated_info->dp0_ext_hdmi_reg_settings)); +			memmove(settings->reg_settings_6g, +					integrated_info->dp0_ext_hdmi_6g_reg_settings, +					sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings)); +			result = true; +			break; +		case ENGINE_ID_DIGB: +			settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr; +			settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num; +			settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num; +			memmove(settings->reg_settings, +					integrated_info->dp1_ext_hdmi_reg_settings, +					sizeof(integrated_info->dp1_ext_hdmi_reg_settings)); +			memmove(settings->reg_settings_6g, +					integrated_info->dp1_ext_hdmi_6g_reg_settings, +					sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings)); +			result = true; +			break; +		case ENGINE_ID_DIGC: +			settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr; +			settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num; +			settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num; +			memmove(settings->reg_settings, +					integrated_info->dp2_ext_hdmi_reg_settings, +					sizeof(integrated_info->dp2_ext_hdmi_reg_settings)); +			memmove(settings->reg_settings_6g, +					integrated_info->dp2_ext_hdmi_6g_reg_settings, +					sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings)); +			result = true; +			break; +		case ENGINE_ID_DIGD: +			settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr; +			settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num; +			settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num; +			memmove(settings->reg_settings, +					integrated_info->dp3_ext_hdmi_reg_settings, +					sizeof(integrated_info->dp3_ext_hdmi_reg_settings)); +			memmove(settings->reg_settings_6g, +					integrated_info->dp3_ext_hdmi_6g_reg_settings, +					sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings)); +			result = true; +			break; +		default: +			break; +		} + +		if (result == true) { +			// Validate settings from bios integrated info table +			if (settings->slv_addr == 0) +				return false; +			if (settings->reg_num > 9) +				return false; +			if (settings->reg_num_6g > 3) +				return false; + +			for (i = 0; i < settings->reg_num; i++) { +				if (settings->reg_settings[i].i2c_reg_index > 0x20) +					return false; +			} + +			for (i = 0; i < settings->reg_num_6g; i++) { +				if (settings->reg_settings_6g[i].i2c_reg_index > 0x20) +					return false; +			} +		} +	} + +	return result; +} + +static bool write_i2c(struct pipe_ctx *pipe_ctx, +		uint8_t address, uint8_t *buffer, uint32_t length) +{ +	struct i2c_command cmd = {0}; +	struct i2c_payload payload = {0}; + +	memset(&payload, 0, sizeof(payload)); +	memset(&cmd, 0, sizeof(cmd)); + +	cmd.number_of_payloads = 1; +	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; +	cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz; + +	payload.address = address; +	payload.data = buffer; +	payload.length = length; +	payload.write = true; +	cmd.payloads = &payload; + +	if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, +			pipe_ctx->stream->link, &cmd)) +		return true; + +	return false; 
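+	/* Illustrative usage, with values taken from the retimer helpers
+	 * below: a two-byte payload {reg_index, reg_val} is written to the
+	 * DP159-style retimer at 7-bit address 0xBA >> 1, e.g.
+	 *	uint8_t buf[2] = { 0x0A, 0x13 };
+	 *	write_i2c(pipe_ctx, 0xBA >> 1, buf, sizeof(buf));
+	 * The call returns true only if dm_helpers_submit_i2c() succeeds.
+	 */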
+} + +static void write_i2c_retimer_setting( +		struct pipe_ctx *pipe_ctx, +		bool is_vga_mode, +		bool is_over_340mhz, +		struct ext_hdmi_settings *settings) +{ +	uint8_t slave_address = (settings->slv_addr >> 1); +	uint8_t buffer[2]; +	const uint8_t apply_rx_tx_change = 0x4; +	uint8_t offset = 0xA; +	uint8_t value = 0; +	int i = 0; +	bool i2c_success = false; +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	memset(&buffer, 0, sizeof(buffer)); + +	/* Start Ext-Hdmi programming*/ + +	for (i = 0; i < settings->reg_num; i++) { +		/* Apply 3G settings */ +		if (settings->reg_settings[i].i2c_reg_index <= 0x20) { + +			buffer[0] = settings->reg_settings[i].i2c_reg_index; +			buffer[1] = settings->reg_settings[i].i2c_reg_val; +			i2c_success = write_i2c(pipe_ctx, slave_address, +						buffer, sizeof(buffer)); +			RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +				offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", +				slave_address, buffer[0], buffer[1], i2c_success?1:0); + +			if (!i2c_success) +				goto i2c_write_fail; + +			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A +			 * needs to be set to 1 on every 0xA-0xC write. +			 */ +			if (settings->reg_settings[i].i2c_reg_index == 0xA || +				settings->reg_settings[i].i2c_reg_index == 0xB || +				settings->reg_settings[i].i2c_reg_index == 0xC) { + +				/* Query current value from offset 0xA */ +				if (settings->reg_settings[i].i2c_reg_index == 0xA) +					value = settings->reg_settings[i].i2c_reg_val; +				else { +					i2c_success = +						link_query_ddc_data( +						pipe_ctx->stream->link->ddc, +						slave_address, &offset, 1, &value, 1); +					if (!i2c_success) +						goto i2c_write_fail; +				} + +				buffer[0] = offset; +				/* Set APPLY_RX_TX_CHANGE bit to 1 */ +				buffer[1] = value | apply_rx_tx_change; +				i2c_success = write_i2c(pipe_ctx, slave_address, +						buffer, sizeof(buffer)); +				RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +					slave_address, buffer[0], buffer[1], i2c_success?1:0); +				if (!i2c_success) +					goto i2c_write_fail; +			} +		} +	} + +	/* Apply 3G settings */ +	if (is_over_340mhz) { +		for (i = 0; i < settings->reg_num_6g; i++) { +			/* Apply 3G settings */ +			if (settings->reg_settings[i].i2c_reg_index <= 0x20) { + +				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index; +				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val; +				i2c_success = write_i2c(pipe_ctx, slave_address, +							buffer, sizeof(buffer)); +				RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\ +					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +					slave_address, buffer[0], buffer[1], i2c_success?1:0); + +				if (!i2c_success) +					goto i2c_write_fail; + +				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A +				 * needs to be set to 1 on every 0xA-0xC write. 
+				 */ +				if (settings->reg_settings_6g[i].i2c_reg_index == 0xA || +					settings->reg_settings_6g[i].i2c_reg_index == 0xB || +					settings->reg_settings_6g[i].i2c_reg_index == 0xC) { + +					/* Query current value from offset 0xA */ +					if (settings->reg_settings_6g[i].i2c_reg_index == 0xA) +						value = settings->reg_settings_6g[i].i2c_reg_val; +					else { +						i2c_success = +								link_query_ddc_data( +								pipe_ctx->stream->link->ddc, +								slave_address, &offset, 1, &value, 1); +						if (!i2c_success) +							goto i2c_write_fail; +					} + +					buffer[0] = offset; +					/* Set APPLY_RX_TX_CHANGE bit to 1 */ +					buffer[1] = value | apply_rx_tx_change; +					i2c_success = write_i2c(pipe_ctx, slave_address, +							buffer, sizeof(buffer)); +					RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +						offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +						slave_address, buffer[0], buffer[1], i2c_success?1:0); +					if (!i2c_success) +						goto i2c_write_fail; +				} +			} +		} +	} + +	if (is_vga_mode) { +		/* Program additional settings if using 640x480 resolution */ + +		/* Write offset 0xFF to 0x01 */ +		buffer[0] = 0xff; +		buffer[1] = 0x01; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +				slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; + +		/* Write offset 0x00 to 0x23 */ +		buffer[0] = 0x00; +		buffer[1] = 0x23; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +			slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; + +		/* Write offset 0xff to 0x00 */ +		buffer[0] = 0xff; +		buffer[1] = 0x00; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ +			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +			slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; + +	} + +	return; + +i2c_write_fail: +	DC_LOG_DEBUG("Set retimer failed"); +} + +static void write_i2c_default_retimer_setting( +		struct pipe_ctx *pipe_ctx, +		bool is_vga_mode, +		bool is_over_340mhz) +{ +	uint8_t slave_address = (0xBA >> 1); +	uint8_t buffer[2]; +	bool i2c_success = false; +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	memset(&buffer, 0, sizeof(buffer)); + +	/* Program Slave Address for tuning single integrity */ +	/* Write offset 0x0A to 0x13 */ +	buffer[0] = 0x0A; +	buffer[1] = 0x13; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\ +		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + +	/* Write offset 0x0A to 0x17 */ +	buffer[0] = 0x0A; +	buffer[1] = 0x17; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + +	/* Write offset 0x0B to 0xDA or 0xD8 */ +	
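+	/* Illustrative note: is_over_340mhz selects the high-rate programming
+	 * (0xDA here and 0x1D at offset 0x0C below) for TMDS character rates
+	 * above 340 MHz, while lower rates use 0xD8/0x91. The exact register
+	 * semantics are vendor specific (assumption based on the flag name).
+	 */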
buffer[0] = 0x0B; +	buffer[1] = is_over_340mhz ? 0xDA : 0xD8; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + +	/* Write offset 0x0A to 0x17 */ +	buffer[0] = 0x0A; +	buffer[1] = 0x17; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +		offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + +	/* Write offset 0x0C to 0x1D or 0x91 */ +	buffer[0] = 0x0C; +	buffer[1] = is_over_340mhz ? 0x1D : 0x91; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + +	/* Write offset 0x0A to 0x17 */ +	buffer[0] = 0x0A; +	buffer[1] = 0x17; +	i2c_success = write_i2c(pipe_ctx, slave_address, +			buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +		slave_address, buffer[0], buffer[1], i2c_success?1:0); +	if (!i2c_success) +		goto i2c_write_fail; + + +	if (is_vga_mode) { +		/* Program additional settings if using 640x480 resolution */ + +		/* Write offset 0xFF to 0x01 */ +		buffer[0] = 0xff; +		buffer[1] = 0x01; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", +			slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; + +		/* Write offset 0x00 to 0x23 */ +		buffer[0] = 0x00; +		buffer[1] = 0x23; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ +			offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", +			slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; + +		/* Write offset 0xff to 0x00 */ +		buffer[0] = 0xff; +		buffer[1] = 0x00; +		i2c_success = write_i2c(pipe_ctx, slave_address, +				buffer, sizeof(buffer)); +		RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ +			offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", +			slave_address, buffer[0], buffer[1], i2c_success?1:0); +		if (!i2c_success) +			goto i2c_write_fail; +	} + +	return; + +i2c_write_fail: +	DC_LOG_DEBUG("Set default retimer failed"); +} + +static void write_i2c_redriver_setting( +		struct pipe_ctx *pipe_ctx, +		bool is_over_340mhz) +{ +	uint8_t slave_address = (0xF0 >> 1); +	uint8_t buffer[16]; +	bool i2c_success = false; +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	memset(&buffer, 0, sizeof(buffer)); + +	// Program Slave Address for tuning single integrity +	buffer[3] = 0x4E; +	buffer[4] = 0x4E; +	buffer[5] = 0x4E; +	buffer[6] = is_over_340mhz ? 
0x4E : 0x4A; + +	i2c_success = write_i2c(pipe_ctx, slave_address, +					buffer, sizeof(buffer)); +	RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ +		\t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ +		offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ +		i2c_success = %d\n", +		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); + +	if (!i2c_success) +		DC_LOG_DEBUG("Set redriver failed"); +} +#if defined(CONFIG_DRM_AMD_DC_HDCP) + +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ +	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; +	struct link_encoder *link_enc = NULL; +	struct cp_psp_stream_config config = {0}; +	enum dp_panel_mode panel_mode = +			dp_get_panel_mode(pipe_ctx->stream->link); + +	if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) +		return; + +	link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); +	ASSERT(link_enc); +	if (link_enc == NULL) +		return; + +	/* otg instance */ +	config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + +	/* dig front end */ +	config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; + +	/* stream encoder index */ +	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; +	if (link_is_dp_128b_132b_signal(pipe_ctx)) +		config.stream_enc_idx = +				pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + +	/* dig back end */ +	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; + +	/* link encoder index */ +	config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; +	if (link_is_dp_128b_132b_signal(pipe_ctx)) +		config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; + +	/* dio output index is dpia index for DPIA endpoint & dcio index by default */ +	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +		config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; +	else +		config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + +	/* phy index */ +	config.phy_idx = resource_transmitter_to_phy_idx( +			pipe_ctx->stream->link->dc, link_enc->transmitter); +	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +		/* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ +		config.phy_idx = 0; + +	/* stream properties */ +	config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; +	config.mst_enabled = (pipe_ctx->stream->signal == +			SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; +	config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; +	config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
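+			/* Illustrative example (hypothetical topology): an SST
+			 * stream on OTG1 driven by DIG B over UNIPHY B would be
+			 * reported to the PSP as otg_inst = 1, dig_fe = 1,
+			 * stream_enc_idx = 1, link_enc_idx = 1, dio_output_idx = 1
+			 * and phy_idx per resource_transmitter_to_phy_idx(), with
+			 * mst_enabled, dp2_enabled and usb4_enabled all 0.
+			 */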
+			1 : 0; +	config.dpms_off = dpms_off; + +	/* dm stream context */ +	config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + +	cp_psp->funcs.update_stream_config(cp_psp->handle, &config); +} +#endif + +static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable) +{ +	struct dc  *dc = pipe_ctx->stream->ctx->dc; + +	if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) +		return; + +	dc->hwss.set_avmute(pipe_ctx, enable); +} + +static void enable_mst_on_sink(struct dc_link *link, bool enable) +{ +	unsigned char mstmCntl; + +	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); +	if (enable) +		mstmCntl |= DP_MST_EN; +	else +		mstmCntl &= (~DP_MST_EN); + +	core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); +} + +static void dsc_optc_config_log(struct display_stream_compressor *dsc, +		struct dsc_optc_config *config) +{ +	uint32_t precision = 1 << 28; +	uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; +	uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; +	uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; +	DC_LOGGER_INIT(dsc->ctx->logger); + +	/* 7 fractional digits decimal precision for bytes per pixel is enough because DSC +	 * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is +	 * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal +	 */ +	ll_bytes_per_pix_fraq *= 10000000; +	ll_bytes_per_pix_fraq /= precision; + +	DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", +			config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); +	DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); +	DC_LOG_DSC("\tslice_width %d", config->slice_width); +} + +static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) +{ +	struct dc *dc = pipe_ctx->stream->ctx->dc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	bool result = false; + +	if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) +		result = true; +	else +		result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); +	return result; +} + +/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, + * i.e. after dp_enable_dsc_on_rx() had been called + */ +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ +	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; +	struct dc *dc = pipe_ctx->stream->ctx->dc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct pipe_ctx *odm_pipe; +	int opp_cnt = 1; +	DC_LOGGER_INIT(dsc->ctx->logger); + +	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) +		opp_cnt++; + +	if (enable) { +		struct dsc_config dsc_cfg; +		struct dsc_optc_config dsc_optc_cfg; +		enum optc_dsc_mode optc_dsc_mode; + +		/* Enable DSC hw block */ +		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; +		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; +		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; +		dsc_cfg.color_depth = stream->timing.display_color_depth; +		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
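+		/* Illustrative example: with 2:1 ODM (opp_cnt == 2) on a
+		 * 7680-wide timing (no borders) configured for 8 horizontal DSC
+		 * slices, each DSC instance below is programmed with
+		 * pic_width = 3840 and num_slices_h = 4; the full-width values
+		 * are restored before the stream encoder and OPTC are configured.
+		 */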
true : false; +		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; +		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); +		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + +		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); +		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); +		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { +			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; + +			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); +			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); +		} +		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; +		dsc_cfg.pic_width *= opp_cnt; + +		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + +		/* Enable DSC in encoder */ +		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) +				&& !link_is_dp_128b_132b_signal(pipe_ctx)) { +			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); +			dsc_optc_config_log(dsc, &dsc_optc_cfg); +			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, +									optc_dsc_mode, +									dsc_optc_cfg.bytes_per_pixel, +									dsc_optc_cfg.slice_width); + +			/* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ +		} + +		/* Enable DSC in OPTC */ +		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); +		dsc_optc_config_log(dsc, &dsc_optc_cfg); +		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, +							optc_dsc_mode, +							dsc_optc_cfg.bytes_per_pixel, +							dsc_optc_cfg.slice_width); +	} else { +		/* disable DSC in OPTC */ +		pipe_ctx->stream_res.tg->funcs->set_dsc_config( +				pipe_ctx->stream_res.tg, +				OPTC_DSC_DISABLED, 0, 0); + +		/* disable DSC in stream encoder */ +		if (dc_is_dp_signal(stream->signal)) { +			if (link_is_dp_128b_132b_signal(pipe_ctx)) +				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( +										pipe_ctx->stream_res.hpo_dp_stream_enc, +										false, +										NULL, +										true); +			else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { +				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( +						pipe_ctx->stream_res.stream_enc, +						OPTC_DSC_DISABLED, 0, 0); +				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( +							pipe_ctx->stream_res.stream_enc, false, NULL, true); +			} +		} + +		/* disable DSC block */ +		pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); +		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) +			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); +	} +} + +/* + * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; + * hence PPS info packet update need to use frame update instead of immediate update. + * Added parameter immediate_update for this purpose. + * The decision to use frame update is hard-coded in function dp_update_dsc_config(), + * which is the only place where a "false" would be passed in for param immediate_update. + * + * immediate_update is only applicable when DSC is enabled. 
+ */ +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) +{ +	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	DC_LOGGER_INIT(dsc->ctx->logger); + +	if (!pipe_ctx->stream->timing.flags.DSC || !dsc) +		return false; + +	if (enable) { +		struct dsc_config dsc_cfg; +		uint8_t dsc_packed_pps[128]; + +		memset(&dsc_cfg, 0, sizeof(dsc_cfg)); +		memset(dsc_packed_pps, 0, 128); + +		/* Enable DSC hw block */ +		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; +		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; +		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; +		dsc_cfg.color_depth = stream->timing.display_color_depth; +		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; +		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + +		dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); +		memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); +		if (dc_is_dp_signal(stream->signal)) { +			DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); +			if (link_is_dp_128b_132b_signal(pipe_ctx)) +				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( +										pipe_ctx->stream_res.hpo_dp_stream_enc, +										true, +										&dsc_packed_pps[0], +										immediate_update); +			else +				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( +						pipe_ctx->stream_res.stream_enc, +						true, +						&dsc_packed_pps[0], +						immediate_update); +		} +	} else { +		/* disable DSC PPS in stream encoder */ +		memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); +		if (dc_is_dp_signal(stream->signal)) { +			if (link_is_dp_128b_132b_signal(pipe_ctx)) +				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( +										pipe_ctx->stream_res.hpo_dp_stream_enc, +										false, +										NULL, +										true); +			else +				pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( +						pipe_ctx->stream_res.stream_enc, false, NULL, true); +		} +	} + +	return true; +} + +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) +{ +	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; +	bool result = false; + +	if (!pipe_ctx->stream->timing.flags.DSC) +		goto out; +	if (!dsc) +		goto out; + +	if (enable) { +		{ +			link_set_dsc_on_stream(pipe_ctx, true); +			result = true; +		} +	} else { +		dp_set_dsc_on_rx(pipe_ctx, false); +		link_set_dsc_on_stream(pipe_ctx, false); +		result = true; +	} +out: +	return result; +} + +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ +	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + +	if (!pipe_ctx->stream->timing.flags.DSC) +		return false; +	if (!dsc) +		return false; + +	link_set_dsc_on_stream(pipe_ctx, true); +	link_set_dsc_pps_packet(pipe_ctx, true, false); +	return true; +} + +static void enable_stream_features(struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; + +	if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { +		struct dc_link *link = stream->link; +		union down_spread_ctrl old_downspread; +		union down_spread_ctrl new_downspread; + +		memset(&old_downspread, 0, sizeof(old_downspread)); + +		core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, +				
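+				/* Read-modify-write: only IGNORE_MSA_TIMING_PARAM
+				 * is changed below, and the DPCD write is skipped
+				 * when the sink already has the desired value.
+				 * Ignoring the MSA timing lets the sink track the
+				 * transmitted timing directly, which variable
+				 * refresh streams rely on (assumption based on the
+				 * stream's ignore_msa_timing_param flag).
+				 */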
&old_downspread.raw, sizeof(old_downspread)); + +		new_downspread.raw = old_downspread.raw; + +		new_downspread.bits.IGNORE_MSA_TIMING_PARAM = +				(stream->ignore_msa_timing_param) ? 1 : 0; + +		if (new_downspread.raw != old_downspread.raw) { +			core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, +				&new_downspread.raw, sizeof(new_downspread)); +		} + +	} else { +		dm_helpers_mst_enable_stream_features(stream); +	} +} + +static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) +{ +	const uint32_t VCP_Y_PRECISION = 1000; +	uint64_t vcp_x, vcp_y; +	DC_LOGGER_INIT(link->ctx->logger); + +	// Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision +	avg_time_slots_per_mtp = dc_fixpt_add( +			avg_time_slots_per_mtp, +			dc_fixpt_from_fraction( +				1, +				2*VCP_Y_PRECISION)); + +	vcp_x = dc_fixpt_floor( +			avg_time_slots_per_mtp); +	vcp_y = dc_fixpt_floor( +			dc_fixpt_mul_int( +				dc_fixpt_sub_int( +					avg_time_slots_per_mtp, +					dc_fixpt_floor( +							avg_time_slots_per_mtp)), +				VCP_Y_PRECISION)); + + +	if (link->type == dc_connection_mst_branch) +		DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " +				"X: %llu " +				"Y: %llu/%d", +				vcp_x, +				vcp_y, +				VCP_Y_PRECISION); +	else +		DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " +				"X: %llu " +				"Y: %llu/%d", +				vcp_x, +				vcp_y, +				VCP_Y_PRECISION); +} + +static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) +{ +	struct fixed31_32 mbytes_per_sec; +	uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, +			&stream->link->cur_link_settings); +	link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ + +	mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); + +	return dc_fixpt_div_int(mbytes_per_sec, 54); +} + +static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) +{ +	struct fixed31_32 peak_kbps; +	uint32_t numerator = 0; +	uint32_t denominator = 1; + +	/* +	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 +	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on +	 * common multiplier to render an integer PBN for all link rate/lane +	 * counts combinations +	 * calculate +	 * peak_kbps *= (1006/1000) +	 * peak_kbps *= (64/54) +	 * peak_kbps *= 8    convert to bytes +	 */ + +	numerator = 64 * PEAK_FACTOR_X1000; +	denominator = 54 * 8 * 1000 * 1000; +	kbps *= numerator; +	peak_kbps = dc_fixpt_from_fraction(kbps, denominator); + +	return peak_kbps; +} + +static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) +{ +	uint64_t kbps; + +	kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); +	return get_pbn_from_bw_in_kbps(kbps); +} + + +// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) +static void get_lane_status( +	struct dc_link *link, +	uint32_t lane_count, +	union lane_status *status, +	union lane_align_status_updated *status_updated) +{ +	unsigned int lane; +	uint8_t dpcd_buf[3] = {0}; + +	if (status == NULL || status_updated == NULL) { +		return; +	} + +	core_link_read_dpcd( +			link, +			DP_LANE0_1_STATUS, +			dpcd_buf, +			sizeof(dpcd_buf)); + +	for (lane = 0; lane < lane_count; lane++) { +		status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane); +	} + +	status_updated->raw = dpcd_buf[2]; +} + +static bool poll_for_allocation_change_trigger(struct dc_link *link) +{ +	/* +	 * wait for ACT handled +	 */ +	int i; +	const int act_retries = 30; +	enum 
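+	/* Worked example for the PBN helpers above (illustrative figures):
+	 * a 4-lane HBR2 link reports roughly 17,280,000 kbps from
+	 * dc_link_bandwidth_kbps() after 8b/10b overhead, i.e. 2160 MBytes/s,
+	 * so get_pbn_per_slot() yields 2160 / 54 = 40 PBN per time slot.
+	 * A 1080p60 stream at 148.5 MHz and 24 bpp is about 3,564,000 kbps,
+	 * so get_pbn_from_bw_in_kbps() returns
+	 * 3,564,000 * 64 * 1006 / (54 * 8 * 1000 * 1000) ~= 531 PBN,
+	 * i.e. roughly 13.3 time slots per MTP on that link.
+	 */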
act_return_status result = ACT_FAILED; +	union payload_table_update_status update_status = {0}; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; +	union lane_align_status_updated lane_status_updated; +	DC_LOGGER_INIT(link->ctx->logger); + +	if (link->aux_access_disabled) +		return true; +	for (i = 0; i < act_retries; i++) { +		get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); + +		if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || +				!dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || +				!dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || +				!dp_is_interlane_aligned(lane_status_updated)) { +			DC_LOG_ERROR("SST Update Payload: Link loss occurred while " +					"polling for ACT handled."); +			result = ACT_LINK_LOST; +			break; +		} +		core_link_read_dpcd( +				link, +				DP_PAYLOAD_TABLE_UPDATE_STATUS, +				&update_status.raw, +				1); + +		if (update_status.bits.ACT_HANDLED == 1) { +			DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); +			result = ACT_SUCCESS; +			break; +		} + +		msleep(5); +	} + +	if (result == ACT_FAILED) { +		DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " +				"continue on. Something is wrong with the branch."); +	} + +	return (result == ACT_SUCCESS); +} + +static void update_mst_stream_alloc_table( +	struct dc_link *link, +	struct stream_encoder *stream_enc, +	struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? +	const struct dc_dp_mst_stream_allocation_table *proposed_table) +{ +	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; +	struct link_mst_stream_allocation *dc_alloc; + +	int i; +	int j; + +	/* if DRM proposed_table has more than one new payload */ +	ASSERT(proposed_table->stream_count - +			link->mst_stream_alloc_table.stream_count < 2); + +	/* copy proposed_table to link, add stream encoder */ +	for (i = 0; i < proposed_table->stream_count; i++) { + +		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) { +			dc_alloc = +			&link->mst_stream_alloc_table.stream_allocations[j]; + +			if (dc_alloc->vcp_id == +				proposed_table->stream_allocations[i].vcp_id) { + +				work_table[i] = *dc_alloc; +				work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; +				break; /* exit j loop */ +			} +		} + +		/* new vcp_id */ +		if (j == link->mst_stream_alloc_table.stream_count) { +			work_table[i].vcp_id = +				proposed_table->stream_allocations[i].vcp_id; +			work_table[i].slot_count = +				proposed_table->stream_allocations[i].slot_count; +			work_table[i].stream_enc = stream_enc; +			work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; +		} +	} + +	/* update link->mst_stream_alloc_table with work_table */ +	link->mst_stream_alloc_table.stream_count = +			proposed_table->stream_count; +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) +		link->mst_stream_alloc_table.stream_allocations[i] = +				work_table[i]; +} + +static void remove_stream_from_alloc_table( +		struct dc_link *link, +		struct stream_encoder *dio_stream_enc, +		struct hpo_dp_stream_encoder *hpo_dp_stream_enc) +{ +	int i = 0; +	struct link_mst_stream_allocation_table *table = +			&link->mst_stream_alloc_table; + +	if (hpo_dp_stream_enc) { +		for (; i < table->stream_count; i++) +			if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc) +				break; +	} else { +		for (; i < table->stream_count; i++) +			if (dio_stream_enc == 
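+			/* Illustrative note: once the matching entry is found,
+			 * the code below shifts every later allocation down one
+			 * slot and clears the last one, e.g. removing index 1
+			 * from a 3-entry table moves entry 2 into slot 1 and
+			 * leaves stream_count == 2.
+			 */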
table->stream_allocations[i].stream_enc) +				break; +	} + +	if (i < table->stream_count) { +		i++; +		for (; i < table->stream_count; i++) +			table->stream_allocations[i-1] = table->stream_allocations[i]; +		memset(&table->stream_allocations[table->stream_count-1], 0, +				sizeof(struct link_mst_stream_allocation)); +		table->stream_count--; +	} +} + +static enum dc_status deallocate_mst_payload_with_temp_drm_wa( +		struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; +	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); +	int i; +	bool mst_mode = (link->type == dc_connection_mst_branch); +	/* adjust for drm changes*/ +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	const struct dc_link_settings empty_link_settings = {0}; +	DC_LOGGER_INIT(link->ctx->logger); + +	if (link_hwss->ext.set_throttled_vcp_size) +		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); +	if (link_hwss->ext.set_hblank_min_symbol_width) +		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +				&empty_link_settings, +				avg_time_slots_per_mtp); + +	if (dm_helpers_dp_mst_write_payload_allocation_table( +			stream->ctx, +			stream, +			&proposed_table, +			false)) +		update_mst_stream_alloc_table( +				link, +				pipe_ctx->stream_res.stream_enc, +				pipe_ctx->stream_res.hpo_dp_stream_enc, +				&proposed_table); +	else +		DC_LOG_WARNING("Failed to update" +				"MST allocation table for" +				"pipe idx:%d\n", +				pipe_ctx->pipe_idx); + +	DC_LOG_MST("%s" +			"stream_count: %d: ", +			__func__, +			link->mst_stream_alloc_table.stream_count); + +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +		DC_LOG_MST("stream_enc[%d]: %p      " +		"stream[%d].hpo_dp_stream_enc: %p      " +		"stream[%d].vcp_id: %d      " +		"stream[%d].slot_count: %d\n", +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].vcp_id, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].slot_count); +	} + +	if (link_hwss->ext.update_stream_allocation_table == NULL || +			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { +		DC_LOG_DEBUG("Unknown encoding format\n"); +		return DC_ERROR_UNEXPECTED; +	} + +	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, +			&link->mst_stream_alloc_table); + +	if (mst_mode) { +		dm_helpers_dp_mst_poll_for_allocation_change_trigger( +			stream->ctx, +			stream); +	} + +	dm_helpers_dp_mst_send_payload_allocation( +			stream->ctx, +			stream, +			false); + +	return DC_OK; +} + +static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; +	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); +	int i; +	bool mst_mode = (link->type == dc_connection_mst_branch); +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	const struct dc_link_settings empty_link_settings = {0}; +	DC_LOGGER_INIT(link->ctx->logger); + +	if (link->dc->debug.temp_mst_deallocation_sequence) +		return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); + +	/* deallocate_mst_payload is called before disable link. 
When changing modes or
+	 * disabling/enabling a monitor, a new stream is created which is not in
+	 * the link's stream[] yet. Its payload has not been allocated, so no
+	 * de-allocation should be done here. For a new mode set, map_resources
+	 * will get an engine for the new stream, so stream_enc->id should
+	 * already be valid at this point.
+	 */
+
+	/* slot X.Y */
+	if (link_hwss->ext.set_throttled_vcp_size)
+		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+	if (link_hwss->ext.set_hblank_min_symbol_width)
+		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+				&empty_link_settings,
+				avg_time_slots_per_mtp);
+
+	if (mst_mode) {
+		/* when link is in mst mode, rely on the mst manager to remove
+		 * the payload
+		 */
+		if (dm_helpers_dp_mst_write_payload_allocation_table(
+				stream->ctx,
+				stream,
+				&proposed_table,
+				false))
+			update_mst_stream_alloc_table(
+					link,
+					pipe_ctx->stream_res.stream_enc,
+					pipe_ctx->stream_res.hpo_dp_stream_enc,
+					&proposed_table);
+		else
+			DC_LOG_WARNING("Failed to update "
+					"MST allocation table for "
+					"pipe idx:%d\n",
+					pipe_ctx->pipe_idx);
+	} else {
+		/* when link is no longer in mst mode (mst hub unplugged),
+		 * remove payload with default dc logic
+		 */
+		remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc);
+	}
+
+	DC_LOG_MST("%s "
+			"stream_count: %d:\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p      "
+		"stream[%d].hpo_dp_stream_enc: %p      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	/* update mst stream allocation table hardware state */
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_DEBUG("Unknown encoding format\n");
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+			&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			stream);
+
+		dm_helpers_dp_mst_send_payload_allocation(
+				stream->ctx,
+				stream,
+				false);
+	}
+
+	return DC_OK;
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	int i;
+	enum act_return_status ret;
+	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	/* enable_link_dp_mst already checks link->enabled_stream_count
+	 * and that the stream is in link->stream[]. This is called during set
+	 * mode, so stream_enc is available.
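+	 *
+	 * Rough sequence below: ask the DM layer to write the DRM payload
+	 * allocation table, mirror it into link->mst_stream_alloc_table,
+	 * program the DP TX allocation table, poll for ACT, send the
+	 * ALLOCATE_PAYLOAD sideband message, and finally program slot X.Y
+	 * (throttled VCP size) as PBN(timing) / PBN-per-slot(link).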
+	 */
+
+	/* calculate VC payload for stream: stream_alloc */
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+		stream->ctx,
+		stream,
+		&proposed_table,
+		true))
+		update_mst_stream_alloc_table(
+					link,
+					pipe_ctx->stream_res.stream_enc,
+					pipe_ctx->stream_res.hpo_dp_stream_enc,
+					&proposed_table);
+	else
+		DC_LOG_WARNING("Failed to update "
+				"MST allocation table for "
+				"pipe idx:%d\n",
+				pipe_ctx->pipe_idx);
+
+	DC_LOG_MST("%s "
+			"stream_count: %d:\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		DC_LOG_MST("stream_enc[%d]: %p      "
+		"stream[%d].hpo_dp_stream_enc: %p      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/* program DP source TX for payload */
+	if (link_hwss->ext.update_stream_allocation_table == NULL ||
+			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+		DC_LOG_ERROR("Failure: unknown encoding format\n");
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	link_hwss->ext.update_stream_allocation_table(link,
+			&pipe_ctx->link_res,
+			&link->mst_stream_alloc_table);
+
+	/* send down message */
+	ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			stream);
+
+	if (ret != ACT_LINK_LOST) {
+		dm_helpers_dp_mst_send_payload_allocation(
+				stream->ctx,
+				stream,
+				true);
+	}
+
+	/* slot X.Y for only the current stream */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	if (pbn_per_slot.value == 0) {
+		DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. 
Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); +		return DC_UNSUPPORTED_VALUE; +	} +	pbn = get_pbn_from_timing(pipe_ctx); +	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + +	dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + +	if (link_hwss->ext.set_throttled_vcp_size) +		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); +	if (link_hwss->ext.set_hblank_min_symbol_width) +		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +				&link->cur_link_settings, +				avg_time_slots_per_mtp); + +	return DC_OK; +} + +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( +		const struct dc_stream_state *stream, +		const struct dc_link *link) +{ +	struct fixed31_32 link_bw_effective = +			dc_fixpt_from_int( +					dc_link_bandwidth_kbps(link, &link->cur_link_settings)); +	struct fixed31_32 timeslot_bw_effective = +			dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); +	struct fixed31_32 timing_bw = +			dc_fixpt_from_int( +					dc_bandwidth_in_kbps_from_timing(&stream->timing)); +	struct fixed31_32 avg_time_slots_per_mtp = +			dc_fixpt_div(timing_bw, timeslot_bw_effective); + +	return avg_time_slots_per_mtp; +} + + +static bool write_128b_132b_sst_payload_allocation_table( +		const struct dc_stream_state *stream, +		struct dc_link *link, +		struct link_mst_stream_allocation_table *proposed_table, +		bool allocate) +{ +	const uint8_t vc_id = 1; /// VC ID always 1 for SST +	const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST +	bool result = false; +	uint8_t req_slot_count = 0; +	struct fixed31_32 avg_time_slots_per_mtp = { 0 }; +	union payload_table_update_status update_status = { 0 }; +	const uint32_t max_retries = 30; +	uint32_t retries = 0; +	DC_LOGGER_INIT(link->ctx->logger); + +	if (allocate)	{ +		avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); +		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); +		/// Validation should filter out modes that exceed link BW +		ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); +		if (req_slot_count > MAX_MTP_SLOT_COUNT) +			return false; +	} else { +		/// Leave req_slot_count = 0 if allocate is false. 
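+		/// The zero slot count written to the branch below is what
+		/// tears down the SST payload on the deallocation path.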
+	} + +	proposed_table->stream_count = 1; /// Always 1 stream for SST +	proposed_table->stream_allocations[0].slot_count = req_slot_count; +	proposed_table->stream_allocations[0].vcp_id = vc_id; + +	if (link->aux_access_disabled) +		return true; + +	/// Write DPCD 2C0 = 1 to start updating +	update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; +	core_link_write_dpcd( +			link, +			DP_PAYLOAD_TABLE_UPDATE_STATUS, +			&update_status.raw, +			1); + +	/// Program the changes in DPCD 1C0 - 1C2 +	ASSERT(vc_id == 1); +	core_link_write_dpcd( +			link, +			DP_PAYLOAD_ALLOCATE_SET, +			&vc_id, +			1); + +	ASSERT(start_time_slot == 0); +	core_link_write_dpcd( +			link, +			DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, +			&start_time_slot, +			1); + +	core_link_write_dpcd( +			link, +			DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, +			&req_slot_count, +			1); + +	/// Poll till DPCD 2C0 read 1 +	/// Try for at least 150ms (30 retries, with 5ms delay after each attempt) + +	while (retries < max_retries) { +		if (core_link_read_dpcd( +				link, +				DP_PAYLOAD_TABLE_UPDATE_STATUS, +				&update_status.raw, +				1) == DC_OK) { +			if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { +				DC_LOG_DP2("SST Update Payload: downstream payload table updated."); +				result = true; +				break; +			} +		} else { +			union dpcd_rev dpcdRev; + +			if (core_link_read_dpcd( +					link, +					DP_DPCD_REV, +					&dpcdRev.raw, +					1) != DC_OK) { +				DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " +						"of sink while polling payload table " +						"updated status bit."); +				break; +			} +		} +		retries++; +		msleep(5); +	} + +	if (!result && retries == max_retries) { +		DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " +				"continue on. Something is wrong with the branch."); +		// TODO - DP2.0 Payload: Read and log the payload table from downstream branch +	} + +	return result; +} + +/* + * Payload allocation/deallocation for SST introduced in DP2.0 + */ +static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx, +						 bool allocate) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct link_mst_stream_allocation_table proposed_table = {0}; +	struct fixed31_32 avg_time_slots_per_mtp; +	const struct dc_link_settings empty_link_settings = {0}; +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	DC_LOGGER_INIT(link->ctx->logger); + +	/* slot X.Y for SST payload deallocate */ +	if (!allocate) { +		avg_time_slots_per_mtp = dc_fixpt_from_int(0); + +		dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + +		if (link_hwss->ext.set_throttled_vcp_size) +			link_hwss->ext.set_throttled_vcp_size(pipe_ctx, +					avg_time_slots_per_mtp); +		if (link_hwss->ext.set_hblank_min_symbol_width) +			link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +					&empty_link_settings, +					avg_time_slots_per_mtp); +	} + +	/* calculate VC payload and update branch with new payload allocation table*/ +	if (!write_128b_132b_sst_payload_allocation_table( +			stream, +			link, +			&proposed_table, +			allocate)) { +		DC_LOG_ERROR("SST Update Payload: Failed to update " +						"allocation table for " +						"pipe idx: %d\n", +						pipe_ctx->pipe_idx); +		return DC_FAIL_DP_PAYLOAD_ALLOCATION; +	} + +	proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + +	ASSERT(proposed_table.stream_count == 1); + +	//TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id +	
DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p      " +		"vcp_id: %d      " +		"slot_count: %d\n", +		(void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, +		proposed_table.stream_allocations[0].vcp_id, +		proposed_table.stream_allocations[0].slot_count); + +	/* program DP source TX for payload */ +	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, +			&proposed_table); + +	/* poll for ACT handled */ +	if (!poll_for_allocation_change_trigger(link)) { +		// Failures will result in blackscreen and errors logged +		BREAK_TO_DEBUGGER(); +	} + +	/* slot X.Y for SST payload allocate */ +	if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) == +			DP_128b_132b_ENCODING) { +		avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); + +		dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + +		if (link_hwss->ext.set_throttled_vcp_size) +			link_hwss->ext.set_throttled_vcp_size(pipe_ctx, +					avg_time_slots_per_mtp); +		if (link_hwss->ext.set_hblank_min_symbol_width) +			link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +					&link->cur_link_settings, +					avg_time_slots_per_mtp); +	} + +	/* Always return DC_OK. +	 * If part of sequence fails, log failure(s) and show blackscreen +	 */ +	return DC_OK; +} + +enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct fixed31_32 avg_time_slots_per_mtp; +	struct fixed31_32 pbn; +	struct fixed31_32 pbn_per_slot; +	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; +	uint8_t i; +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	DC_LOGGER_INIT(link->ctx->logger); + +	/* decrease throttled vcp size */ +	pbn_per_slot = get_pbn_per_slot(stream); +	pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); +	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + +	if (link_hwss->ext.set_throttled_vcp_size) +		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); +	if (link_hwss->ext.set_hblank_min_symbol_width) +		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +				&link->cur_link_settings, +				avg_time_slots_per_mtp); + +	/* send ALLOCATE_PAYLOAD sideband message with updated pbn */ +	dm_helpers_dp_mst_send_payload_allocation( +			stream->ctx, +			stream, +			true); + +	/* notify immediate branch device table update */ +	if (dm_helpers_dp_mst_write_payload_allocation_table( +			stream->ctx, +			stream, +			&proposed_table, +			true)) { +		/* update mst stream allocation table software state */ +		update_mst_stream_alloc_table( +				link, +				pipe_ctx->stream_res.stream_enc, +				pipe_ctx->stream_res.hpo_dp_stream_enc, +				&proposed_table); +	} else { +		DC_LOG_WARNING("Failed to update" +				"MST allocation table for" +				"pipe idx:%d\n", +				pipe_ctx->pipe_idx); +	} + +	DC_LOG_MST("%s  " +			"stream_count: %d: \n ", +			__func__, +			link->mst_stream_alloc_table.stream_count); + +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +		DC_LOG_MST("stream_enc[%d]: %p      " +		"stream[%d].hpo_dp_stream_enc: %p      " +		"stream[%d].vcp_id: %d      " +		"stream[%d].slot_count: %d\n", +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].vcp_id, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].slot_count); +	} + +	
ASSERT(proposed_table.stream_count > 0); + +	/* update mst stream allocation table hardware state */ +	if (link_hwss->ext.update_stream_allocation_table == NULL || +			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { +		DC_LOG_ERROR("Failure: unknown encoding format\n"); +		return DC_ERROR_UNEXPECTED; +	} + +	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, +			&link->mst_stream_alloc_table); + +	/* poll for immediate branch device ACT handled */ +	dm_helpers_dp_mst_poll_for_allocation_change_trigger( +			stream->ctx, +			stream); + +	return DC_OK; +} + +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct fixed31_32 avg_time_slots_per_mtp; +	struct fixed31_32 pbn; +	struct fixed31_32 pbn_per_slot; +	struct dc_dp_mst_stream_allocation_table proposed_table = {0}; +	uint8_t i; +	enum act_return_status ret; +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	DC_LOGGER_INIT(link->ctx->logger); + +	/* notify immediate branch device table update */ +	if (dm_helpers_dp_mst_write_payload_allocation_table( +				stream->ctx, +				stream, +				&proposed_table, +				true)) { +		/* update mst stream allocation table software state */ +		update_mst_stream_alloc_table( +				link, +				pipe_ctx->stream_res.stream_enc, +				pipe_ctx->stream_res.hpo_dp_stream_enc, +				&proposed_table); +	} + +	DC_LOG_MST("%s  " +			"stream_count: %d: \n ", +			__func__, +			link->mst_stream_alloc_table.stream_count); + +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +		DC_LOG_MST("stream_enc[%d]: %p      " +		"stream[%d].hpo_dp_stream_enc: %p      " +		"stream[%d].vcp_id: %d      " +		"stream[%d].slot_count: %d\n", +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, +		i, +		(void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].vcp_id, +		i, +		link->mst_stream_alloc_table.stream_allocations[i].slot_count); +	} + +	ASSERT(proposed_table.stream_count > 0); + +	/* update mst stream allocation table hardware state */ +	if (link_hwss->ext.update_stream_allocation_table == NULL || +			link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { +		DC_LOG_ERROR("Failure: unknown encoding format\n"); +		return DC_ERROR_UNEXPECTED; +	} + +	link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, +			&link->mst_stream_alloc_table); + +	/* poll for immediate branch device ACT handled */ +	ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( +			stream->ctx, +			stream); + +	if (ret != ACT_LINK_LOST) { +		/* send ALLOCATE_PAYLOAD sideband message with updated pbn */ +		dm_helpers_dp_mst_send_payload_allocation( +				stream->ctx, +				stream, +				true); +	} + +	/* increase throttled vcp size */ +	pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); +	pbn_per_slot = get_pbn_per_slot(stream); +	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + +	if (link_hwss->ext.set_throttled_vcp_size) +		link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); +	if (link_hwss->ext.set_hblank_min_symbol_width) +		link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, +				&link->cur_link_settings, +				avg_time_slots_per_mtp); + +	return DC_OK; +} + +static void disable_link_dp(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type 
signal) +{ +	struct dc_link_settings link_settings = link->cur_link_settings; + +	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST && +			link->mst_stream_alloc_table.stream_count > 0) +		/* disable MST link only when last vc payload is deallocated */ +		return; + +	dp_disable_link_phy(link, link_res, signal); + +	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) +		/* set the sink to SST mode after disabling the link */ +		enable_mst_on_sink(link, false); + +	if (link_dp_get_encoding_format(&link_settings) == +			DP_8b_10b_ENCODING) { +		dp_set_fec_enable(link, false); +		dp_set_fec_ready(link, link_res, false); +	} +} + +static void disable_link(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type signal) +{ +	if (dc_is_dp_signal(signal)) { +		disable_link_dp(link, link_res, signal); +	} else if (signal != SIGNAL_TYPE_VIRTUAL) { +		link->dc->hwss.disable_link_output(link, link_res, signal); +	} + +	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		/* MST disable link only when no stream use the link */ +		if (link->mst_stream_alloc_table.stream_count <= 0) +			link->link_status.link_active = false; +	} else { +		link->link_status.link_active = false; +	} +} + +static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	enum dc_color_depth display_color_depth; +	enum engine_id eng_id; +	struct ext_hdmi_settings settings = {0}; +	bool is_over_340mhz = false; +	bool is_vga_mode = (stream->timing.h_addressable == 640) +			&& (stream->timing.v_addressable == 480); +	struct dc *dc = pipe_ctx->stream->ctx->dc; + +	if (stream->phy_pix_clk == 0) +		stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; +	if (stream->phy_pix_clk > 340000) +		is_over_340mhz = true; + +	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { +		unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & +				EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; +		if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { +			/* DP159, Retimer settings */ +			eng_id = pipe_ctx->stream_res.stream_enc->id; + +			if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { +				write_i2c_retimer_setting(pipe_ctx, +						is_vga_mode, is_over_340mhz, &settings); +			} else { +				write_i2c_default_retimer_setting(pipe_ctx, +						is_vga_mode, is_over_340mhz); +			} +		} else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { +			/* PI3EQX1204, Redriver settings */ +			write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); +		} +	} + +	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) +		write_scdc_data( +			stream->link->ddc, +			stream->phy_pix_clk, +			stream->timing.flags.LTE_340MCSC_SCRAMBLE); + +	memset(&stream->link->cur_link_settings, 0, +			sizeof(struct dc_link_settings)); + +	display_color_depth = stream->timing.display_color_depth; +	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) +		display_color_depth = COLOR_DEPTH_888; + +	dc->hwss.enable_tmds_link_output( +			link, +			&pipe_ctx->link_res, +			pipe_ctx->stream->signal, +			pipe_ctx->clock_source->id, +			display_color_depth, +			stream->phy_pix_clk); + +	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) +		read_scdc_data(link->ddc); +} + +static enum dc_status enable_link_dp(struct dc_state *state, +				     struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	enum dc_status status; +	bool skip_video_pattern; +	struct dc_link *link = stream->link; +	const struct dc_link_settings 
*link_settings = +			&pipe_ctx->link_config.dp_link_settings; +	bool fec_enable; +	int i; +	bool apply_seamless_boot_optimization = false; +	uint32_t bl_oled_enable_delay = 50; // in ms +	uint32_t post_oui_delay = 30; // 30ms +	/* Reduce link bandwidth between failed link training attempts. */ +	bool do_fallback = false; + +	// check for seamless boot +	for (i = 0; i < state->stream_count; i++) { +		if (state->streams[i]->apply_seamless_boot_optimization) { +			apply_seamless_boot_optimization = true; +			break; +		} +	} + +	/* Train with fallback when enabling DPIA link. Conventional links are +	 * trained with fallback during sink detection. +	 */ +	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +		do_fallback = true; + +	/* +	 * Temporary w/a to get DP2.0 link rates to work with SST. +	 * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. +	 */ +	if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING && +			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && +			link->dc->debug.set_mst_en_for_sst) { +		enable_mst_on_sink(link, true); +	} +	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { +		/*in case it is not on*/ +		if (!link->dc->config.edp_no_power_sequencing) +			link->dc->hwss.edp_power_control(link, true); +		link->dc->hwss.edp_wait_for_hpd_ready(link, true); +	} + +	if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { +		/* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ +	} else { +		pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = +				link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; +		if (state->clk_mgr && !apply_seamless_boot_optimization) +			state->clk_mgr->funcs->update_clocks(state->clk_mgr, +					state, false); +	} + +	// during mode switch we do DP_SET_POWER off then on, and OUI is lost +	dpcd_set_source_specific_data(link); +	if (link->dpcd_sink_ext_caps.raw != 0) { +		post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; +		msleep(post_oui_delay); +	} + +	// similarly, mode switch can cause loss of cable ID +	dpcd_write_cable_id_to_dprx(link); + +	skip_video_pattern = true; + +	if (link_settings->link_rate == LINK_RATE_LOW) +		skip_video_pattern = false; + +	if (perform_link_training_with_retries(link_settings, +					       skip_video_pattern, +					       LINK_TRAINING_ATTEMPTS, +					       pipe_ctx, +					       pipe_ctx->stream->signal, +					       do_fallback)) { +		status = DC_OK; +	} else { +		status = DC_FAIL_DP_LINK_TRAINING; +	} + +	if (link->preferred_training_settings.fec_enable) +		fec_enable = *link->preferred_training_settings.fec_enable; +	else +		fec_enable = true; + +	if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) +		dp_set_fec_enable(link, fec_enable); + +	// during mode set we do DP_SET_POWER off then on, aux writes are lost +	if (link->dpcd_sink_ext_caps.bits.oled == 1 || +		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || +		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { +		set_default_brightness_aux(link); // TODO: use cached if known +		if (link->dpcd_sink_ext_caps.bits.oled == 1) +			msleep(bl_oled_enable_delay); +		link_backlight_enable_aux(link, true); +	} + +	return status; +} + +static enum dc_status enable_link_edp( +		struct dc_state *state, +		struct pipe_ctx *pipe_ctx) +{ +	return enable_link_dp(state, pipe_ctx); +} + +static void enable_link_lvds(struct pipe_ctx *pipe_ctx) +{ +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	struct dc 
*dc = stream->ctx->dc; + +	if (stream->phy_pix_clk == 0) +		stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; + +	memset(&stream->link->cur_link_settings, 0, +			sizeof(struct dc_link_settings)); +	dc->hwss.enable_lvds_link_output( +			link, +			&pipe_ctx->link_res, +			pipe_ctx->clock_source->id, +			stream->phy_pix_clk); + +} + +static enum dc_status enable_link_dp_mst( +		struct dc_state *state, +		struct pipe_ctx *pipe_ctx) +{ +	struct dc_link *link = pipe_ctx->stream->link; + +	/* sink signal type after MST branch is MST. Multiple MST sinks +	 * share one link. Link DP PHY is enable or training only once. +	 */ +	if (link->link_status.link_active) +		return DC_OK; + +	/* clear payload table */ +	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link); + +	/* to make sure the pending down rep can be processed +	 * before enabling the link +	 */ +	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link); + +	/* set the sink to MST mode before enabling the link */ +	enable_mst_on_sink(link, true); + +	return enable_link_dp(state, pipe_ctx); +} + +static enum dc_status enable_link( +		struct dc_state *state, +		struct pipe_ctx *pipe_ctx) +{ +	enum dc_status status = DC_ERROR_UNEXPECTED; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; + +	/* There's some scenarios where driver is unloaded with display +	 * still enabled. When driver is reloaded, it may cause a display +	 * to not light up if there is a mismatch between old and new +	 * link settings. Need to call disable first before enabling at +	 * new link settings. +	 */ +	if (link->link_status.link_active) { +		disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); +	} + +	switch (pipe_ctx->stream->signal) { +	case SIGNAL_TYPE_DISPLAY_PORT: +		status = enable_link_dp(state, pipe_ctx); +		break; +	case SIGNAL_TYPE_EDP: +		status = enable_link_edp(state, pipe_ctx); +		break; +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +		status = enable_link_dp_mst(state, pipe_ctx); +		msleep(200); +		break; +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +	case SIGNAL_TYPE_HDMI_TYPE_A: +		enable_link_hdmi(pipe_ctx); +		status = DC_OK; +		break; +	case SIGNAL_TYPE_LVDS: +		enable_link_lvds(pipe_ctx); +		status = DC_OK; +		break; +	case SIGNAL_TYPE_VIRTUAL: +		status = DC_OK; +		break; +	default: +		break; +	} + +	if (status == DC_OK) { +		pipe_ctx->stream->link->link_status.link_active = true; +	} + +	return status; +} + +void link_set_dpms_off(struct pipe_ctx *pipe_ctx) +{ +	struct dc  *dc = pipe_ctx->stream->ctx->dc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->sink->link; +	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + +	ASSERT(is_master_pipe_for_link(link, pipe_ctx)); + +	if (link_is_dp_128b_132b_signal(pipe_ctx)) +		vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; + +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	if (pipe_ctx->stream->sink) { +		if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && +			pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { +			DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, +			pipe_ctx->stream->sink->edid_caps.display_name, +			pipe_ctx->stream->signal); +		} +	} + +	if (!IS_DIAG_DC(dc->ctx->dce_environment) && +			dc_is_virtual_signal(pipe_ctx->stream->signal)) +		return; + +	if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { +		if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) +			set_avmute(pipe_ctx, true); +	} + +	
dc->hwss.disable_audio_stream(pipe_ctx); + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +	update_psp_stream_config(pipe_ctx, true); +#endif +	dc->hwss.blank_stream(pipe_ctx); + +	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) +		deallocate_mst_payload(pipe_ctx); +	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && +			link_is_dp_128b_132b_signal(pipe_ctx)) +		update_sst_payload(pipe_ctx, false); + +	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { +		struct ext_hdmi_settings settings = {0}; +		enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; + +		unsigned short masked_chip_caps = link->chip_caps & +				EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; +		//Need to inform that sink is going to use legacy HDMI mode. +		write_scdc_data( +			link->ddc, +			165000,//vbios only handles 165Mhz. +			false); +		if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { +			/* DP159, Retimer settings */ +			if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) +				write_i2c_retimer_setting(pipe_ctx, +						false, false, &settings); +			else +				write_i2c_default_retimer_setting(pipe_ctx, +						false, false); +		} else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { +			/* PI3EQX1204, Redriver settings */ +			write_i2c_redriver_setting(pipe_ctx, false); +		} +	} + +	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && +			!link_is_dp_128b_132b_signal(pipe_ctx)) { + +		/* In DP1.x SST mode, our encoder will go to TPS1 +		 * when link is on but stream is off. +		 * Disabling link before stream will avoid exposing TPS1 pattern +		 * during the disable sequence as it will confuse some receivers +		 * state machine. +		 * In DP2 or MST mode, our encoder will stay video active +		 */ +		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); +		dc->hwss.disable_stream(pipe_ctx); +	} else { +		dc->hwss.disable_stream(pipe_ctx); +		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); +	} + +	if (pipe_ctx->stream->timing.flags.DSC) { +		if (dc_is_dp_signal(pipe_ctx->stream->signal)) +			link_set_dsc_enable(pipe_ctx, false); +	} +	if (link_is_dp_128b_132b_signal(pipe_ctx)) { +		if (pipe_ctx->stream_res.tg->funcs->set_out_mux) +			pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); +	} + +	if (vpg && vpg->funcs->vpg_powerdown) +		vpg->funcs->vpg_powerdown(vpg); +} + +void link_set_dpms_on( +		struct dc_state *state, +		struct pipe_ctx *pipe_ctx) +{ +	struct dc *dc = pipe_ctx->stream->ctx->dc; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->sink->link; +	enum dc_status status; +	struct link_encoder *link_enc; +	enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; +	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + +	ASSERT(is_master_pipe_for_link(link, pipe_ctx)); + +	if (link_is_dp_128b_132b_signal(pipe_ctx)) +		vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; + +	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + +	if (pipe_ctx->stream->sink) { +		if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && +			pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { +			DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, +			pipe_ctx->stream->sink->edid_caps.display_name, +			pipe_ctx->stream->signal); +		} +	} + +	if (!IS_DIAG_DC(dc->ctx->dce_environment) && +			dc_is_virtual_signal(pipe_ctx->stream->signal)) +		
return; + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	if (!dc_is_virtual_signal(pipe_ctx->stream->signal) +			&& !link_is_dp_128b_132b_signal(pipe_ctx)) { +		if (link_enc) +			link_enc->funcs->setup( +				link_enc, +				pipe_ctx->stream->signal); +	} + +	pipe_ctx->stream->link->link_state_valid = true; + +	if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { +		if (link_is_dp_128b_132b_signal(pipe_ctx)) +			otg_out_dest = OUT_MUX_HPO_DP; +		else +			otg_out_dest = OUT_MUX_DIO; +		pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); +	} + +	link_hwss->setup_stream_attribute(pipe_ctx); + +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { +		bool apply_edp_fast_boot_optimization = +			pipe_ctx->stream->apply_edp_fast_boot_optimization; + +		pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + +		// Enable VPG before building infoframe +		if (vpg && vpg->funcs->vpg_poweron) +			vpg->funcs->vpg_poweron(vpg); + +		resource_build_info_frame(pipe_ctx); +		dc->hwss.update_info_frame(pipe_ctx); + +		if (dc_is_dp_signal(pipe_ctx->stream->signal)) +			link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + +		/* Do not touch link on seamless boot optimization. */ +		if (pipe_ctx->stream->apply_seamless_boot_optimization) { +			pipe_ctx->stream->dpms_off = false; + +			/* Still enable stream features & audio on seamless boot for DP external displays */ +			if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { +				enable_stream_features(pipe_ctx); +				dc->hwss.enable_audio_stream(pipe_ctx); +			} + +#if defined(CONFIG_DRM_AMD_DC_HDCP) +			update_psp_stream_config(pipe_ctx, false); +#endif +			return; +		} + +		/* eDP lit up by bios already, no need to enable again. */ +		if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && +					apply_edp_fast_boot_optimization && +					!pipe_ctx->stream->timing.flags.DSC && +					!pipe_ctx->next_odm_pipe) { +			pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) +			update_psp_stream_config(pipe_ctx, false); +#endif +			return; +		} + +		if (pipe_ctx->stream->dpms_off) +			return; + +		/* Have to setup DSC before DIG FE and BE are connected (which happens before the +		 * link training). This is to make sure the bandwidth sent to DIG BE won't be +		 * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag +		 * will be automatically set at a later time when the video is enabled +		 * (DP_VID_STREAM_EN = 1). +		 */ +		if (pipe_ctx->stream->timing.flags.DSC) { +			if (dc_is_dp_signal(pipe_ctx->stream->signal) || +				dc_is_virtual_signal(pipe_ctx->stream->signal)) +			link_set_dsc_enable(pipe_ctx, true); + +		} + +		status = enable_link(state, pipe_ctx); + +		if (status != DC_OK) { +			DC_LOG_WARNING("enabling link %u failed: %d\n", +			pipe_ctx->stream->link->link_index, +			status); + +			/* Abort stream enable *unless* the failure was due to +			 * DP link training - some DP monitors will recover and +			 * show the stream anyway. But MST displays can't proceed +			 * without link training. 
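+			 * (The check below therefore only tolerates
+			 * DC_FAIL_DP_LINK_TRAINING for non-MST signals.)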
+			 */ +			if (status != DC_FAIL_DP_LINK_TRAINING || +					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +				if (false == stream->link->link_status.link_active) +					disable_link(stream->link, &pipe_ctx->link_res, +							pipe_ctx->stream->signal); +				BREAK_TO_DEBUGGER(); +				return; +			} +		} + +		/* turn off otg test pattern if enable */ +		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) +			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, +					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, +					COLOR_DEPTH_UNDEFINED); + +		/* This second call is needed to reconfigure the DIG +		 * as a workaround for the incorrect value being applied +		 * from transmitter control. +		 */ +		if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || +				link_is_dp_128b_132b_signal(pipe_ctx))) +			if (link_enc) +				link_enc->funcs->setup( +					link_enc, +					pipe_ctx->stream->signal); + +		dc->hwss.enable_stream(pipe_ctx); + +		/* Set DPS PPS SDP (AKA "info frames") */ +		if (pipe_ctx->stream->timing.flags.DSC) { +			if (dc_is_dp_signal(pipe_ctx->stream->signal) || +					dc_is_virtual_signal(pipe_ctx->stream->signal)) { +				dp_set_dsc_on_rx(pipe_ctx, true); +				link_set_dsc_pps_packet(pipe_ctx, true, true); +			} +		} + +		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) +			allocate_mst_payload(pipe_ctx); +		else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && +				link_is_dp_128b_132b_signal(pipe_ctx)) +			update_sst_payload(pipe_ctx, true); + +		dc->hwss.unblank_stream(pipe_ctx, +			&pipe_ctx->stream->link->cur_link_settings); + +		if (stream->sink_patches.delay_ignore_msa > 0) +			msleep(stream->sink_patches.delay_ignore_msa); + +		if (dc_is_dp_signal(pipe_ctx->stream->signal)) +			enable_stream_features(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_HDCP) +		update_psp_stream_config(pipe_ctx, false); +#endif + +		dc->hwss.enable_audio_stream(pipe_ctx); + +	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) +		if (link_is_dp_128b_132b_signal(pipe_ctx)) +			dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx); +		if (dc_is_dp_signal(pipe_ctx->stream->signal) || +				dc_is_virtual_signal(pipe_ctx->stream->signal)) +			link_set_dsc_enable(pipe_ctx, true); +	} + +	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { +		set_avmute(pipe_ctx, false); +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h new file mode 100644 index 000000000000..33d312dabdb8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h @@ -0,0 +1,40 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPMS_H__ +#define __DC_LINK_DPMS_H__ + +#include "link.h" +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, +		bool enable, bool immediate_update); +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( +		const struct dc_stream_state *stream, +		const struct dc_link *link); +void link_set_all_streams_dpms_off_for_link(struct dc_link *link); +void link_get_master_pipes_with_dpms_on(const struct dc_link *link, +		struct dc_state *state, +		uint8_t *count, +		struct pipe_ctx *pipes[MAX_PIPES]); +#endif /* __DC_LINK_DPMS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c new file mode 100644 index 000000000000..aeb26a4d539e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -0,0 +1,577 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the creation/destruction of link structure. + */ +#include "link_factory.h" +#include "protocols/link_ddc.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_hpd.h" +#include "gpio_service_interface.h" +#include "atomfirmware.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) 
\ +	DC_LOG_HW_HOTPLUG(  \ +		__VA_ARGS__) + +static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder) +{ +	switch (encoder.id) { +	case ENCODER_ID_INTERNAL_UNIPHY: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_UNIPHY_A; +		case ENUM_ID_2: +			return TRANSMITTER_UNIPHY_B; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	case ENCODER_ID_INTERNAL_UNIPHY1: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_UNIPHY_C; +		case ENUM_ID_2: +			return TRANSMITTER_UNIPHY_D; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	case ENCODER_ID_INTERNAL_UNIPHY2: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_UNIPHY_E; +		case ENUM_ID_2: +			return TRANSMITTER_UNIPHY_F; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	case ENCODER_ID_INTERNAL_UNIPHY3: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_UNIPHY_G; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	case ENCODER_ID_EXTERNAL_NUTMEG: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_NUTMEG_CRT; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	case ENCODER_ID_EXTERNAL_TRAVIS: +		switch (encoder.enum_id) { +		case ENUM_ID_1: +			return TRANSMITTER_TRAVIS_CRT; +		case ENUM_ID_2: +			return TRANSMITTER_TRAVIS_LCD; +		default: +			return TRANSMITTER_UNKNOWN; +		} +	break; +	default: +		return TRANSMITTER_UNKNOWN; +	} +} + +static void link_destruct(struct dc_link *link) +{ +	int i; + +	if (link->hpd_gpio) { +		dal_gpio_destroy_irq(&link->hpd_gpio); +		link->hpd_gpio = NULL; +	} + +	if (link->ddc) +		link_destroy_ddc_service(&link->ddc); + +	if (link->panel_cntl) +		link->panel_cntl->funcs->destroy(&link->panel_cntl); + +	if (link->link_enc) { +		/* Update link encoder resource tracking variables. These are used for +		 * the dynamic assignment of link encoders to streams. Virtual links +		 * are not assigned encoder resources on creation. 
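+		 * Physical links release their slot in res_pool->link_encoders[]
+		 * (indexed by eng_id - ENGINE_ID_DIGA) and decrement
+		 * dig_link_enc_count before the encoder itself is destroyed.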
+		 */ +		if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { +			link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; +			link->dc->res_pool->dig_link_enc_count--; +		} +		link->link_enc->funcs->destroy(&link->link_enc); +	} + +	if (link->local_sink) +		dc_sink_release(link->local_sink); + +	for (i = 0; i < link->sink_count; ++i) +		dc_sink_release(link->remote_sinks[i]); +} + +static enum channel_id get_ddc_line(struct dc_link *link) +{ +	struct ddc *ddc; +	enum channel_id channel; + +	channel = CHANNEL_ID_UNKNOWN; + +	ddc = get_ddc_pin(link->ddc); + +	if (ddc) { +		switch (dal_ddc_get_line(ddc)) { +		case GPIO_DDC_LINE_DDC1: +			channel = CHANNEL_ID_DDC1; +			break; +		case GPIO_DDC_LINE_DDC2: +			channel = CHANNEL_ID_DDC2; +			break; +		case GPIO_DDC_LINE_DDC3: +			channel = CHANNEL_ID_DDC3; +			break; +		case GPIO_DDC_LINE_DDC4: +			channel = CHANNEL_ID_DDC4; +			break; +		case GPIO_DDC_LINE_DDC5: +			channel = CHANNEL_ID_DDC5; +			break; +		case GPIO_DDC_LINE_DDC6: +			channel = CHANNEL_ID_DDC6; +			break; +		case GPIO_DDC_LINE_DDC_VGA: +			channel = CHANNEL_ID_DDC_VGA; +			break; +		case GPIO_DDC_LINE_I2C_PAD: +			channel = CHANNEL_ID_I2C_PAD; +			break; +		default: +			BREAK_TO_DEBUGGER(); +			break; +		} +	} + +	return channel; +} + +static bool dc_link_construct_phy(struct dc_link *link, +			      const struct link_init_data *init_params) +{ +	uint8_t i; +	struct ddc_service_init_data ddc_service_init_data = { 0 }; +	struct dc_context *dc_ctx = init_params->ctx; +	struct encoder_init_data enc_init_data = { 0 }; +	struct panel_cntl_init_data panel_cntl_init_data = { 0 }; +	struct integrated_info info = { 0 }; +	struct dc_bios *bios = init_params->dc->ctx->dc_bios; +	const struct dc_vbios_funcs *bp_funcs = bios->funcs; +	struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; + +	DC_LOGGER_INIT(dc_ctx->logger); + +	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; +	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; +	link->link_status.dpcd_caps = &link->dpcd_caps; + +	link->dc = init_params->dc; +	link->ctx = dc_ctx; +	link->link_index = init_params->link_index; + +	memset(&link->preferred_training_settings, 0, +	       sizeof(struct dc_link_training_overrides)); +	memset(&link->preferred_link_setting, 0, +	       sizeof(struct dc_link_settings)); + +	link->link_id = +		bios->funcs->get_connector_id(bios, init_params->connector_index); + +	link->ep_type = DISPLAY_ENDPOINT_PHY; + +	DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); + +	if (bios->funcs->get_disp_connector_caps_info) { +		bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); +		link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; +		DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); +	} + +	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { +		dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", +				     __func__, init_params->connector_index, +				     link->link_id.type, OBJECT_TYPE_CONNECTOR); +		goto create_fail; +	} + +	if (link->dc->res_pool->funcs->link_init) +		link->dc->res_pool->funcs->link_init(link); + +	link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, +				      link->ctx->gpio_service); + +	if (link->hpd_gpio) { +		dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); +		dal_gpio_unlock_pin(link->hpd_gpio); +		link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); + +		DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); +		DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); +	} + +	switch (link->link_id.id) { +	case CONNECTOR_ID_HDMI_TYPE_A: +		link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; + +		break; +	case CONNECTOR_ID_SINGLE_LINK_DVID: +	case CONNECTOR_ID_SINGLE_LINK_DVII: +		link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +		break; +	case CONNECTOR_ID_DUAL_LINK_DVID: +	case CONNECTOR_ID_DUAL_LINK_DVII: +		link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; +		break; +	case CONNECTOR_ID_DISPLAY_PORT: +	case CONNECTOR_ID_USBC: +		link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; + +		if (link->hpd_gpio) +			link->irq_source_hpd_rx = +					dal_irq_get_rx_source(link->hpd_gpio); + +		break; +	case CONNECTOR_ID_EDP: +		link->connector_signal = SIGNAL_TYPE_EDP; + +		if (link->hpd_gpio) { +			if (!link->dc->config.allow_edp_hotplug_detection) +				link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + +			switch (link->dc->config.allow_edp_hotplug_detection) { +			case 1: // only the 1st eDP handles hotplug +				if (link->link_index == 0) +					link->irq_source_hpd_rx = +						dal_irq_get_rx_source(link->hpd_gpio); +				else +					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; +				break; +			case 2: // only the 2nd eDP handles hotplug +				if (link->link_index == 1) +					link->irq_source_hpd_rx = +						dal_irq_get_rx_source(link->hpd_gpio); +				else +					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; +				break; +			default: +				break; +			} +		} + +		break; +	case CONNECTOR_ID_LVDS: +		link->connector_signal = SIGNAL_TYPE_LVDS; +		break; +	default: +		DC_LOG_WARNING("Unsupported Connector type:%d!\n", +			       link->link_id.id); +		goto create_fail; +	} + +	/* TODO: #DAL3 Implement id to str function.*/ +	LINK_INFO("Connector[%d] description:" +		  "signal %d\n", +		  init_params->connector_index, +		  link->connector_signal); + +	ddc_service_init_data.ctx = link->ctx; +	ddc_service_init_data.id = link->link_id; +	ddc_service_init_data.link = link; +	link->ddc = link_create_ddc_service(&ddc_service_init_data); + +	if (!link->ddc) { +		DC_ERROR("Failed to create ddc_service!\n"); +		goto ddc_create_fail; +	} + +	if (!link->ddc->ddc_pin) { +		DC_ERROR("Failed to get I2C info for connector!\n"); +		goto ddc_create_fail; +	} + +	link->ddc_hw_inst = +		dal_ddc_get_line(get_ddc_pin(link->ddc)); + + +	if (link->dc->res_pool->funcs->panel_cntl_create && +		(link->link_id.id == CONNECTOR_ID_EDP || +			link->link_id.id == CONNECTOR_ID_LVDS)) { +		panel_cntl_init_data.ctx = dc_ctx; +		panel_cntl_init_data.inst = +			panel_cntl_init_data.ctx->dc_edp_id_count; +		link->panel_cntl = +			link->dc->res_pool->funcs->panel_cntl_create( +								&panel_cntl_init_data); +		panel_cntl_init_data.ctx->dc_edp_id_count++; + +		if (link->panel_cntl == NULL) { +			DC_ERROR("Failed to create link panel_cntl!\n"); +			goto panel_cntl_create_fail; +		} +	} + +	enc_init_data.ctx = dc_ctx; +	
bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+			      &enc_init_data.encoder);
+	enc_init_data.connector = link->link_id;
+	enc_init_data.channel = get_ddc_line(link);
+	enc_init_data.hpd_source = get_hpd_line(link);
+
+	link->hpd_src = enc_init_data.hpd_source;
+
+	enc_init_data.transmitter =
+		translate_encoder_to_transmitter(enc_init_data.encoder);
+	link->link_enc =
+		link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+	if (!link->link_enc) {
+		DC_ERROR("Failed to create link encoder!\n");
+		goto link_enc_create_fail;
+	}
+
+	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
+	/* Update link encoder tracking variables. These are used for the dynamic
+	 * assignment of link encoders to streams.
+	 */
+	link->eng_id = link->link_enc->preferred_engine;
+	link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
+	link->dc->res_pool->dig_link_enc_count++;
+
+	link->link_enc_hw_inst = link->link_enc->transmitter;
+	for (i = 0; i < 4; i++) {
+		if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+					     link->link_id, i,
+					     &link->device_tag) != BP_RESULT_OK) {
+			DC_ERROR("Failed to find device tag!\n");
+			goto device_tag_fail;
+		}
+
+		/* Look for device tag that matches connector signal,
+		 * CRT for rgb, LCD for other supported signal types
+		 */
+		if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+						      link->device_tag.dev_id))
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+		    link->connector_signal != SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+		    link->connector_signal == SIGNAL_TYPE_RGB)
+			continue;
+
+		DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+		DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
+		break;
+	}
+
+	if (bios->integrated_info)
+		info = *bios->integrated_info;
+
+	/* Look for channel mapping corresponding to connector and device tag */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+		struct external_display_path *path =
+			&info.ext_disp_conn_info.path[i];
+
+		if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+		    path->device_connector_id.id == link->link_id.id &&
+		    path->device_connector_id.type == link->link_id.type) {
+			if (link->device_tag.acpi_device != 0 &&
+			    path->device_acpi_enum == link->device_tag.acpi_device) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+			} else if (path->device_tag ==
+				   link->device_tag.dev_id.raw_device_tag) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+				DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+				DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+			}
+
+			if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+				link->bios_forced_drive_settings.VOLTAGE_SWING =
+						(info.ext_disp_conn_info.fixdpvoltageswing & 
0x3); +				link->bios_forced_drive_settings.PRE_EMPHASIS = +						((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); +			} + +			break; +		} +	} + +	if (bios->funcs->get_atom_dc_golden_table) +		bios->funcs->get_atom_dc_golden_table(bios); + +	/* +	 * TODO check if GPIO programmed correctly +	 * +	 * If GPIO isn't programmed correctly HPD might not rise or drain +	 * fast enough, leading to bounces. +	 */ +	program_hpd_filter(link); + +	link->psr_settings.psr_vtotal_control_support = false; +	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + +	DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); +	return true; +device_tag_fail: +	link->link_enc->funcs->destroy(&link->link_enc); +link_enc_create_fail: +	if (link->panel_cntl != NULL) +		link->panel_cntl->funcs->destroy(&link->panel_cntl); +panel_cntl_create_fail: +	link_destroy_ddc_service(&link->ddc); +ddc_create_fail: +create_fail: + +	if (link->hpd_gpio) { +		dal_gpio_destroy_irq(&link->hpd_gpio); +		link->hpd_gpio = NULL; +	} + +	DC_LOG_DC("BIOS object table - %s failed.\n", __func__); +	return false; +} + +static bool dc_link_construct_dpia(struct dc_link *link, +			      const struct link_init_data *init_params) +{ +	struct ddc_service_init_data ddc_service_init_data = { 0 }; +	struct dc_context *dc_ctx = init_params->ctx; + +	DC_LOGGER_INIT(dc_ctx->logger); + +	/* Initialized irq source for hpd and hpd rx */ +	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; +	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; +	link->link_status.dpcd_caps = &link->dpcd_caps; + +	link->dc = init_params->dc; +	link->ctx = dc_ctx; +	link->link_index = init_params->link_index; + +	memset(&link->preferred_training_settings, 0, +	       sizeof(struct dc_link_training_overrides)); +	memset(&link->preferred_link_setting, 0, +	       sizeof(struct dc_link_settings)); + +	/* Dummy Init for linkid */ +	link->link_id.type = OBJECT_TYPE_CONNECTOR; +	link->link_id.id = CONNECTOR_ID_DISPLAY_PORT; +	link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index; +	link->is_internal_display = false; +	link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; +	LINK_INFO("Connector[%d] description:signal %d\n", +		  init_params->connector_index, +		  link->connector_signal); + +	link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA; +	link->is_dig_mapping_flexible = true; + +	/* TODO: Initialize link : funcs->link_init */ + +	ddc_service_init_data.ctx = link->ctx; +	ddc_service_init_data.id = link->link_id; +	ddc_service_init_data.link = link; +	/* Set indicator for dpia link so that ddc wont be created */ +	ddc_service_init_data.is_dpia_link = true; + +	link->ddc = link_create_ddc_service(&ddc_service_init_data); +	if (!link->ddc) { +		DC_ERROR("Failed to create ddc_service!\n"); +		goto ddc_create_fail; +	} + +	/* Set dpia port index : 0 to number of dpia ports */ +	link->ddc_hw_inst = init_params->connector_index; + +	/* TODO: Create link encoder */ + +	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + +	/* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
*/ +	link->wa_flags.dp_mot_reset_segment = true; + +	return true; + +ddc_create_fail: +	return false; +} + +static bool link_construct(struct dc_link *link, +			      const struct link_init_data *init_params) +{ +	/* Handle dpia case */ +	if (init_params->is_dpia_link == true) +		return dc_link_construct_dpia(link, init_params); +	else +		return dc_link_construct_phy(link, init_params); +} + +struct dc_link *link_create(const struct link_init_data *init_params) +{ +	struct dc_link *link = +			kzalloc(sizeof(*link), GFP_KERNEL); + +	if (NULL == link) +		goto alloc_fail; + +	if (false == link_construct(link, init_params)) +		goto construct_fail; + +	return link; + +construct_fail: +	kfree(link); + +alloc_fail: +	return NULL; +} + +void link_destroy(struct dc_link **link) +{ +	link_destruct(*link); +	kfree(*link); +	*link = NULL; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h new file mode 100644 index 000000000000..5b846147c4a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FACTORY_H__ +#define __LINK_FACTORY_H__ +#include "link.h" + +#endif /* __LINK_FACTORY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.c b/drivers/gpu/drm/amd/display/dc/link/link_resource.c new file mode 100644 index 000000000000..bd42bb273c0c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.c @@ -0,0 +1,114 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +/* FILE POLICY AND INTENDED USAGE: + * This file implements accessors to link resource. + */ + +#include "link_resource.h" +#include "protocols/link_dp_capability.h" + +void link_get_cur_link_res(const struct dc_link *link, +		struct link_resource *link_res) +{ +	int i; +	struct pipe_ctx *pipe = NULL; + +	memset(link_res, 0, sizeof(*link_res)); + +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { +			if (pipe->stream->link == link) { +				*link_res = pipe->link_res; +				break; +			} +		} +	} + +} + +void link_get_cur_res_map(const struct dc *dc, uint32_t *map) +{ +	struct dc_link *link; +	uint32_t i; +	uint32_t hpo_dp_recycle_map = 0; + +	*map = 0; + +	if (dc->caps.dp_hpo) { +		for (i = 0; i < dc->caps.max_links; i++) { +			link = dc->links[i]; +			if (link->link_status.link_active && +					link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && +					link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) +				/* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability +				 * but current link doesn't use it. +				 */ +				hpo_dp_recycle_map |= (1 << i); +		} +		*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); +	} +} + +void link_restore_res_map(const struct dc *dc, uint32_t *map) +{ +	struct dc_link *link; +	uint32_t i; +	unsigned int available_hpo_dp_count; +	uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) +			>> LINK_RES_HPO_DP_REC_MAP__SHIFT; + +	if (dc->caps.dp_hpo) { +		available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; +		/* remove excess 128b/132b encoding support for not recycled links */ +		for (i = 0; i < dc->caps.max_links; i++) { +			if ((hpo_dp_recycle_map & (1 << i)) == 0) { +				link = dc->links[i]; +				if (link->type != dc_connection_none && +						link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { +					if (available_hpo_dp_count > 0) +						available_hpo_dp_count--; +					else +						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ +						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; +				} +			} +		} +		/* remove excess 128b/132b encoding support for recycled links */ +		for (i = 0; i < dc->caps.max_links; i++) { +			if ((hpo_dp_recycle_map & (1 << i)) != 0) { +				link = dc->links[i]; +				if (link->type != dc_connection_none && +						link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { +					if (available_hpo_dp_count > 0) +						available_hpo_dp_count--; +					else +						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ +						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; +				} +			} +		} +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h new file mode 100644 index 000000000000..45554d30adf0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_RESOURCE_H__ +#define __LINK_RESOURCE_H__ +#include "link.h" +void link_get_cur_link_res(const struct dc_link *link, +		struct link_resource *link_res); + +#endif /* __LINK_RESOURCE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c new file mode 100644 index 000000000000..d4f6ee6ca948 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -0,0 +1,398 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns timing validation against various link limitations. (ex. + * link bandwidth, receiver capability or our hardware capability) It also + * provides helper functions exposing bandwidth formulas used in validation. 
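+ * As a rough example, a 4-lane HBR3 link using 8b/10b encoding provides about
+ * 8.1 Gbps x 4 lanes x 80% = 25.92 Gbps of data bandwidth (roughly 3% less
+ * once FEC overhead is included), and a timing is only considered valid on
+ * that link if its required bandwidth fits within this budget.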
+ */ +#include "link_validation.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing) +{ + +	uint32_t pxl_clk = timing->pix_clk_100hz; + +	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) +		pxl_clk /= 2; +	else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) +		pxl_clk = pxl_clk * 2 / 3; + +	if (timing->display_color_depth == COLOR_DEPTH_101010) +		pxl_clk = pxl_clk * 10 / 8; +	else if (timing->display_color_depth == COLOR_DEPTH_121212) +		pxl_clk = pxl_clk * 12 / 8; + +	return pxl_clk; +} + +static bool dp_active_dongle_validate_timing( +		const struct dc_crtc_timing *timing, +		const struct dpcd_caps *dpcd_caps) +{ +	const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; + +	switch (dpcd_caps->dongle_type) { +	case DISPLAY_DONGLE_DP_VGA_CONVERTER: +	case DISPLAY_DONGLE_DP_DVI_CONVERTER: +	case DISPLAY_DONGLE_DP_DVI_DONGLE: +		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) +			return true; +		else +			return false; +	default: +		break; +	} + +	if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && +			dongle_caps->extendedCapValid == true) { +		/* Check Pixel Encoding */ +		switch (timing->pixel_encoding) { +		case PIXEL_ENCODING_RGB: +		case PIXEL_ENCODING_YCBCR444: +			break; +		case PIXEL_ENCODING_YCBCR422: +			if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) +				return false; +			break; +		case PIXEL_ENCODING_YCBCR420: +			if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) +				return false; +			break; +		default: +			/* Invalid Pixel Encoding*/ +			return false; +		} + +		switch (timing->display_color_depth) { +		case COLOR_DEPTH_666: +		case COLOR_DEPTH_888: +			/*888 and 666 should always be supported*/ +			break; +		case COLOR_DEPTH_101010: +			if (dongle_caps->dp_hdmi_max_bpc < 10) +				return false; +			break; +		case COLOR_DEPTH_121212: +			if (dongle_caps->dp_hdmi_max_bpc < 12) +				return false; +			break; +		case COLOR_DEPTH_141414: +		case COLOR_DEPTH_161616: +		default: +			/* These color depths are currently not supported */ +			return false; +		} + +		/* Check 3D format */ +		switch (timing->timing_3d_format) { +		case TIMING_3D_FORMAT_NONE: +		case TIMING_3D_FORMAT_FRAME_ALTERNATE: +			/*Only frame alternate 3D is supported on active dongle*/ +			break; +		default: +			/*other 3D formats are not supported due to bad infoframe translation */ +			return false; +		} + +		if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter +			struct dc_crtc_timing outputTiming = *timing; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +			if (timing->flags.DSC && !timing->dsc_cfg.is_frl) +				/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ +				outputTiming.flags.DSC = 0; +#endif +			if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) +				return false; +		} else { // DP to HDMI TMDS converter +			if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) +				return false; +		} +	} + +	if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && +			dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && +			dongle_caps->dfp_cap_ext.supported) { + +		if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) +			return false; + +		if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) +			return false; + +		if 
(dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) +			return false; + +		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) { +			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) +				return false; +			if (timing->display_color_depth == COLOR_DEPTH_666 && +					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_888 && +					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_101010 && +					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_121212 && +					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_161616 && +					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc) +				return false; +		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) { +			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) +				return false; +			if (timing->display_color_depth == COLOR_DEPTH_888 && +					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_101010 && +					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_121212 && +					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_161616 && +					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc) +				return false; +		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { +			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) +				return false; +			if (timing->display_color_depth == COLOR_DEPTH_888 && +					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_101010 && +					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_121212 && +					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_161616 && +					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc) +				return false; +		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { +			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) +				return false; +			if (timing->display_color_depth == COLOR_DEPTH_888 && +					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_101010 && +					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_121212 && +					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc) +				return false; +			else if (timing->display_color_depth == COLOR_DEPTH_161616 && +					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc) +				return false; +		} +	} + +	return true; +} + +uint32_t dp_link_bandwidth_kbps( +	const struct dc_link *link, +	const struct dc_link_settings *link_settings) +{ +	uint32_t total_data_bw_efficiency_x10000 = 0; +	uint32_t link_rate_per_lane_kbps = 
0; + +	switch (link_dp_get_encoding_format(link_settings)) { +	case DP_8b_10b_ENCODING: +		/* For 8b/10b encoding: +		 * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. +		 * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. +		 */ +		link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; +		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; +		if (dc_link_should_enable_fec(link)) { +			total_data_bw_efficiency_x10000 /= 100; +			total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; +		} +		break; +	case DP_128b_132b_ENCODING: +		/* For 128b/132b encoding: +		 * link rate is defined in the unit of 10mbps per lane. +		 * total data bandwidth efficiency is always 96.71%. +		 */ +		link_rate_per_lane_kbps = link_settings->link_rate * 10000; +		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; +		break; +	default: +		break; +	} + +	/* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ +	return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; +} + +uint32_t link_timing_bandwidth_kbps( +	const struct dc_crtc_timing *timing) +{ +	uint32_t bits_per_channel = 0; +	uint32_t kbps; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (timing->flags.DSC) +		return dc_dsc_stream_bandwidth_in_kbps(timing, +				timing->dsc_cfg.bits_per_pixel, +				timing->dsc_cfg.num_slices_h, +				timing->dsc_cfg.is_dp); +#endif /* CONFIG_DRM_AMD_DC_DCN */ + +	switch (timing->display_color_depth) { +	case COLOR_DEPTH_666: +		bits_per_channel = 6; +		break; +	case COLOR_DEPTH_888: +		bits_per_channel = 8; +		break; +	case COLOR_DEPTH_101010: +		bits_per_channel = 10; +		break; +	case COLOR_DEPTH_121212: +		bits_per_channel = 12; +		break; +	case COLOR_DEPTH_141414: +		bits_per_channel = 14; +		break; +	case COLOR_DEPTH_161616: +		bits_per_channel = 16; +		break; +	default: +		ASSERT(bits_per_channel != 0); +		bits_per_channel = 8; +		break; +	} + +	kbps = timing->pix_clk_100hz / 10; +	kbps *= bits_per_channel; + +	if (timing->flags.Y_ONLY != 1) { +		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ +		kbps *= 3; +		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) +			kbps /= 2; +		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) +			kbps = kbps * 2 / 3; +	} + +	return kbps; +} + +static bool dp_validate_mode_timing( +	struct dc_link *link, +	const struct dc_crtc_timing *timing) +{ +	uint32_t req_bw; +	uint32_t max_bw; + +	const struct dc_link_settings *link_setting; + +	/* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ +	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && +			!link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && +			dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) +		return false; + +	/*always DP fail safe mode*/ +	if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && +		timing->h_addressable == (uint32_t) 640 && +		timing->v_addressable == (uint32_t) 480) +		return true; + +	link_setting = dc_link_get_link_cap(link); + +	/* TODO: DYNAMIC_VALIDATION needs to be implemented */ +	/*if (flags.DYNAMIC_VALIDATION == 1 && +		link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN) +		link_setting = &link->verified_link_cap; +	*/ + +	req_bw = dc_bandwidth_in_kbps_from_timing(timing); +	max_bw = dc_link_bandwidth_kbps(link, link_setting); + +	if (req_bw <= 
max_bw) { +		/* remember the biggest mode here, during +		 * initial link training (to get +		 * verified_link_cap), LS sends event about +		 * cannot train at reported cap to upper +		 * layer and upper layer will re-enumerate modes. +		 * this is not necessary if the lower +		 * verified_link_cap is enough to drive +		 * all the modes */ + +		/* TODO: DYNAMIC_VALIDATION needs to be implemented */ +		/* if (flags.DYNAMIC_VALIDATION == 1) +			dpsst->max_req_bw_for_verified_linkcap = dal_max( +				dpsst->max_req_bw_for_verified_linkcap, req_bw); */ +		return true; +	} else +		return false; +} + +enum dc_status link_validate_mode_timing( +		const struct dc_stream_state *stream, +		struct dc_link *link, +		const struct dc_crtc_timing *timing) +{ +	uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10; +	struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + +	/* A hack to avoid failing any modes for EDID override feature on +	 * topology change such as lower quality cable for DP or different dongle +	 */ +	if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) +		return DC_OK; + +	/* Passive Dongle */ +	if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk) +		return DC_EXCEED_DONGLE_CAP; + +	/* Active Dongle*/ +	if (!dp_active_dongle_validate_timing(timing, dpcd_caps)) +		return DC_EXCEED_DONGLE_CAP; + +	switch (stream->signal) { +	case SIGNAL_TYPE_EDP: +	case SIGNAL_TYPE_DISPLAY_PORT: +		if (!dp_validate_mode_timing( +				link, +				timing)) +			return DC_NO_DP_LINK_BANDWIDTH; +		break; + +	default: +		break; +	} + +	return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h new file mode 100644 index 000000000000..ab6a44f50032 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_VALIDATION_H__ +#define __LINK_VALIDATION_H__ +#include "link.h" +#endif /* __LINK_VALIDATION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 651231387043..5269125bc2a4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -23,20 +23,20 @@   *   */ -#include "dm_services.h" -#include "dm_helpers.h" -#include "gpio_service_interface.h" -#include "include/ddc_service_types.h" -#include "include/grph_object_id.h" -#include "include/dpcd_defs.h" -#include "include/logger_interface.h" -#include "include/vector.h" -#include "core_types.h" -#include "dc_link_ddc.h" +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements generic display communication protocols such as i2c, aux + * and scdc. The file should not contain any specific applications of these + * protocols such as display capability query, detection, or handshaking such as + * link training. + */ +#include "link_ddc.h" +#include "vector.h"  #include "dce/dce_aux.h" -#include "dmub/inc/dmub_cmd.h" +#include "dal_asic_id.h"  #include "link_dpcd.h" -#include "include/dal_asic_id.h" +#include "dm_helpers.h" +#include "atomfirmware.h"  #define DC_LOGGER_INIT(logger) @@ -45,86 +45,6 @@ static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";  static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";  static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2"; -#define AUX_POWER_UP_WA_DELAY 500 -#define I2C_OVER_AUX_DEFER_WA_DELAY 70 -#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 -#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 - -/* CV smart dongle slave address for retrieving supported HDTV modes*/ -#define CV_SMART_DONGLE_ADDRESS 0x20 -/* DVI-HDMI dongle slave address for retrieving dongle signature*/ -#define DVI_HDMI_DONGLE_ADDRESS 0x68 -struct dvi_hdmi_dongle_signature_data { -	int8_t vendor[3];/* "AMD" */ -	uint8_t version[2]; -	uint8_t size; -	int8_t id[11];/* "6140063500G"*/ -}; -/* DP-HDMI dongle slave address for retrieving dongle signature*/ -#define DP_HDMI_DONGLE_ADDRESS 0x40 -static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; -#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 - -struct dp_hdmi_dongle_signature_data { -	int8_t id[15];/* "DP-HDMI ADAPTOR"*/ -	uint8_t eot;/* end of transmition '\x4' */ -}; - -/* SCDC Address defines (HDMI 2.0)*/ -#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 -#define HDMI_SCDC_ADDRESS  0x54 -#define HDMI_SCDC_SINK_VERSION 0x01 -#define HDMI_SCDC_SOURCE_VERSION 0x02 -#define HDMI_SCDC_UPDATE_0 0x10 -#define HDMI_SCDC_TMDS_CONFIG 0x20 -#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 -#define HDMI_SCDC_CONFIG_0 0x30 -#define HDMI_SCDC_STATUS_FLAGS 0x40 -#define HDMI_SCDC_ERR_DETECT 0x50 -#define HDMI_SCDC_TEST_CONFIG 0xC0 - -union hdmi_scdc_update_read_data { -	uint8_t byte[2]; -	struct { -		uint8_t STATUS_UPDATE:1; -		uint8_t CED_UPDATE:1; -		uint8_t RR_TEST:1; -		uint8_t RESERVED:5; -		uint8_t RESERVED2:8; -	} fields; -}; - -union hdmi_scdc_status_flags_data { -	uint8_t byte; -	struct { -		uint8_t CLOCK_DETECTED:1; -		uint8_t CH0_LOCKED:1; -		uint8_t CH1_LOCKED:1; -		uint8_t CH2_LOCKED:1; -		uint8_t RESERVED:4; -	} fields; -}; - -union hdmi_scdc_ced_data { -	uint8_t byte[7]; -	struct { -		uint8_t CH0_8LOW:8; -		uint8_t CH0_7HIGH:7; -		uint8_t CH0_VALID:1; -		uint8_t CH1_8LOW:8; -		uint8_t CH1_7HIGH:7; -		uint8_t CH1_VALID:1; -		uint8_t CH2_8LOW:8; -		uint8_t CH2_7HIGH:7; -		uint8_t 
CH2_VALID:1; -		uint8_t CHECKSUM:8; -		uint8_t RESERVED:8; -		uint8_t RESERVED2:8; -		uint8_t RESERVED3:8; -		uint8_t RESERVED4:4; -	} fields; -}; -  struct i2c_payloads {  	struct vector payloads;  }; @@ -157,7 +77,7 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)  #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b)) -void dal_ddc_i2c_payloads_add( +static void i2c_payloads_add(  	struct i2c_payloads *payloads,  	uint32_t address,  	uint32_t len, @@ -225,7 +145,7 @@ static void ddc_service_construct(  	ddc_service->wa.raw = 0;  } -struct ddc_service *dal_ddc_service_create( +struct ddc_service *link_create_ddc_service(  	struct ddc_service_init_data *init_data)  {  	struct ddc_service *ddc_service; @@ -245,7 +165,7 @@ static void ddc_service_destruct(struct ddc_service *ddc)  		dal_gpio_destroy_ddc(&ddc->ddc_pin);  } -void dal_ddc_service_destroy(struct ddc_service **ddc) +void link_destroy_ddc_service(struct ddc_service **ddc)  {  	if (!ddc || !*ddc) {  		BREAK_TO_DEBUGGER(); @@ -256,19 +176,14 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)  	*ddc = NULL;  } -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc) -{ -	return DDC_SERVICE_TYPE_CONNECTOR; -} - -void dal_ddc_service_set_transaction_type( +void set_ddc_transaction_type(  	struct ddc_service *ddc,  	enum ddc_transaction_type type)  {  	ddc->transaction_type = type;  } -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc) +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc)  {  	switch (ddc->transaction_type) {  	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: @@ -281,7 +196,7 @@ bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)  	return false;  } -void ddc_service_set_dongle_type(struct ddc_service *ddc, +void set_dongle_type(struct ddc_service *ddc,  		enum display_dongle_type dongle_type)  {  	ddc->dongle_type = dongle_type; @@ -323,7 +238,7 @@ static uint32_t defer_delay_converter_wa(  #define DP_TRANSLATOR_DELAY 5 -uint32_t get_defer_delay(struct ddc_service *ddc) +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc)  {  	uint32_t defer_delay = 0; @@ -351,175 +266,45 @@ uint32_t get_defer_delay(struct ddc_service *ddc)  	return defer_delay;  } -static bool i2c_read( -	struct ddc_service *ddc, -	uint32_t address, -	uint8_t *buffer, -	uint32_t len) -{ -	uint8_t offs_data = 0; -	struct i2c_payload payloads[2] = { -		{ -		.write = true, -		.address = address, -		.length = 1, -		.data = &offs_data }, -		{ -		.write = false, -		.address = address, -		.length = len, -		.data = buffer } }; - -	struct i2c_command command = { -		.payloads = payloads, -		.number_of_payloads = 2, -		.engine = DDC_I2C_COMMAND_ENGINE, -		.speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; - -	return dm_helpers_submit_i2c( -			ddc->ctx, -			ddc->link, -			&command); -} - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( -	struct ddc_service *ddc, -	struct display_sink_capability *sink_cap) +static bool submit_aux_command(struct ddc_service *ddc, +		struct aux_payload *payload)  { -	uint8_t i; -	bool is_valid_hdmi_signature; -	enum display_dongle_type *dongle = &sink_cap->dongle_type; -	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; -	bool is_type2_dongle = false; -	int retry_count = 2; -	struct dp_hdmi_dongle_signature_data *dongle_signature; - -	/* Assume we have no valid DP passive dongle connected */ -	*dongle = DISPLAY_DONGLE_NONE; -	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; - -	/* Read DP-HDMI dongle I2c (no response 
interpreted as DP-DVI dongle)*/ -	if (!i2c_read( -		ddc, -		DP_HDMI_DONGLE_ADDRESS, -		type2_dongle_buf, -		sizeof(type2_dongle_buf))) { -		/* Passive HDMI dongles can sometimes fail here without retrying*/ -		while (retry_count > 0) { -			if (i2c_read(ddc, -				DP_HDMI_DONGLE_ADDRESS, -				type2_dongle_buf, -				sizeof(type2_dongle_buf))) -				break; -			retry_count--; -		} -		if (retry_count == 0) { -			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; -			sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - -			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), -					"DP-DVI passive dongle %dMhz: ", -					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); -			return; -		} -	} - -	/* Check if Type 2 dongle.*/ -	if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) -		is_type2_dongle = true; - -	dongle_signature = -		(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; +	uint32_t retrieved = 0; +	bool ret = false; -	is_valid_hdmi_signature = true; +	if (!ddc) +		return false; -	/* Check EOT */ -	if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { -		is_valid_hdmi_signature = false; -	} +	if (!payload) +		return false; -	/* Check signature */ -	for (i = 0; i < sizeof(dongle_signature->id); ++i) { -		/* If its not the right signature, -		 * skip mismatch in subversion byte.*/ -		if (dongle_signature->id[i] != -			dp_hdmi_dongle_signature_str[i] && i != 3) { +	do { +		struct aux_payload current_payload; +		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= +				payload->length; +		uint32_t payload_length = is_end_of_payload ? +				payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; -			if (is_type2_dongle) { -				is_valid_hdmi_signature = false; -				break; -			} +		current_payload.address = payload->address; +		current_payload.data = &payload->data[retrieved]; +		current_payload.defer_delay = payload->defer_delay; +		current_payload.i2c_over_aux = payload->i2c_over_aux; +		current_payload.length = payload_length; +		/* set mot (middle of transaction) to false if it is the last payload */ +		current_payload.mot = is_end_of_payload ? payload->mot:true; +		current_payload.write_status_update = false; +		current_payload.reply = payload->reply; +		current_payload.write = payload->write; -		} -	} +		ret = link_aux_transfer_with_retries_no_mutex(ddc, ¤t_payload); -	if (is_type2_dongle) { -		uint32_t max_tmds_clk = -			type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; - -		max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; - -		if (0 == max_tmds_clk || -				max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || -				max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { -			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - -			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, -					sizeof(type2_dongle_buf), -					"DP-DVI passive dongle %dMhz: ", -					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); -		} else { -			if (is_valid_hdmi_signature == true) { -				*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - -				CONN_DATA_DETECT(ddc->link, type2_dongle_buf, -						sizeof(type2_dongle_buf), -						"Type 2 DP-HDMI passive dongle %dMhz: ", -						max_tmds_clk); -			} else { -				*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - -				CONN_DATA_DETECT(ddc->link, type2_dongle_buf, -						sizeof(type2_dongle_buf), -						"Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", -						max_tmds_clk); - -			} - -			/* Multiply by 1000 to convert to kHz. 
*/ -			sink_cap->max_hdmi_pixel_clock = -				max_tmds_clk * 1000; -		} -		sink_cap->is_dongle_type_one = false; - -	} else { -		if (is_valid_hdmi_signature == true) { -			*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - -			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, -					sizeof(type2_dongle_buf), -					"Type 1 DP-HDMI passive dongle %dMhz: ", -					sink_cap->max_hdmi_pixel_clock / 1000); -		} else { -			*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - -			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, -					sizeof(type2_dongle_buf), -					"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", -					sink_cap->max_hdmi_pixel_clock / 1000); -		} -		sink_cap->is_dongle_type_one = true; -	} +		retrieved += payload_length; +	} while (retrieved < payload->length && ret == true); -	return; +	return ret;  } -enum { -	DP_SINK_CAP_SIZE = -		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 -}; - -bool dal_ddc_service_query_ddc_data( +bool link_query_ddc_data(  	struct ddc_service *ddc,  	uint32_t address,  	uint8_t *write_buf, @@ -529,7 +314,7 @@ bool dal_ddc_service_query_ddc_data(  {  	bool success = true;  	uint32_t payload_size = -		dal_ddc_service_is_in_aux_transaction_mode(ddc) ? +		link_is_in_aux_transaction_mode(ddc) ?  			DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;  	uint32_t write_payloads = @@ -543,13 +328,13 @@ bool dal_ddc_service_query_ddc_data(  	if (!payloads_num)  		return false; -	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { +	if (link_is_in_aux_transaction_mode(ddc)) {  		struct aux_payload payload;  		payload.i2c_over_aux = true;  		payload.address = address;  		payload.reply = NULL; -		payload.defer_delay = get_defer_delay(ddc); +		payload.defer_delay = link_get_aux_defer_delay(ddc);  		payload.write_status_update = false;  		if (write_size != 0) { @@ -561,7 +346,7 @@ bool dal_ddc_service_query_ddc_data(  			payload.length = write_size;  			payload.data = write_buf; -			success = dal_ddc_submit_aux_command(ddc, &payload); +			success = submit_aux_command(ddc, &payload);  		}  		if (read_size != 0 && success) { @@ -573,7 +358,7 @@ bool dal_ddc_service_query_ddc_data(  			payload.length = read_size;  			payload.data = read_buf; -			success = dal_ddc_submit_aux_command(ddc, &payload); +			success = submit_aux_command(ddc, &payload);  		}  	} else {  		struct i2c_command command = {0}; @@ -587,10 +372,10 @@ bool dal_ddc_service_query_ddc_data(  		command.engine = DDC_I2C_COMMAND_ENGINE;  		command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; -		dal_ddc_i2c_payloads_add( +		i2c_payloads_add(  			&payloads, address, write_size, write_buf, true); -		dal_ddc_i2c_payloads_add( +		i2c_payloads_add(  			&payloads, address, read_size, read_buf, false);  		command.number_of_payloads = @@ -607,51 +392,6 @@ bool dal_ddc_service_query_ddc_data(  	return success;  } -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, -		struct aux_payload *payload) -{ -	uint32_t retrieved = 0; -	bool ret = false; - -	if (!ddc) -		return false; - -	if (!payload) -		return false; - -	do { -		struct aux_payload current_payload; -		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= -				payload->length; -		uint32_t payload_length = is_end_of_payload ? 
-				payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - -		current_payload.address = payload->address; -		current_payload.data = &payload->data[retrieved]; -		current_payload.defer_delay = payload->defer_delay; -		current_payload.i2c_over_aux = payload->i2c_over_aux; -		current_payload.length = payload_length; -		/* set mot (middle of transaction) to false if it is the last payload */ -		current_payload.mot = is_end_of_payload ? payload->mot:true; -		current_payload.write_status_update = false; -		current_payload.reply = payload->reply; -		current_payload.write = payload->write; - -		ret = dc_link_aux_transfer_with_retries(ddc, ¤t_payload); - -		retrieved += payload_length; -	} while (retrieved < payload->length && ret == true); - -	return ret; -} - -/* dc_link_aux_transfer_raw() - Attempt to transfer - * the given aux payload.  This function does not perform - * retries or handle error states.  The reply is returned - * in the payload->reply and the result through - * *operation_result.  Returns the number of bytes transferred, - * or -1 on a failure. - */  int dc_link_aux_transfer_raw(struct ddc_service *ddc,  		struct aux_payload *payload,  		enum aux_return_code_type *operation_result) @@ -664,22 +404,14 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,  	}  } -/* dc_link_aux_transfer_with_retries() - Attempt to submit an - * aux payload, retrying on timeouts, defers, and busy states - * as outlined in the DP spec.  Returns true if the request - * was successful. - * - * Unless you want to implement your own retry semantics, this - * is probably the one you want. - */ -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,  		struct aux_payload *payload)  {  	return dce_aux_transfer_with_retries(ddc, payload);  } -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, +bool try_to_configure_aux_timeout(struct ddc_service *ddc,  		uint32_t timeout)  {  	bool result = false; @@ -712,20 +444,12 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,  	return result;  } -/*test only function*/ -void dal_ddc_service_set_ddc_pin( -	struct ddc_service *ddc_service, -	struct ddc *ddc) -{ -	ddc_service->ddc_pin = ddc; -} - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service) +struct ddc *get_ddc_pin(struct ddc_service *ddc_service)  {  	return ddc_service->ddc_pin;  } -void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, +void write_scdc_data(struct ddc_service *ddc_service,  		uint32_t pix_clk,  		bool lte_340_scramble)  { @@ -740,13 +464,13 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,  		ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)  		return; -	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, +	link_query_ddc_data(ddc_service, slave_address, &offset,  			sizeof(offset), &sink_version, sizeof(sink_version));  	if (sink_version == 1) {  		/*Source Version = 1*/  		write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;  		write_buffer[1] = 1; -		dal_ddc_service_query_ddc_data(ddc_service, slave_address, +		link_query_ddc_data(ddc_service, slave_address,  				write_buffer, sizeof(write_buffer), NULL, 0);  		/*Read Request from SCDC caps*/  	} @@ -759,11 +483,11 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,  	} else {  		write_buffer[1] = 0;  	} -	dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer, +	
link_query_ddc_data(ddc_service, slave_address, write_buffer,  			sizeof(write_buffer), NULL, 0);  } -void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) +void read_scdc_data(struct ddc_service *ddc_service)  {  	uint8_t slave_address = HDMI_SCDC_ADDRESS;  	uint8_t offset = HDMI_SCDC_TMDS_CONFIG; @@ -773,20 +497,19 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)  		ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)  		return; -	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, +	link_query_ddc_data(ddc_service, slave_address, &offset,  			sizeof(offset), &tmds_config, sizeof(tmds_config));  	if (tmds_config & 0x1) {  		union hdmi_scdc_status_flags_data status_data = {0};  		uint8_t scramble_status = 0;  		offset = HDMI_SCDC_SCRAMBLER_STATUS; -		dal_ddc_service_query_ddc_data(ddc_service, slave_address, +		link_query_ddc_data(ddc_service, slave_address,  				&offset, sizeof(offset), &scramble_status,  				sizeof(scramble_status));  		offset = HDMI_SCDC_STATUS_FLAGS; -		dal_ddc_service_query_ddc_data(ddc_service, slave_address, +		link_query_ddc_data(ddc_service, slave_address,  				&offset, sizeof(offset), &status_data.byte,  				sizeof(status_data.byte));  	}  } - diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index 418fbf8c5c3a..86e9d2e886d6 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -23,60 +23,38 @@   *   */ -#ifndef __DAL_I2CAUX_INTERFACE_H__ -#define __DAL_I2CAUX_INTERFACE_H__ +#ifndef __DAL_DDC_SERVICE_H__ +#define __DAL_DDC_SERVICE_H__ -#include "dc_types.h" -#include "gpio_service_interface.h" +#include "link.h" +#define AUX_POWER_UP_WA_DELAY 500 +#define I2C_OVER_AUX_DEFER_WA_DELAY 70 +#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 +#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 +#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define DEFAULT_AUX_MAX_DATA_SIZE 16 -#define AUX_MAX_DEFER_WRITE_RETRY 20 +#define EDID_SEGMENT_SIZE 256 -struct aux_payload { -	/* set following flag to read/write I2C data, -	 * reset it to read/write DPCD data */ -	bool i2c_over_aux; -	/* set following flag to write data, -	 * reset it to read data */ -	bool write; -	bool mot; -	bool write_status_update; +void set_ddc_transaction_type( +		struct ddc_service *ddc, +		enum ddc_transaction_type type); -	uint32_t address; -	uint32_t length; -	uint8_t *data; -	/* -	 * used to return the reply type of the transaction -	 * ignored if NULL -	 */ -	uint8_t *reply; -	/* expressed in milliseconds -	 * zero means "use default value" -	 */ -	uint32_t defer_delay; +bool try_to_configure_aux_timeout(struct ddc_service *ddc, +		uint32_t timeout); -}; +void write_scdc_data( +		struct ddc_service *ddc_service, +		uint32_t pix_clk, +		bool lte_340_scramble); -struct aux_command { -	struct aux_payload *payloads; -	uint8_t number_of_payloads; +void read_scdc_data( +		struct ddc_service *ddc_service); -	/* expressed in milliseconds -	 * zero means "use default value" */ -	uint32_t defer_delay; +void set_dongle_type(struct ddc_service *ddc, +		enum display_dongle_type dongle_type); -	/* zero means "use default value" */ -	uint32_t max_defer_write_retry; +struct ddc *get_ddc_pin(struct ddc_service *ddc_service); -	enum i2c_mot_mode mot; -}; +#endif /* __DAL_DDC_SERVICE_H__ */ -union aux_config { -	struct { -		uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; -	} bits; -	uint32_t raw; -}; - 
-#endif diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c new file mode 100644 index 000000000000..4874d1bf1dcb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -0,0 +1,2246 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp specific link capability retrieval sequence. It is + * responsible for retrieving, parsing, overriding, deciding capability obtained + * from dp link. Link capability consists of encoders, DPRXs, cables, retimers, + * usb and all other possible backend capabilities. Other components should + * include this header file in order to access link capability. Accessing link + * capability by dereferencing dc_link outside dp_link_capability is not a + * recommended method as it makes the component dependent on the underlying data + * structure used to represent link capability instead of function interfaces. + */ + +#include "link_dp_capability.h" +#include "link_ddc.h" +#include "link_dpcd.h" +#include "link_dp_dpia.h" +#include "link_dp_phy.h" +#include "link_edp_panel_control.h" +#include "link_dp_irq_handler.h" +#include "link/accessories/link_dp_trace.h" +#include "link_dp_training.h" +#include "atomfirmware.h" +#include "resource.h" +#include "link_enc_cfg.h" +#include "dc_dmub_srv.h" +#include "gpio_service_interface.h" + +#define DC_LOGGER \ +	link->ctx->logger +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ + +struct dp_lt_fallback_entry { +	enum dc_lane_count lane_count; +	enum dc_link_rate link_rate; +}; + +static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { +		/* This link training fallback array is ordered by +		 * link bandwidth from highest to lowest. +		 * DP specs makes it a normative policy to always +		 * choose the next highest link bandwidth during +		 * link training fallback. 
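+		 * As a rough illustration, the ordering follows effective data
+		 * bandwidth rather than raw bandwidth: 2-lane UHBR13.5 (~26.1 Gbps
+		 * at 96.71% 128b/132b efficiency) is tried before 4-lane HBR3
+		 * (~25.9 Gbps at 80% 8b/10b efficiency).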
+		 */ +		{LANE_COUNT_FOUR, LINK_RATE_UHBR20}, +		{LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, +		{LANE_COUNT_TWO, LINK_RATE_UHBR20}, +		{LANE_COUNT_FOUR, LINK_RATE_UHBR10}, +		{LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, +		{LANE_COUNT_FOUR, LINK_RATE_HIGH3}, +		{LANE_COUNT_ONE, LINK_RATE_UHBR20}, +		{LANE_COUNT_TWO, LINK_RATE_UHBR10}, +		{LANE_COUNT_FOUR, LINK_RATE_HIGH2}, +		{LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, +		{LANE_COUNT_TWO, LINK_RATE_HIGH3}, +		{LANE_COUNT_ONE, LINK_RATE_UHBR10}, +		{LANE_COUNT_TWO, LINK_RATE_HIGH2}, +		{LANE_COUNT_FOUR, LINK_RATE_HIGH}, +		{LANE_COUNT_ONE, LINK_RATE_HIGH3}, +		{LANE_COUNT_FOUR, LINK_RATE_LOW}, +		{LANE_COUNT_ONE, LINK_RATE_HIGH2}, +		{LANE_COUNT_TWO, LINK_RATE_HIGH}, +		{LANE_COUNT_TWO, LINK_RATE_LOW}, +		{LANE_COUNT_ONE, LINK_RATE_HIGH}, +		{LANE_COUNT_ONE, LINK_RATE_LOW}, +}; + +static const struct dc_link_settings fail_safe_link_settings = { +		.lane_count = LANE_COUNT_ONE, +		.link_rate = LINK_RATE_LOW, +		.link_spread = LINK_SPREAD_DISABLED, +}; + +bool is_dp_active_dongle(const struct dc_link *link) +{ +	return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && +				(link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); +} + +bool is_dp_branch_device(const struct dc_link *link) +{ +	return link->dpcd_caps.is_branch_dev; +} + +static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) +{ +	switch (bpc) { +	case DOWN_STREAM_MAX_8BPC: +		return 8; +	case DOWN_STREAM_MAX_10BPC: +		return 10; +	case DOWN_STREAM_MAX_12BPC: +		return 12; +	case DOWN_STREAM_MAX_16BPC: +		return 16; +	default: +		break; +	} + +	return -1; +} + +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) +{ +	switch (lttpr_repeater_count) { +	case 0x80: // 1 lttpr repeater +		return 1; +	case 0x40: // 2 lttpr repeaters +		return 2; +	case 0x20: // 3 lttpr repeaters +		return 3; +	case 0x10: // 4 lttpr repeaters +		return 4; +	case 0x08: // 5 lttpr repeaters +		return 5; +	case 0x04: // 6 lttpr repeaters +		return 6; +	case 0x02: // 7 lttpr repeaters +		return 7; +	case 0x01: // 8 lttpr repeaters +		return 8; +	default: +		break; +	} +	return 0; // invalid value +} + +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ +	switch (bw) { +	case 0b001: +		return 9000000; +	case 0b010: +		return 18000000; +	case 0b011: +		return 24000000; +	case 0b100: +		return 32000000; +	case 0b101: +		return 40000000; +	case 0b110: +		return 48000000; +	} + +	return 0; +} + +static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) +{ +	enum dc_link_rate link_rate; +	// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. 
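+	// For example, HBR3 comes in as 8,100,000 kHz; dividing by the 270,000 kHz
+	// (0.27 Gbps) reference gives the multiplier 30 (0x1E).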
+	switch (link_rate_in_khz) { +	case 1620000: +		link_rate = LINK_RATE_LOW;	// Rate_1 (RBR)	- 1.62 Gbps/Lane +		break; +	case 2160000: +		link_rate = LINK_RATE_RATE_2;	// Rate_2	- 2.16 Gbps/Lane +		break; +	case 2430000: +		link_rate = LINK_RATE_RATE_3;	// Rate_3	- 2.43 Gbps/Lane +		break; +	case 2700000: +		link_rate = LINK_RATE_HIGH;	// Rate_4 (HBR)	- 2.70 Gbps/Lane +		break; +	case 3240000: +		link_rate = LINK_RATE_RBR2;	// Rate_5 (RBR2)- 3.24 Gbps/Lane +		break; +	case 4320000: +		link_rate = LINK_RATE_RATE_6;	// Rate_6	- 4.32 Gbps/Lane +		break; +	case 5400000: +		link_rate = LINK_RATE_HIGH2;	// Rate_7 (HBR2)- 5.40 Gbps/Lane +		break; +	case 8100000: +		link_rate = LINK_RATE_HIGH3;	// Rate_8 (HBR3)- 8.10 Gbps/Lane +		break; +	default: +		link_rate = LINK_RATE_UNKNOWN; +		break; +	} +	return link_rate; +} + +static union dp_cable_id intersect_cable_id( +		union dp_cable_id *a, union dp_cable_id *b) +{ +	union dp_cable_id out; + +	out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, +			b->bits.UHBR10_20_CAPABILITY); +	out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, +			b->bits.UHBR13_5_CAPABILITY); +	out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); + +	return out; +} + +/* + * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. + */ +static uint32_t intersect_frl_link_bw_support( +	const uint32_t max_supported_frl_bw_in_kbps, +	const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ +	uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + +	// HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) +	if (hdmi_encoded_link_bw.bits.FRL_MODE) { +		if (hdmi_encoded_link_bw.bits.BW_48Gbps) +			supported_bw_in_kbps = 48000000; +		else if (hdmi_encoded_link_bw.bits.BW_40Gbps) +			supported_bw_in_kbps = 40000000; +		else if (hdmi_encoded_link_bw.bits.BW_32Gbps) +			supported_bw_in_kbps = 32000000; +		else if (hdmi_encoded_link_bw.bits.BW_24Gbps) +			supported_bw_in_kbps = 24000000; +		else if (hdmi_encoded_link_bw.bits.BW_18Gbps) +			supported_bw_in_kbps = 18000000; +		else if (hdmi_encoded_link_bw.bits.BW_9Gbps) +			supported_bw_in_kbps = 9000000; +	} + +	return supported_bw_in_kbps; +} + +static enum clock_source_id get_clock_source_id(struct dc_link *link) +{ +	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; +	struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; + +	if (dp_cs != NULL) { +		dp_cs_id = dp_cs->id; +	} else { +		/* +		 * dp clock source is not initialized for some reason. +		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used +		 */ +		ASSERT(dp_cs); +	} + +	return dp_cs_id; +} + +static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, +		int length) +{ +	int retry = 0; + +	if (!link->dpcd_caps.dpcd_rev.raw) { +		do { +			dc_link_dp_receiver_power_ctrl(link, true); +			core_link_read_dpcd(link, DP_DPCD_REV, +							dpcd_data, length); +			link->dpcd_caps.dpcd_rev.raw = dpcd_data[ +				DP_DPCD_REV - +				DP_DPCD_REV]; +		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); +	} + +	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { +		switch (link->dpcd_caps.branch_dev_id) { +		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down +		 * all internal circuits including AUX communication preventing +		 * reading DPCD table and EDID (spec violation). 
+		 * Encoder will skip DP RX power down on disable_output to +		 * keep receiver powered all the time.*/ +		case DP_BRANCH_DEVICE_ID_0010FA: +		case DP_BRANCH_DEVICE_ID_0080E1: +		case DP_BRANCH_DEVICE_ID_00E04C: +			link->wa_flags.dp_keep_receiver_powered = true; +			break; + +		/* TODO: May need work around for other dongles. */ +		default: +			link->wa_flags.dp_keep_receiver_powered = false; +			break; +		} +	} else +		link->wa_flags.dp_keep_receiver_powered = false; +} + +bool dc_link_is_fec_supported(const struct dc_link *link) +{ +	/* TODO - use asic cap instead of link_enc->features +	 * we no longer know which link enc to use for this link before commit +	 */ +	struct link_encoder *link_enc = NULL; + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	return (dc_is_dp_signal(link->connector_signal) && link_enc && +			link_enc->features.fec_supported && +			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && +			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + +bool dc_link_should_enable_fec(const struct dc_link *link) +{ +	bool force_disable = false; + +	if (link->fec_state == dc_link_fec_enabled) +		force_disable = false; +	else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && +			link->local_sink && +			link->local_sink->edid_caps.panel_patch.disable_fec) +		force_disable = true; +	else if (link->connector_signal == SIGNAL_TYPE_EDP +			&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. +			 dsc_support.DSC_SUPPORT == false +				|| link->panel_config.dsc.disable_dsc_edp +				|| !link->dc->caps.edp_dsc_support)) +		force_disable = true; + +	return !force_disable && dc_link_is_fec_supported(link); +} + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ +	/* If this assert is hit then we have a link encoder dynamic management issue */ +	ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); +	return (pipe_ctx->stream_res.hpo_dp_stream_enc && +			pipe_ctx->link_res.hpo_dp_link_enc && +			dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +bool dp_is_lttpr_present(struct dc_link *link) +{ +	return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && +			link->dpcd_caps.lttpr_caps.max_lane_count > 0 && +			link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && +			link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); +} + +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. 
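+ * For example, a bogus raw value that falls between the HBR2 and HBR3
+ * encodings is clamped down to HBR2 rather than rejected outright.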
+ */ +static enum dc_link_rate get_link_rate_from_max_link_bw( +		 uint8_t max_link_bw) +{ +	enum dc_link_rate link_rate; + +	if (max_link_bw >= LINK_RATE_HIGH3) { +		link_rate = LINK_RATE_HIGH3; +	} else if (max_link_bw < LINK_RATE_HIGH3 +			&& max_link_bw >= LINK_RATE_HIGH2) { +		link_rate = LINK_RATE_HIGH2; +	} else if (max_link_bw < LINK_RATE_HIGH2 +			&& max_link_bw >= LINK_RATE_HIGH) { +		link_rate = LINK_RATE_HIGH; +	} else if (max_link_bw < LINK_RATE_HIGH +			&& max_link_bw >= LINK_RATE_LOW) { +		link_rate = LINK_RATE_LOW; +	} else { +		link_rate = LINK_RATE_UNKNOWN; +	} + +	return link_rate; +} + +static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) +{ +	enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + +	if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) +		lttpr_max_link_rate = LINK_RATE_UHBR20; +	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) +		lttpr_max_link_rate = LINK_RATE_UHBR13_5; +	else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) +		lttpr_max_link_rate = LINK_RATE_UHBR10; + +	return lttpr_max_link_rate; +} + +static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) +{ +	enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; + +	if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) +		cable_max_link_rate = LINK_RATE_UHBR20; +	else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) +		cable_max_link_rate = LINK_RATE_UHBR13_5; +	else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) +		cable_max_link_rate = LINK_RATE_UHBR10; + +	return cable_max_link_rate; +} + +static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) +{ +	return lane_count <= LANE_COUNT_ONE; +} + +static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) +{ +	return link_rate <= LINK_RATE_LOW; +} + +static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) +{ +	switch (lane_count) { +	case LANE_COUNT_FOUR: +		return LANE_COUNT_TWO; +	case LANE_COUNT_TWO: +		return LANE_COUNT_ONE; +	case LANE_COUNT_ONE: +		return LANE_COUNT_UNKNOWN; +	default: +		return LANE_COUNT_UNKNOWN; +	} +} + +static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) +{ +	switch (link_rate) { +	case LINK_RATE_UHBR20: +		return LINK_RATE_UHBR13_5; +	case LINK_RATE_UHBR13_5: +		return LINK_RATE_UHBR10; +	case LINK_RATE_UHBR10: +		return LINK_RATE_HIGH3; +	case LINK_RATE_HIGH3: +		return LINK_RATE_HIGH2; +	case LINK_RATE_HIGH2: +		return LINK_RATE_HIGH; +	case LINK_RATE_HIGH: +		return LINK_RATE_LOW; +	case LINK_RATE_LOW: +		return LINK_RATE_UNKNOWN; +	default: +		return LINK_RATE_UNKNOWN; +	} +} + +static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) +{ +	switch (lane_count) { +	case LANE_COUNT_ONE: +		return LANE_COUNT_TWO; +	case LANE_COUNT_TWO: +		return LANE_COUNT_FOUR; +	default: +		return LANE_COUNT_UNKNOWN; +	} +} + +static enum dc_link_rate increase_link_rate(struct dc_link *link, +		enum dc_link_rate link_rate) +{ +	switch (link_rate) { +	case LINK_RATE_LOW: +		return LINK_RATE_HIGH; +	case LINK_RATE_HIGH: +		return LINK_RATE_HIGH2; +	case LINK_RATE_HIGH2: +		return LINK_RATE_HIGH3; +	case LINK_RATE_HIGH3: +		return LINK_RATE_UHBR10; +	case LINK_RATE_UHBR10: +		/* upto DP2.x specs UHBR13.5 is the only link rate that could be +		 * not supported by DPRX when higher link rate is supported. +		 * so we treat it as a special case for code simplicity. 
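+		 * (A DPRX may, for instance, report UHBR10 and UHBR20 support while
+		 * leaving the UHBR13_5 bit clear, which is why the supported link
+		 * rates bit is checked below before stepping past UHBR10.)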
When we +		 * have new specs with more link rates like this, we should +		 * consider a more generic solution to handle discrete link +		 * rate capabilities. +		 */ +		return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? +				LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; +	case LINK_RATE_UHBR13_5: +		return LINK_RATE_UHBR20; +	default: +		return LINK_RATE_UNKNOWN; +	} +} + +static bool decide_fallback_link_setting_max_bw_policy( +		struct dc_link *link, +		const struct dc_link_settings *max, +		struct dc_link_settings *cur, +		enum link_training_result training_result) +{ +	uint8_t cur_idx = 0, next_idx; +	bool found = false; + +	if (training_result == LINK_TRAINING_ABORT) +		return false; + +	while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) +		/* find current index */ +		if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && +				dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) +			break; +		else +			cur_idx++; + +	next_idx = cur_idx + 1; + +	while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) +		/* find next index */ +		if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || +				dp_lt_fallbacks[next_idx].link_rate > max->link_rate) +			next_idx++; +		else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && +				link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) +			/* upto DP2.x specs UHBR13.5 is the only link rate that +			 * could be not supported by DPRX when higher link rate +			 * is supported. so we treat it as a special case for +			 * code simplicity. When we have new specs with more +			 * link rates like this, we should consider a more +			 * generic solution to handle discrete link rate +			 * capabilities. +			 */ +			next_idx++; +		else +			break; + +	if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { +		cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; +		cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; +		found = true; +	} + +	return found; +} + +/* + * function: set link rate and lane count fallback based + * on current link setting and last link training result + * return value: + *			true - link setting could be set + *			false - has reached minimum setting + *					and no further fallback could be done + */ +bool decide_fallback_link_setting( +		struct dc_link *link, +		struct dc_link_settings *max, +		struct dc_link_settings *cur, +		enum link_training_result training_result) +{ +	if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING || +			link->dc->debug.force_dp2_lt_fallback_method) +		return decide_fallback_link_setting_max_bw_policy(link, max, +				cur, training_result); + +	switch (training_result) { +	case LINK_TRAINING_CR_FAIL_LANE0: +	case LINK_TRAINING_CR_FAIL_LANE1: +	case LINK_TRAINING_CR_FAIL_LANE23: +	case LINK_TRAINING_LQA_FAIL: +	{ +		if (!reached_minimum_link_rate(cur->link_rate)) { +			cur->link_rate = reduce_link_rate(cur->link_rate); +		} else if (!reached_minimum_lane_count(cur->lane_count)) { +			cur->link_rate = max->link_rate; +			if (training_result == LINK_TRAINING_CR_FAIL_LANE0) +				return false; +			else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) +				cur->lane_count = LANE_COUNT_ONE; +			else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) +				cur->lane_count = LANE_COUNT_TWO; +			else +				cur->lane_count = reduce_lane_count(cur->lane_count); +		} else { +			return false; +		} +		break; +	} +	case LINK_TRAINING_EQ_FAIL_EQ: +	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: +	{ +		if (!reached_minimum_lane_count(cur->lane_count)) { +			cur->lane_count = 
reduce_lane_count(cur->lane_count);
+		} else if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+			/* Reduce max link rate to avoid potential infinite loop.
+			 * Needed so that any subsequent CR_FAIL fallback can't
+			 * re-set the link rate higher than the link rate from
+			 * the latest EQ_FAIL fallback.
+			 */
+			max->link_rate = cur->link_rate;
+			cur->lane_count = max->lane_count;
+		} else {
+			return false;
+		}
+		break;
+	}
+	case LINK_TRAINING_EQ_FAIL_CR:
+	{
+		if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+			/* Reduce max link rate to avoid potential infinite loop.
+			 * Needed so that any subsequent CR_FAIL fallback can't
+			 * re-set the link rate higher than the link rate from
+			 * the latest EQ_FAIL fallback.
+			 */
+			max->link_rate = cur->link_rate;
+			cur->lane_count = max->lane_count;
+		} else {
+			return false;
+		}
+		break;
+	}
+	default:
+		return false;
+	}
+	return true;
+}
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+	struct dc_link_settings initial_link_setting = {
+		LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
+	struct dc_link_settings current_link_setting =
+			initial_link_setting;
+	uint32_t link_bw;
+
+	if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+		return false;
+
+	/* search for the minimum link setting that:
+	 * 1. is supported according to the link training result
+	 * 2. could support the b/w requested by the timing
+	 */
+	while (current_link_setting.link_rate <=
+			link->verified_link_cap.link_rate) {
+		link_bw = dc_link_bandwidth_kbps(
+				link,
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return true;
+		}
+
+		if (current_link_setting.lane_count <
+				link->verified_link_cap.lane_count) {
+			current_link_setting.lane_count =
+					increase_lane_count(
+							current_link_setting.lane_count);
+		} else {
+			current_link_setting.link_rate =
+					increase_link_rate(link,
+							current_link_setting.link_rate);
+			current_link_setting.lane_count =
+					initial_link_setting.lane_count;
+		}
+	}
+
+	return false;
+}
+
+bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+	struct dc_link_settings initial_link_setting;
+	struct dc_link_settings current_link_setting;
+	uint32_t link_bw;
+
+	/*
+	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+	 */
+	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+			link->dpcd_caps.edp_supported_link_rates_count == 0) {
+		*link_setting = link->verified_link_cap;
+		return true;
+	}
+
+	memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+	initial_link_setting.lane_count = LANE_COUNT_ONE;
+	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+	initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+	initial_link_setting.use_link_rate_set = true;
+	initial_link_setting.link_rate_set = 0;
+	current_link_setting = initial_link_setting;
+
+	/* search for the minimum link setting that:
+	 * 1. is supported according to the link training result
+	 * 2. 
could support the b/w requested by the timing
+	 */
+	while (current_link_setting.link_rate <=
+			link->verified_link_cap.link_rate) {
+		link_bw = dc_link_bandwidth_kbps(
+				link,
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return true;
+		}
+
+		if (current_link_setting.lane_count <
+				link->verified_link_cap.lane_count) {
+			current_link_setting.lane_count =
+					increase_lane_count(
+							current_link_setting.lane_count);
+		} else {
+			if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+				current_link_setting.link_rate_set++;
+				current_link_setting.link_rate =
+					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+				current_link_setting.lane_count =
+									initial_link_setting.lane_count;
+			} else
+				break;
+		}
+	}
+	return false;
+}
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+		struct dc_link_settings *link_setting,
+		uint32_t req_bw,
+		enum dc_link_rate max_link_rate)
+{
+	struct dc_link_settings initial_link_setting;
+	struct dc_link_settings current_link_setting;
+	uint32_t link_bw;
+
+	unsigned int policy = 0;
+
+	policy = link->panel_config.dsc.force_dsc_edp_policy;
+	if (max_link_rate == LINK_RATE_UNKNOWN)
+		max_link_rate = link->verified_link_cap.link_rate;
+	/*
+	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+	 */
+	if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+			link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+		/* for DSC enabled case, we search for minimum lane count */
+		memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+		initial_link_setting.lane_count = LANE_COUNT_ONE;
+		initial_link_setting.link_rate = LINK_RATE_LOW;
+		initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+		initial_link_setting.use_link_rate_set = false;
+		initial_link_setting.link_rate_set = 0;
+		current_link_setting = initial_link_setting;
+		if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+			return false;
+
+		/* search for the minimum link setting that:
+		 * 1. is supported according to the link training result
+		 * 2. 
could support the b/w requested by the timing
+		 */
+		while (current_link_setting.link_rate <=
+				max_link_rate) {
+			link_bw = dc_link_bandwidth_kbps(
+					link,
+					&current_link_setting);
+			if (req_bw <= link_bw) {
+				*link_setting = current_link_setting;
+				return true;
+			}
+			if (policy) {
+				/* minimize lane */
+				if (current_link_setting.link_rate < max_link_rate) {
+					current_link_setting.link_rate =
+							increase_link_rate(link,
+									current_link_setting.link_rate);
+				} else {
+					if (current_link_setting.lane_count <
+									link->verified_link_cap.lane_count) {
+						current_link_setting.lane_count =
+								increase_lane_count(
+										current_link_setting.lane_count);
+						current_link_setting.link_rate = initial_link_setting.link_rate;
+					} else
+						break;
+				}
+			} else {
+				/* minimize link rate */
+				if (current_link_setting.lane_count <
+						link->verified_link_cap.lane_count) {
+					current_link_setting.lane_count =
+							increase_lane_count(
+									current_link_setting.lane_count);
+				} else {
+					current_link_setting.link_rate =
+							increase_link_rate(link,
+									current_link_setting.link_rate);
+					current_link_setting.lane_count =
+							initial_link_setting.lane_count;
+				}
+			}
+		}
+		return false;
+	}
+
+	/* if optimize edp link is supported */
+	memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+	initial_link_setting.lane_count = LANE_COUNT_ONE;
+	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+	initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+	initial_link_setting.use_link_rate_set = true;
+	initial_link_setting.link_rate_set = 0;
+	current_link_setting = initial_link_setting;
+
+	/* search for the minimum link setting that:
+	 * 1. is supported according to the link training result
+	 * 2. 
could support the b/w requested by the timing
+	 */
+	while (current_link_setting.link_rate <=
+			max_link_rate) {
+		link_bw = dc_link_bandwidth_kbps(
+				link,
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return true;
+		}
+		if (policy) {
+			/* minimize lane */
+			if (current_link_setting.link_rate_set <
+					link->dpcd_caps.edp_supported_link_rates_count
+					&& current_link_setting.link_rate < max_link_rate) {
+				current_link_setting.link_rate_set++;
+				current_link_setting.link_rate =
+					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+			} else {
+				if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+					current_link_setting.lane_count =
+							increase_lane_count(
+									current_link_setting.lane_count);
+					current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+					current_link_setting.link_rate =
+						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+				} else
+					break;
+			}
+		} else {
+			/* minimize link rate */
+			if (current_link_setting.lane_count <
+					link->verified_link_cap.lane_count) {
+				current_link_setting.lane_count =
+						increase_lane_count(
+								current_link_setting.lane_count);
+			} else {
+				if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+					current_link_setting.link_rate_set++;
+					current_link_setting.link_rate =
+						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+					current_link_setting.lane_count =
+						initial_link_setting.lane_count;
+				} else
+					break;
+			}
+		}
+	}
+	return false;
+}
+
+static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
+{
+	*link_setting = link->verified_link_cap;
+	return true;
+}
+
+bool link_decide_link_settings(struct dc_stream_state *stream,
+	struct dc_link_settings *link_setting)
+{
+	struct dc_link *link = stream->link;
+	uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+	memset(link_setting, 0, sizeof(*link_setting));
+
+	/* if preferred is specified through AMDDP, use it, if it's enough
+	 * to drive the mode
+	 */
+	if (link->preferred_link_setting.lane_count !=
+			LANE_COUNT_UNKNOWN &&
+			link->preferred_link_setting.link_rate !=
+					LINK_RATE_UNKNOWN) {
+		*link_setting = link->preferred_link_setting;
+		return true;
+	}
+
+	/* MST doesn't perform link training for now
+	 * TODO: add MST specific link training routine
+	 */
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		decide_mst_link_settings(link, link_setting);
+	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		/* enable edp link optimization for DSC eDP case */
+		if (stream->timing.flags.DSC) {
+			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+			if (link->panel_config.dsc.force_dsc_edp_policy) {
+				/* calculate link max link rate cap*/
+				struct dc_link_settings tmp_link_setting;
+				struct dc_crtc_timing tmp_timing = stream->timing;
+				uint32_t orig_req_bw;
+
+				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+				tmp_timing.flags.DSC = 0;
+				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+				dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+				max_link_rate = tmp_link_setting.link_rate;
+			}
+			decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
+		} else {
+			
dc_link_decide_edp_link_settings(link, link_setting, req_bw); +		} +	} else { +		decide_dp_link_settings(link, link_setting, req_bw); +	} + +	return link_setting->lane_count != LANE_COUNT_UNKNOWN && +			link_setting->link_rate != LINK_RATE_UNKNOWN; +} + +enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings) +{ +	if ((link_settings->link_rate >= LINK_RATE_LOW) && +			(link_settings->link_rate <= LINK_RATE_HIGH3)) +		return DP_8b_10b_ENCODING; +	else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && +			(link_settings->link_rate <= LINK_RATE_UHBR20)) +		return DP_128b_132b_ENCODING; +	return DP_UNKNOWN_ENCODING; +} + +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) +{ +	struct dc_link_settings link_settings = {0}; + +	if (!dc_is_dp_signal(link->connector_signal)) +		return DP_UNKNOWN_ENCODING; + +	if (link->preferred_link_setting.lane_count != +			LANE_COUNT_UNKNOWN && +			link->preferred_link_setting.link_rate != +					LINK_RATE_UNKNOWN) { +		link_settings = link->preferred_link_setting; +	} else { +		decide_mst_link_settings(link, &link_settings); +	} + +	return link_dp_get_encoding_format(&link_settings); +} + +static void read_dp_device_vendor_id(struct dc_link *link) +{ +	struct dp_device_vendor_id dp_id; + +	/* read IEEE branch device id */ +	core_link_read_dpcd( +		link, +		DP_BRANCH_OUI, +		(uint8_t *)&dp_id, +		sizeof(dp_id)); + +	link->dpcd_caps.branch_dev_id = +		(dp_id.ieee_oui[0] << 16) + +		(dp_id.ieee_oui[1] << 8) + +		dp_id.ieee_oui[2]; + +	memmove( +		link->dpcd_caps.branch_dev_name, +		dp_id.ieee_device_id, +		sizeof(dp_id.ieee_device_id)); +} + +static enum dc_status wake_up_aux_channel(struct dc_link *link) +{ +	enum dc_status status = DC_ERROR_UNEXPECTED; +	uint32_t aux_channel_retry_cnt = 0; +	uint8_t dpcd_power_state = '\0'; + +	while (status != DC_OK && aux_channel_retry_cnt < 10) { +		status = core_link_read_dpcd(link, DP_SET_POWER, +				&dpcd_power_state, sizeof(dpcd_power_state)); + +		/* Delay 1 ms if AUX CH is in power down state. Based on spec +		 * section 2.3.1.2, if AUX CH may be powered down due to +		 * write to DPCD 600h = 2. Sink AUX CH is monitoring differential +		 * signal and may need up to 1 ms before being able to reply. 
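As an aside, the retry behaviour this comment describes can be condensed into the minimal sketch below. It reuses the helpers already visible in this file (core_link_read_dpcd, DP_SET_POWER, DP_SET_POWER_D3, udelay); the 10-attempt bound and the 1 ms delay mirror the loop that follows, and the helper name is chosen here purely for illustration rather than being part of the patch.

static bool poll_aux_awake_sketch(struct dc_link *link)
{
	uint8_t power_state = 0;
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		/* Succeed once the sink answers on AUX and is not in D3. */
		if (core_link_read_dpcd(link, DP_SET_POWER, &power_state,
				sizeof(power_state)) == DC_OK &&
				power_state != DP_SET_POWER_D3)
			return true;
		udelay(1000); /* give the sink up to ~1 ms to start replying */
	}
	return false;
}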
+		 */ +		if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { +			udelay(1000); +			aux_channel_retry_cnt++; +		} +	} + +	if (status != DC_OK) { +		dpcd_power_state = DP_SET_POWER_D0; +		status = core_link_write_dpcd( +				link, +				DP_SET_POWER, +				&dpcd_power_state, +				sizeof(dpcd_power_state)); + +		dpcd_power_state = DP_SET_POWER_D3; +		status = core_link_write_dpcd( +				link, +				DP_SET_POWER, +				&dpcd_power_state, +				sizeof(dpcd_power_state)); +		return DC_ERROR_UNEXPECTED; +	} + +	return DC_OK; +} + +static void get_active_converter_info( +	uint8_t data, struct dc_link *link) +{ +	union dp_downstream_port_present ds_port = { .byte = data }; +	memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); + +	/* decode converter info*/ +	if (!ds_port.fields.PORT_PRESENT) { +		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; +		set_dongle_type(link->ddc, +				link->dpcd_caps.dongle_type); +		link->dpcd_caps.is_branch_dev = false; +		return; +	} + +	/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ +	link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + +	switch (ds_port.fields.PORT_TYPE) { +	case DOWNSTREAM_VGA: +		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; +		break; +	case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: +		/* At this point we don't know is it DVI or HDMI or DP++, +		 * assume DVI.*/ +		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; +		break; +	default: +		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; +		break; +	} + +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { +		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ +		union dwnstream_port_caps_byte0 *port_caps = +			(union dwnstream_port_caps_byte0 *)det_caps; +		if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, +				det_caps, sizeof(det_caps)) == DC_OK) { + +			switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { +			/*Handle DP case as DONGLE_NONE*/ +			case DOWN_STREAM_DETAILED_DP: +				link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; +				break; +			case DOWN_STREAM_DETAILED_VGA: +				link->dpcd_caps.dongle_type = +					DISPLAY_DONGLE_DP_VGA_CONVERTER; +				break; +			case DOWN_STREAM_DETAILED_DVI: +				link->dpcd_caps.dongle_type = +					DISPLAY_DONGLE_DP_DVI_CONVERTER; +				break; +			case DOWN_STREAM_DETAILED_HDMI: +			case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: +				/*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ +				link->dpcd_caps.dongle_type = +					DISPLAY_DONGLE_DP_HDMI_CONVERTER; + +				link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; +				if (ds_port.fields.DETAILED_CAPS) { + +					union dwnstream_port_caps_byte3_hdmi +						hdmi_caps = {.raw = det_caps[3] }; +					union dwnstream_port_caps_byte2 +						hdmi_color_caps = {.raw = det_caps[2] }; +					link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = +						det_caps[1] * 2500; + +					link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = +						hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; +					/*YCBCR capability only for HDMI case*/ +					if (port_caps->bits.DWN_STRM_PORTX_TYPE +							== DOWN_STREAM_DETAILED_HDMI) { +						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = +								hdmi_caps.bits.YCrCr422_PASS_THROUGH; +						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = +								hdmi_caps.bits.YCrCr420_PASS_THROUGH; +						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = +								
hdmi_caps.bits.YCrCr422_CONVERSION; +						link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = +								hdmi_caps.bits.YCrCr420_CONVERSION; +					} + +					link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = +						translate_dpcd_max_bpc( +							hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); + +					if (link->dc->caps.dp_hdmi21_pcon_support) { +						union hdmi_encoded_link_bw hdmi_encoded_link_bw; + +						link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = +								dc_link_bw_kbps_from_raw_frl_link_rate_data( +										hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + +						// Intersect reported max link bw support with the supported link rate post FRL link training +						if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, +								&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { +							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( +									link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, +									hdmi_encoded_link_bw); +						} + +						if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) +							link->dpcd_caps.dongle_caps.extendedCapValid = true; +					} + +					if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) +						link->dpcd_caps.dongle_caps.extendedCapValid = true; +				} + +				break; +			} +		} +	} + +	set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + +	{ +		struct dp_sink_hw_fw_revision dp_hw_fw_revision; + +		core_link_read_dpcd( +			link, +			DP_BRANCH_REVISION_START, +			(uint8_t *)&dp_hw_fw_revision, +			sizeof(dp_hw_fw_revision)); + +		link->dpcd_caps.branch_hw_revision = +			dp_hw_fw_revision.ieee_hw_rev; + +		memmove( +			link->dpcd_caps.branch_fw_revision, +			dp_hw_fw_revision.ieee_fw_rev, +			sizeof(dp_hw_fw_revision.ieee_fw_rev)); +	} +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && +			link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { +		union dp_dfp_cap_ext dfp_cap_ext; +		memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); +		core_link_read_dpcd( +				link, +				DP_DFP_CAPABILITY_EXTENSION_SUPPORT, +				dfp_cap_ext.raw, +				sizeof(dfp_cap_ext.raw)); +		link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; +		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = +				dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + +				(dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); +		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = +				dfp_cap_ext.fields.max_video_h_active_width[0] + +				(dfp_cap_ext.fields.max_video_h_active_width[1] << 8); +		link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = +				dfp_cap_ext.fields.max_video_v_active_height[0] + +				(dfp_cap_ext.fields.max_video_v_active_height[1] << 8); +		link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = +				dfp_cap_ext.fields.encoding_format_caps; +		link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = +				dfp_cap_ext.fields.rgb_color_depth_caps; +		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = +				dfp_cap_ext.fields.ycbcr444_color_depth_caps; +		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = +				dfp_cap_ext.fields.ycbcr422_color_depth_caps; +		link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = +				dfp_cap_ext.fields.ycbcr420_color_depth_caps; +		DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); +		DC_LOG_DP2("\tdfp_cap_ext.supported = %s", 
link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); +		DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); +		DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); +		DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); +	} +} + +static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, +		struct dc_link_settings *link_settings) +{ +	/* Temporary Renoir-specific workaround PHY will sometimes be in bad +	 * state on hotplugging display from certain USB-C dongle, so add extra +	 * cycle of enabling and disabling the PHY before first link training. +	 */ +	struct link_resource link_res = {0}; +	enum clock_source_id dp_cs_id = get_clock_source_id(link); + +	dp_enable_link_phy(link, &link_res, link->connector_signal, +			dp_cs_id, link_settings); +	dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +static bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ +	uint8_t dpcd_data[16]; +	uint32_t read_dpcd_retry_cnt = 3; +	enum dc_status status = DC_ERROR_UNEXPECTED; +	union dp_downstream_port_present ds_port = { 0 }; +	union down_stream_port_count down_strm_port_count; +	union edp_configuration_cap edp_config_cap; + +	int i; + +	for (i = 0; i < read_dpcd_retry_cnt; i++) { +		status = core_link_read_dpcd( +				link, +				DP_DPCD_REV, +				dpcd_data, +				sizeof(dpcd_data)); +		if (status == DC_OK) +			break; +	} + +	link->dpcd_caps.dpcd_rev.raw = +		dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + +	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) +		return false; + +	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - +			DP_DPCD_REV]; + +	get_active_converter_info(ds_port.byte, link); + +	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - +			DP_DPCD_REV]; + +	link->dpcd_caps.allow_invalid_MSA_timing_param = +		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + +	link->dpcd_caps.max_ln_count.raw = dpcd_data[ +		DP_MAX_LANE_COUNT - DP_DPCD_REV]; + +	link->dpcd_caps.max_down_spread.raw = dpcd_data[ +		DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + +	link->reported_link_cap.lane_count = +		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; +	link->reported_link_cap.link_rate = dpcd_data[ +		DP_MAX_LINK_RATE - DP_DPCD_REV]; +	link->reported_link_cap.link_spread = +		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? +		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + +	edp_config_cap.raw = dpcd_data[ +		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; +	link->dpcd_caps.panel_mode_edp = +		edp_config_cap.bits.ALT_SCRAMBLER_RESET; +	link->dpcd_caps.dpcd_display_control_capable = +		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + +	return true; +} + +void dc_link_overwrite_extended_receiver_cap( +		struct dc_link *link) +{ +	dp_overwrite_extended_receiver_cap(link); +} + +void dpcd_set_source_specific_data(struct dc_link *link) +{ +	if (!link->dc->vendor_signature.is_valid) { +		enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED; +		struct dpcd_amd_signature amd_signature = {0}; +		struct dpcd_amd_device_id amd_device_id = {0}; + +		amd_device_id.device_id_byte1 = +				(uint8_t)(link->ctx->asic_id.chip_id); +		amd_device_id.device_id_byte2 = +				(uint8_t)(link->ctx->asic_id.chip_id >> 8); +		amd_device_id.dce_version = +				(uint8_t)(link->ctx->dce_version); +		amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? 
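		/* Note: the signature and device-id writes a few lines below land at
		 * DP_SOURCE_OUI (0x300 in the standard DPCD map) and at
		 * DP_SOURCE_OUI + 0x03 respectively, i.e. the AMD device id is
		 * placed immediately after the 3-byte IEEE OUI signature in the
		 * source-specific DPCD area.
		 */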
+		amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? + +		core_link_read_dpcd(link, DP_SOURCE_OUI, +				(uint8_t *)(&amd_signature), +				sizeof(amd_signature)); + +		if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && +			(amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && +			(amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { + +			amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; +			amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; +			amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; + +			core_link_write_dpcd(link, DP_SOURCE_OUI, +				(uint8_t *)(&amd_signature), +				sizeof(amd_signature)); +		} + +		core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, +				(uint8_t *)(&amd_device_id), +				sizeof(amd_device_id)); + +		if (link->ctx->dce_version >= DCN_VERSION_2_0 && +			link->dc->caps.min_horizontal_blanking_period != 0) { + +			uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; + +			result_write_min_hblank = core_link_write_dpcd(link, +				DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), +				sizeof(hblank_size)); +		} +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, +							"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", +							result_write_min_hblank, +							link->link_index, +							link->ctx->dce_version, +							DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, +							link->dc->caps.min_horizontal_blanking_period, +							link->dpcd_caps.branch_dev_id, +							link->dpcd_caps.branch_dev_name[0], +							link->dpcd_caps.branch_dev_name[1], +							link->dpcd_caps.branch_dev_name[2], +							link->dpcd_caps.branch_dev_name[3], +							link->dpcd_caps.branch_dev_name[4], +							link->dpcd_caps.branch_dev_name[5]); +	} else { +		core_link_write_dpcd(link, DP_SOURCE_OUI, +				link->dc->vendor_signature.data.raw, +				sizeof(link->dc->vendor_signature.data.raw)); +	} +} + +void dpcd_write_cable_id_to_dprx(struct dc_link *link) +{ +	if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || +			link->dpcd_caps.cable_id.raw == 0 || +			link->dprx_states.cable_id_written) +		return; + +	core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, +			&link->dpcd_caps.cable_id.raw, +			sizeof(link->dpcd_caps.cable_id.raw)); + +	link->dprx_states.cable_id_written = 1; +} + +static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) +{ +	union dmub_rb_cmd cmd; + +	if (!link->ctx->dmub_srv || +			link->ep_type != DISPLAY_ENDPOINT_PHY || +			link->link_enc->features.flags.bits.DP_IS_USB_C == 0) +		return false; + +	memset(&cmd, 0, sizeof(cmd)); +	cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; +	cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); +	cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( +			link->dc, link->link_enc->transmitter); +	if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && +			cmd.cable_id.header.ret_status == 1) { +		cable_id->raw = cmd.cable_id.data.output_raw; +		DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); +	} +	return cmd.cable_id.header.ret_status == 1; +} + +static void retrieve_cable_id(struct dc_link *link) +{ +	union dp_cable_id usbc_cable_id; + +	link->dpcd_caps.cable_id.raw = 0; +	core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, +			&link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); + +	if (get_usbc_cable_id(link, &usbc_cable_id)) +		link->dpcd_caps.cable_id = intersect_cable_id( +				
&link->dpcd_caps.cable_id, &usbc_cable_id); +} + +bool read_is_mst_supported(struct dc_link *link) +{ +	bool mst          = false; +	enum dc_status st = DC_OK; +	union dpcd_rev rev; +	union mstm_cap cap; + +	if (link->preferred_training_settings.mst_enable && +		*link->preferred_training_settings.mst_enable == false) { +		return false; +	} + +	rev.raw  = 0; +	cap.raw  = 0; + +	st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, +			sizeof(rev)); + +	if (st == DC_OK && rev.raw >= DPCD_REV_12) { + +		st = core_link_read_dpcd(link, DP_MSTM_CAP, +				&cap.raw, sizeof(cap)); +		if (st == DC_OK && cap.bits.MST_CAP == 1) +			mst = true; +	} +	return mst; + +} + +/* Read additional sink caps defined in source specific DPCD area + * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) + * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well + */ +static bool dpcd_read_sink_ext_caps(struct dc_link *link) +{ +	uint8_t dpcd_data; + +	if (!link) +		return false; + +	if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) +		return false; + +	link->dpcd_sink_ext_caps.raw = dpcd_data; +	return true; +} + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) +{ +	uint8_t lttpr_dpcd_data[8]; +	enum dc_status status; +	bool is_lttpr_present; + +	/* Logic to determine LTTPR support*/ +	bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; + +	if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) +		return DC_NOT_SUPPORTED; + +	/* By reading LTTPR capability, RX assumes that we will enable +	 * LTTPR extended aux timeout if LTTPR is present. +	 */ +	status = core_link_read_dpcd( +			link, +			DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, +			lttpr_dpcd_data, +			sizeof(lttpr_dpcd_data)); + +	link->dpcd_caps.lttpr_caps.revision.raw = +			lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.max_link_rate = +			lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.phy_repeater_cnt = +			lttpr_dpcd_data[DP_PHY_REPEATER_CNT - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.max_lane_count = +			lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.mode = +			lttpr_dpcd_data[DP_PHY_REPEATER_MODE - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.max_ext_timeout = +			lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +	link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = +			lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = +			lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - +							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +	/* If this chip cap is set, at least one retimer must exist in the chain +	 * Override count to 1 if we receive a known bad count (0 or an invalid value) */ +	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { +		ASSERT(0); +		link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; +		DC_LOG_DC("lttpr_caps forced 
phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	} + +	/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ +	is_lttpr_present = dp_is_lttpr_present(link); + +	if (is_lttpr_present) +		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); + +	DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); +	return status; +} + +static bool retrieve_link_cap(struct dc_link *link) +{ +	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, +	 * which means size 16 will be good for both of those DPCD register block reads +	 */ +	uint8_t dpcd_data[16]; +	/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. +	 */ +	uint8_t dpcd_dprx_data = '\0'; + +	struct dp_device_vendor_id sink_id; +	union down_stream_port_count down_strm_port_count; +	union edp_configuration_cap edp_config_cap; +	union dp_downstream_port_present ds_port = { 0 }; +	enum dc_status status = DC_ERROR_UNEXPECTED; +	uint32_t read_dpcd_retry_cnt = 3; +	int i; +	struct dp_sink_hw_fw_revision dp_hw_fw_revision; +	const uint32_t post_oui_delay = 30; // 30ms + +	memset(dpcd_data, '\0', sizeof(dpcd_data)); +	memset(&down_strm_port_count, +		'\0', sizeof(union down_stream_port_count)); +	memset(&edp_config_cap, '\0', +		sizeof(union edp_configuration_cap)); + +	/* if extended timeout is supported in hardware, +	 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer +	 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. +	 */ +	try_to_configure_aux_timeout(link->ddc, +			LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + +	status = dp_retrieve_lttpr_cap(link); + +	if (status != DC_OK) { +		status = wake_up_aux_channel(link); +		if (status == DC_OK) +			dp_retrieve_lttpr_cap(link); +		else +			return false; +	} + +	if (dp_is_lttpr_present(link)) +		configure_lttpr_mode_transparent(link); + +	/* Read DP tunneling information. 
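Before moving on to the tunneling read below, one remark on the repeater-count override above: PHY_REPEATER_CNT is encoded one-hot in DPCD, which is why 0x80 is the value used to force a count of one. The hypothetical helper below shows the decoding that dp_parse_lttpr_repeater_count() is expected to perform; the function name and the table are illustrative, derived from that DPCD encoding rather than copied from this patch.

static uint8_t lttpr_count_from_dpcd_sketch(uint8_t phy_repeater_cnt)
{
	switch (phy_repeater_cnt) {
	case 0x80: return 1;
	case 0x40: return 2;
	case 0x20: return 3;
	case 0x10: return 4;
	case 0x08: return 5;
	case 0x04: return 6;
	case 0x02: return 7;
	case 0x01: return 8;
	default: return 0; /* 0 or any other value is treated as invalid */
	}
}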
*/ +	status = dpcd_get_tunneling_device_data(link); + +	dpcd_set_source_specific_data(link); +	/* Sink may need to configure internals based on vendor, so allow some +	 * time before proceeding with possibly vendor specific transactions +	 */ +	msleep(post_oui_delay); + +	for (i = 0; i < read_dpcd_retry_cnt; i++) { +		status = core_link_read_dpcd( +				link, +				DP_DPCD_REV, +				dpcd_data, +				sizeof(dpcd_data)); +		if (status == DC_OK) +			break; +	} + + +	if (status != DC_OK) { +		dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); +		return false; +	} + +	if (!dp_is_lttpr_present(link)) +		try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + + +	{ +		union training_aux_rd_interval aux_rd_interval; + +		aux_rd_interval.raw = +			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; + +		link->dpcd_caps.ext_receiver_cap_field_present = +				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; + +		if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { +			uint8_t ext_cap_data[16]; + +			memset(ext_cap_data, '\0', sizeof(ext_cap_data)); +			for (i = 0; i < read_dpcd_retry_cnt; i++) { +				status = core_link_read_dpcd( +				link, +				DP_DP13_DPCD_REV, +				ext_cap_data, +				sizeof(ext_cap_data)); +				if (status == DC_OK) { +					memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); +					break; +				} +			} +			if (status != DC_OK) +				dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); +		} +	} + +	link->dpcd_caps.dpcd_rev.raw = +			dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + +	if (link->dpcd_caps.ext_receiver_cap_field_present) { +		for (i = 0; i < read_dpcd_retry_cnt; i++) { +			status = core_link_read_dpcd( +					link, +					DP_DPRX_FEATURE_ENUMERATION_LIST, +					&dpcd_dprx_data, +					sizeof(dpcd_dprx_data)); +			if (status == DC_OK) +				break; +		} + +		link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; + +		if (status != DC_OK) +			dm_error("%s: Read DPRX caps data failed.\n", __func__); + +		/* AdaptiveSyncCapability  */ +		dpcd_dprx_data = 0; +		for (i = 0; i < read_dpcd_retry_cnt; i++) { +			status = core_link_read_dpcd( +					link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, +					&dpcd_dprx_data, sizeof(dpcd_dprx_data)); +			if (status == DC_OK) +				break; +		} + +		link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data; + +		if (status != DC_OK) +			dm_error("%s: Read DPRX caps data failed. Addr:%#x\n", +					__func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1); +	} + +	else { +		link->dpcd_caps.dprx_feature.raw = 0; +	} + + +	/* Error condition checking... +	 * It is impossible for Sink to report Max Lane Count = 0. +	 * It is possible for Sink to report Max Link Rate = 0, if it is +	 * an eDP device that is reporting specialized link rates in the +	 * SUPPORTED_LINK_RATE table. 
+	 */ +	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) +		return false; + +	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - +				 DP_DPCD_REV]; + +	read_dp_device_vendor_id(link); + +	/* TODO - decouple raw mst capability from policy decision */ +	link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + +	get_active_converter_info(ds_port.byte, link); + +	dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); + +	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - +				 DP_DPCD_REV]; + +	link->dpcd_caps.allow_invalid_MSA_timing_param = +		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + +	link->dpcd_caps.max_ln_count.raw = dpcd_data[ +		DP_MAX_LANE_COUNT - DP_DPCD_REV]; + +	link->dpcd_caps.max_down_spread.raw = dpcd_data[ +		DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + +	link->reported_link_cap.lane_count = +		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; +	link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( +			dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); +	link->reported_link_cap.link_spread = +		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? +		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + +	edp_config_cap.raw = dpcd_data[ +		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; +	link->dpcd_caps.panel_mode_edp = +		edp_config_cap.bits.ALT_SCRAMBLER_RESET; +	link->dpcd_caps.dpcd_display_control_capable = +		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; +	link->dpcd_caps.channel_coding_cap.raw = +			dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; +	link->test_pattern_enabled = false; +	link->compliance_test_state.raw = 0; + +	/* read sink count */ +	core_link_read_dpcd(link, +			DP_SINK_COUNT, +			&link->dpcd_caps.sink_count.raw, +			sizeof(link->dpcd_caps.sink_count.raw)); + +	/* read sink ieee oui */ +	core_link_read_dpcd(link, +			DP_SINK_OUI, +			(uint8_t *)(&sink_id), +			sizeof(sink_id)); + +	link->dpcd_caps.sink_dev_id = +			(sink_id.ieee_oui[0] << 16) + +			(sink_id.ieee_oui[1] << 8) + +			(sink_id.ieee_oui[2]); + +	memmove( +		link->dpcd_caps.sink_dev_id_str, +		sink_id.ieee_device_id, +		sizeof(sink_id.ieee_device_id)); + +	core_link_read_dpcd( +		link, +		DP_SINK_HW_REVISION_START, +		(uint8_t *)&dp_hw_fw_revision, +		sizeof(dp_hw_fw_revision)); + +	link->dpcd_caps.sink_hw_revision = +		dp_hw_fw_revision.ieee_hw_rev; + +	memmove( +		link->dpcd_caps.sink_fw_revision, +		dp_hw_fw_revision.ieee_fw_rev, +		sizeof(dp_hw_fw_revision.ieee_fw_rev)); + +	/* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */ +	{ +		uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; +		uint8_t fwrev_mbp_2018[] = { 7, 4 }; +		uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + +		/* We also check for the firmware revision as 16,1 models have an +		 * identical device id and are incorrectly quirked otherwise. 
+		 */ +		if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && +		    !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, +			     sizeof(str_mbp_2018)) && +		    (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, +			     sizeof(fwrev_mbp_2018)) || +		    !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, +			     sizeof(fwrev_mbp_2018_vega)))) { +			link->reported_link_cap.link_rate = LINK_RATE_RBR2; +		} +	} + +	memset(&link->dpcd_caps.dsc_caps, '\0', +			sizeof(link->dpcd_caps.dsc_caps)); +	memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); +	/* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { +		status = core_link_read_dpcd( +				link, +				DP_FEC_CAPABILITY, +				&link->dpcd_caps.fec_cap.raw, +				sizeof(link->dpcd_caps.fec_cap.raw)); +		status = core_link_read_dpcd( +				link, +				DP_DSC_SUPPORT, +				link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, +				sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); +		if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { +			status = core_link_read_dpcd( +					link, +					DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, +					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, +					sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); +			DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); +			DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", +					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); +			DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", +					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); +			DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", +					link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); +		} + +		/* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode +		 * only if required. +		 */ +		if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +				link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && +				link->dpcd_caps.is_branch_dev && +				link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && +				link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && +				(link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || +				link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { +			/* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. +			 * Clear FEC and DSC capabilities as a work around if that is not the case. 
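The guard for this workaround is spread across several lines above; restated as a single predicate it reads as below. The helper name is chosen here for illustration only, and, as in the surrounding code, the check is only reached for DPCD 1.4+ sinks.

static bool needs_tbt3_fec_dsc_wa_sketch(const struct dc_link *link)
{
	return link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
		link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
		link->dpcd_caps.is_branch_dev &&
		link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
		link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
		(link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
		 link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT);
}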
+			 */ +			link->wa_flags.dpia_forced_tbt3_mode = true; +			memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); +			memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); +			DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); +		} else +			link->wa_flags.dpia_forced_tbt3_mode = false; +	} + +	if (!dpcd_read_sink_ext_caps(link)) +		link->dpcd_sink_ext_caps.raw = 0; + +	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { +		DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + +		core_link_read_dpcd(link, +				DP_128B132B_SUPPORTED_LINK_RATES, +				&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, +				sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); +		if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) +			link->reported_link_cap.link_rate = LINK_RATE_UHBR20; +		else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) +			link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; +		else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) +			link->reported_link_cap.link_rate = LINK_RATE_UHBR10; +		else +			dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); +		DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); +		DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", +				link->reported_link_cap.link_rate / 100, +				link->reported_link_cap.link_rate % 100); + +		core_link_read_dpcd(link, +				DP_SINK_VIDEO_FALLBACK_FORMATS, +				&link->dpcd_caps.fallback_formats.raw, +				sizeof(link->dpcd_caps.fallback_formats.raw)); +		DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); +		if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) +			DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); +		if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) +			DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); +		if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) +			DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); +		if (link->dpcd_caps.fallback_formats.raw == 0) { +			DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); +			link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; +		} + +		core_link_read_dpcd(link, +				DP_FEC_CAPABILITY_1, +				&link->dpcd_caps.fec_cap1.raw, +				sizeof(link->dpcd_caps.fec_cap1.raw)); +		DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); +		if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) +			DC_LOG_DP2("\tFEC aggregated error counters are supported"); +	} + +	retrieve_cable_id(link); +	dpcd_write_cable_id_to_dprx(link); + +	/* Connectivity log: detection */ +	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); + +	return true; +} + +bool detect_dp_sink_caps(struct dc_link *link) +{ +	return retrieve_link_cap(link); +} + +void detect_edp_sink_caps(struct dc_link *link) +{ +	uint8_t supported_link_rates[16]; +	uint32_t entry; +	uint32_t link_rate_in_khz; +	enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; +	uint8_t backlight_adj_cap; +	uint8_t general_edp_cap; + +	retrieve_link_cap(link); +	link->dpcd_caps.edp_supported_link_rates_count = 0; +	memset(supported_link_rates, 0, sizeof(supported_link_rates)); + +	/* +	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
+	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" +	 */ +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && +			(link->panel_config.ilr.optimize_edp_link_rate || +			link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { +		// Read DPCD 00010h - 0001Fh 16 bytes at one shot +		core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, +							supported_link_rates, sizeof(supported_link_rates)); + +		for (entry = 0; entry < 16; entry += 2) { +			// DPCD register reports per-lane link rate = 16-bit link rate capability +			// value X 200 kHz. Need multiplier to find link rate in kHz. +			link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + +										supported_link_rates[entry]) * 200; + +			if (link_rate_in_khz != 0) { +				link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); +				link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; +				link->dpcd_caps.edp_supported_link_rates_count++; + +				if (link->reported_link_cap.link_rate < link_rate) +					link->reported_link_cap.link_rate = link_rate; +			} +		} +	} +	core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, +						&backlight_adj_cap, sizeof(backlight_adj_cap)); + +	link->dpcd_caps.dynamic_backlight_capable_edp = +				(backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; + +	core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, +						&general_edp_cap, sizeof(general_edp_cap)); + +	link->dpcd_caps.set_power_state_capable_edp = +				(general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; + +	set_default_brightness_aux(link); + +	core_link_read_dpcd(link, DP_EDP_DPCD_REV, +		&link->dpcd_caps.edp_rev, +		sizeof(link->dpcd_caps.edp_rev)); +	/* +	 * PSR is only valid for eDP v1.3 or higher. +	 */ +	if (link->dpcd_caps.edp_rev >= DP_EDP_13) { +		core_link_read_dpcd(link, DP_PSR_SUPPORT, +			&link->dpcd_caps.psr_info.psr_version, +			sizeof(link->dpcd_caps.psr_info.psr_version)); +		if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) +			core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, +						&link->dpcd_caps.psr_info.force_psrsu_cap, +						sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); +		core_link_read_dpcd(link, DP_PSR_CAPS, +			&link->dpcd_caps.psr_info.psr_dpcd_caps.raw, +			sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); +		if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { +			core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, +				&link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, +				sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); +		} +	} + +	/* +	 * ALPM is only valid for eDP v1.4 or higher. 
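A brief worked example for the SUPPORTED_LINK_RATES parsing earlier in detect_edp_sink_caps(): a raw table entry of bytes 0xBC 0x34 is the little-endian value 0x34BC = 13500, and 13500 * 200 kHz = 2,700,000 kHz, which is the 2.7 Gbps-per-lane HBR rate; entries that decode to zero are simply skipped.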
+	 */ +	if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) +		core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, +			&link->dpcd_caps.alpm_caps.raw, +			sizeof(link->dpcd_caps.alpm_caps.raw)); +} + +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ +	struct link_encoder *link_enc = NULL; + +	if (!max_link_enc_cap) { +		DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); +		return false; +	} + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	if (link_enc && link_enc->funcs->get_max_link_cap) { +		link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); +		return true; +	} + +	DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); +	max_link_enc_cap->lane_count = 1; +	max_link_enc_cap->link_rate = 6; +	return false; +} + +const struct dc_link_settings *dc_link_get_link_cap( +		const struct dc_link *link) +{ +	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && +			link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) +		return &link->preferred_link_setting; +	return &link->verified_link_cap; +} + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) +{ +	struct dc_link_settings max_link_cap = {0}; +	enum dc_link_rate lttpr_max_link_rate; +	enum dc_link_rate cable_max_link_rate; +	struct link_encoder *link_enc = NULL; + + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	/* get max link encoder capability */ +	if (link_enc) +		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); + +	/* Lower link settings based on sink's link cap */ +	if (link->reported_link_cap.lane_count < max_link_cap.lane_count) +		max_link_cap.lane_count = +				link->reported_link_cap.lane_count; +	if (link->reported_link_cap.link_rate < max_link_cap.link_rate) +		max_link_cap.link_rate = +				link->reported_link_cap.link_rate; +	if (link->reported_link_cap.link_spread < +			max_link_cap.link_spread) +		max_link_cap.link_spread = +				link->reported_link_cap.link_spread; + +	/* Lower link settings based on cable attributes +	 * Cable ID is a DP2 feature to identify max certified link rate that +	 * a cable can carry. The cable identification method requires both +	 * cable and display hardware support. Since the specs comes late, it is +	 * anticipated that the first round of DP2 cables and displays may not +	 * be fully compatible to reliably return cable ID data. Therefore the +	 * decision of our cable id policy is that if the cable can return non +	 * zero cable id data, we will take cable's link rate capability into +	 * account. However if we get zero data, the cable link rate capability +	 * is considered inconclusive. In this case, we will not take cable's +	 * capability into account to avoid of over limiting hardware capability +	 * from users. The max overall link rate capability is still determined +	 * after actual dp pre-training. Cable id is considered as an auxiliary +	 * method of determining max link bandwidth capability. +	 */ +	cable_max_link_rate = get_cable_max_link_rate(link); + +	if (!link->dc->debug.ignore_cable_id && +			cable_max_link_rate != LINK_RATE_UNKNOWN && +			cable_max_link_rate < max_link_cap.link_rate) +		max_link_cap.link_rate = cable_max_link_rate; + +	/* account for lttpr repeaters cap +	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
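To summarize the capping policy described in this comment block: the link-rate limit only ever moves downward, starting from the encoder cap and then being clamped by the sink's reported cap, by a conclusive (non-zero) cable ID, and finally by any LTTPR limit. Below is a minimal sketch of that ordering for the link rate only (lane count is clamped the same way); the helper name is illustrative and the debug overrides in the real function are omitted.

static enum dc_link_rate clamp_max_link_rate_sketch(enum dc_link_rate enc_max,
		enum dc_link_rate sink_max,
		enum dc_link_rate cable_max,
		enum dc_link_rate lttpr_max,
		bool lttpr_present)
{
	enum dc_link_rate rate = enc_max;

	if (sink_max < rate)
		rate = sink_max;
	if (cable_max != LINK_RATE_UNKNOWN && cable_max < rate)
		rate = cable_max;
	if (lttpr_present && lttpr_max < rate)
		rate = lttpr_max;
	return rate;
}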
+	 */ +	if (dp_is_lttpr_present(link)) { +		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) +			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; +		lttpr_max_link_rate = get_lttpr_max_link_rate(link); + +		if (lttpr_max_link_rate < max_link_cap.link_rate) +			max_link_cap.link_rate = lttpr_max_link_rate; + +		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR,  max_lane count %d max_link rate %d \n", +						__func__, +						max_link_cap.lane_count, +						max_link_cap.link_rate); +	} + +	if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && +			link->dc->debug.disable_uhbr) +		max_link_cap.link_rate = LINK_RATE_HIGH3; + +	return max_link_cap; +} + +static bool dp_verify_link_cap( +	struct dc_link *link, +	struct dc_link_settings *known_limit_link_setting, +	int *fail_count) +{ +	struct dc_link_settings cur_link_settings = {0}; +	struct dc_link_settings max_link_settings = *known_limit_link_setting; +	bool success = false; +	bool skip_video_pattern; +	enum clock_source_id dp_cs_id = get_clock_source_id(link); +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	union hpd_irq_data irq_data; +	struct link_resource link_res; + +	memset(&irq_data, 0, sizeof(irq_data)); +	cur_link_settings = max_link_settings; + +	/* Grant extended timeout request */ +	if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { +		uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + +		core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); +	} + +	do { +		if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) +			continue; + +		skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; +		dp_enable_link_phy( +				link, +				&link_res, +				link->connector_signal, +				dp_cs_id, +				&cur_link_settings); + +		status = dp_perform_link_training( +				link, +				&link_res, +				&cur_link_settings, +				skip_video_pattern); + +		if (status == LINK_TRAINING_SUCCESS) { +			success = true; +			udelay(1000); +			if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK && +					dc_link_check_link_loss_status( +							link, +							&irq_data)) +				(*fail_count)++; + +		} else { +			(*fail_count)++; +		} +		dp_trace_lt_total_count_increment(link, true); +		dp_trace_lt_result_update(link, status, true); +		dp_disable_link_phy(link, &link_res, link->connector_signal); +	} while (!success && decide_fallback_link_setting(link, +			&max_link_settings, &cur_link_settings, status)); + +	link->verified_link_cap = success ? 
+			cur_link_settings : fail_safe_link_settings; +	return success; +} + +bool dp_verify_link_cap_with_retries( +	struct dc_link *link, +	struct dc_link_settings *known_limit_link_setting, +	int attempts) +{ +	int i = 0; +	bool success = false; +	int fail_count = 0; + +	dp_trace_detect_lt_init(link); + +	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && +			link->dc->debug.usbc_combo_phy_reset_wa) +		apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); + +	dp_trace_set_lt_start_timestamp(link, false); +	for (i = 0; i < attempts; i++) { +		enum dc_connection_type type = dc_connection_none; + +		memset(&link->verified_link_cap, 0, +				sizeof(struct dc_link_settings)); +		if (!dc_link_detect_connection_type(link, &type) || type == dc_connection_none) { +			link->verified_link_cap = fail_safe_link_settings; +			break; +		} else if (dp_verify_link_cap(link, known_limit_link_setting, +				&fail_count) && fail_count == 0) { +			success = true; +			break; +		} +		msleep(10); +	} + +	dp_trace_lt_fail_count_update(link, fail_count, true); +	dp_trace_set_lt_end_timestamp(link, true); + +	return success; +} + +/** + * dc_link_is_dp_sink_present() - Check if there is a native DP + * or passive DP-HDMI dongle connected + */ +bool dc_link_is_dp_sink_present(struct dc_link *link) +{ +	enum gpio_result gpio_result; +	uint32_t clock_pin = 0; +	uint8_t retry = 0; +	struct ddc *ddc; + +	enum connector_id connector_id = +		dal_graphics_object_id_get_connector_id(link->link_id); + +	bool present = +		((connector_id == CONNECTOR_ID_DISPLAY_PORT) || +		(connector_id == CONNECTOR_ID_EDP) || +		(connector_id == CONNECTOR_ID_USBC)); + +	ddc = get_ddc_pin(link->ddc); + +	if (!ddc) { +		BREAK_TO_DEBUGGER(); +		return present; +	} + +	/* Open GPIO and set it to I2C mode */ +	/* Note: this GpioMode_Input will be converted +	 * to GpioConfigType_I2cAuxDualMode in GPIO component, +	 * which indicates we need additional delay +	 */ + +	if (dal_ddc_open(ddc, GPIO_MODE_INPUT, +			 GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) { +		dal_ddc_close(ddc); + +		return present; +	} + +	/* +	 * Read GPIO: DP sink is present if both clock and data pins are zero +	 * +	 * [W/A] plug-unplug DP cable, sometimes customer board has +	 * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI +	 * then monitor can't br light up. Add retry 3 times +	 * But in real passive dongle, it need additional 3ms to detect +	 */ +	do { +		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); +		ASSERT(gpio_result == GPIO_RESULT_OK); +		if (clock_pin) +			udelay(1000); +		else +			break; +	} while (retry++ < 3); + +	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; + +	dal_ddc_close(ddc); + +	return present; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h new file mode 100644 index 000000000000..f79e4a4a9db6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_CAPABILITY_H__ +#define __DC_LINK_DP_CAPABILITY_H__ + +#include "link.h" + +bool detect_dp_sink_caps(struct dc_link *link); + +void detect_edp_sink_caps(struct dc_link *link); + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); + + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); + +/* Convert PHY repeater count read from DPCD uint8_t. */ +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); + +bool dp_is_lttpr_present(struct dc_link *link); + +bool is_dp_active_dongle(const struct dc_link *link); + +bool is_dp_branch_device(const struct dc_link *link); + +void dpcd_write_cable_id_to_dprx(struct dc_link *link); + +/* Initialize output parameter lt_settings. */ +void dp_decide_training_settings( +	struct dc_link *link, +	const struct dc_link_settings *link_setting, +	struct link_training_settings *lt_settings); + + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, +		struct dc_link_settings *link_setting, +		uint32_t req_bw, +		enum dc_link_rate max_link_rate); + +void dpcd_set_source_specific_data(struct dc_link *link); + +/*query dpcd for version and mst cap addresses*/ +bool read_is_mst_supported(struct dc_link *link); + +bool decide_fallback_link_setting( +		struct dc_link *link, +		struct dc_link_settings *max, +		struct dc_link_settings *cur, +		enum link_training_result training_result); + +bool dp_verify_link_cap_with_retries( +	struct dc_link *link, +	struct dc_link_settings *known_limit_link_setting, +	int attempts); + +#endif /* __DC_LINK_DP_CAPABILITY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c new file mode 100644 index 000000000000..32f48a48e9dd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc.h" +#include "inc/core_status.h" +#include "dc_link.h" +#include "dpcd_defs.h" + +#include "link_dp_dpia.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ +	link->ctx->logger + +/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ +/* DPCD DP Tunneling over USB4 */ +#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d +#define DP_IN_ADAPTER_INFO                0xe000e +#define DP_USB4_DRIVER_ID                 0xe000f +#define DP_USB4_ROUTER_TOPOLOGY_ID        0xe001b + +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) +{ +	enum dc_status status = DC_OK; +	uint8_t dpcd_dp_tun_data[3] = {0}; +	uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; +	uint8_t i = 0; + +	status = core_link_read_dpcd( +			link, +			DP_TUNNELING_CAPABILITIES_SUPPORT, +			dpcd_dp_tun_data, +			sizeof(dpcd_dp_tun_data)); + +	status = core_link_read_dpcd( +			link, +			DP_USB4_ROUTER_TOPOLOGY_ID, +			dpcd_topology_data, +			sizeof(dpcd_topology_data)); + +	link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = +			dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; +	link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = +			dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; +	link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = +			dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + +	for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) +		link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; + +	return status; +} + +bool dc_link_dpia_query_hpd_status(struct dc_link *link) +{ +	union dmub_rb_cmd cmd = {0}; +	struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; +	bool is_hpd_high = false; + +	/* prepare QUERY_HPD command */ +	cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; +	cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; +	cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; + +	/* Return HPD status reported by DMUB if query successfully executed. 
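A minimal userspace-style sketch of the relative-offset indexing used by dpcd_get_tunneling_device_data() above: one AUX read starting at DP_TUNNELING_CAPABILITIES_SUPPORT covers 0xe000d-0xe000f, and each field is picked out by subtracting the base address. The register values below are illustrative, not read from hardware.

#include <stdint.h>
#include <stdio.h>

#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
#define DP_IN_ADAPTER_INFO                0xe000e
#define DP_USB4_DRIVER_ID                 0xe000f

int main(void)
{
	/* stand-in for the 3-byte core_link_read_dpcd() result */
	uint8_t dpcd_dp_tun_data[3] = { 0x81, 0x02, 0x07 };

	uint8_t tun_cap   = dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; /* [0] */
	uint8_t dpia_info = dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];                /* [1] */
	uint8_t driver_id = dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];                 /* [2] */

	printf("tun_cap=0x%02x dpia_info=0x%02x driver_id=0x%02x\n", tun_cap, dpia_info, driver_id);
	return 0;
}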
*/ +	if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) +		is_hpd_high = cmd.query_hpd.data.result; + +	DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", +		__func__, +		link->link_index, +		link->link_id.enum_id - ENUM_ID_1, +		cmd.query_hpd.data.status, +		cmd.query_hpd.data.result); + +	return is_hpd_high; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h new file mode 100644 index 000000000000..98935cc10bb7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPIA_H__ +#define __DC_LINK_DPIA_H__ + +#include "link.h" + +/* Read tunneling device capability from DPCD and update link capability + * accordingly. + */ +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); + +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dc_link_dpia_query_hpd_status(struct dc_link *link); + + +#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c new file mode 100644 index 000000000000..f69e681b3b5b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -0,0 +1,441 @@ + +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +/*********************************************************************/ +//				USB4 DPIA BANDWIDTH ALLOCATION LOGIC +/*********************************************************************/ +#include "dc.h" +#include "dc_link.h" +#include "link_dp_dpia_bw.h" +#include "drm_dp_helper_dc.h" +#include "link_dpcd.h" + +#define Kbps_TO_Gbps (1000 * 1000) + +// ------------------------------------------------------------------ +//					PRIVATE FUNCTIONS +// ------------------------------------------------------------------ +/* + * Always Check the following: + *  - Is it USB4 link? + *  - Is HPD HIGH? + *  - Is BW Allocation Support Mode enabled on DP-Tx? + */ +static bool get_bw_alloc_proceed_flag(struct dc_link *tmp) +{ +	return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type +			&& tmp->hpd_status +			&& tmp->dpia_bw_alloc_config.bw_alloc_enabled); +} +static void reset_bw_alloc_struct(struct dc_link *link) +{ +	link->dpia_bw_alloc_config.bw_alloc_enabled = false; +	link->dpia_bw_alloc_config.sink_verified_bw = 0; +	link->dpia_bw_alloc_config.sink_max_bw = 0; +	link->dpia_bw_alloc_config.estimated_bw = 0; +	link->dpia_bw_alloc_config.bw_granularity = 0; +	link->dpia_bw_alloc_config.response_ready = false; +} +static uint8_t get_bw_granularity(struct dc_link *link) +{ +	uint8_t bw_granularity = 0; + +	core_link_read_dpcd( +			link, +			DP_BW_GRANULALITY, +			&bw_granularity, +			sizeof(uint8_t)); + +	switch (bw_granularity & 0x3) { +	case 0: +		bw_granularity = 4; +		break; +	case 1: +	default: +		bw_granularity = 2; +		break; +	} + +	return bw_granularity; +} +static int get_estimated_bw(struct dc_link *link) +{ +	uint8_t bw_estimated_bw = 0; + +	if (core_link_read_dpcd( +		link, +		ESTIMATED_BW, +		&bw_estimated_bw, +		sizeof(uint8_t)) != DC_OK) +		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, ESTIMATED_BW); + +	return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); +} +static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link) +{ +	if (bw_needed > 0) +		*stream_allocated_bw += bw_needed; + +	return true; +} +static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link) +{ +	bool ret = false; + +	if (*stream_allocated_bw > 0) { +		*stream_allocated_bw -= bw_to_dealloc; +		ret = true; +	} else { +		//Do nothing for now +		ret = true; +	} + +	// Unplug so reset values +	if (!link->hpd_status) +		reset_bw_alloc_struct(link); + +	return ret; +} +/* + * Read all New BW alloc configuration ex: estimated_bw, allocated_bw, + * granuality, Driver_ID, CM_Group, & populate the BW allocation structs + * for host router and dpia + */ +static void init_usb4_bw_struct(struct dc_link *link) +{ +	// Init the known values +	link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); +	link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); +} +static uint8_t get_lowest_dpia_index(struct dc_link *link) +{ +	const struct dc *dc_struct = link->dc; +	uint8_t idx = 0xFF; + +	for (int i = 0; i < MAX_PIPES * 2; ++i) { + +		if (!dc_struct->links[i] || +				dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) +			continue; + +		if (idx > dc_struct->links[i]->link_index) +			
idx = dc_struct->links[i]->link_index;
+	}
+
+	return idx;
+}
+/*
+ * Get the total estimated or total allocated BW for the host router this
+ * DPIA link belongs to
+ *
+ * @link: pointer to the dc_link struct instance
+ * @type: HOST_ROUTER_BW_ESTIMATED or HOST_ROUTER_BW_ALLOCATED
+ *
+ * return: sum of the requested BW type over all connected DPIAs on the same host router
+ */
+static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+{
+	const struct dc *dc_struct = link->dc;
+	uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+	uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+	struct dc_link *link_temp;
+	int total_bw = 0;
+
+	for (int i = 0; i < MAX_PIPES * 2; ++i) {
+
+		if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+			continue;
+
+		link_temp = dc_struct->links[i];
+		if (!link_temp || !link_temp->hpd_status)
+			continue;
+
+		idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+
+		if (idx_temp == idx) {
+
+			if (type == HOST_ROUTER_BW_ESTIMATED)
+				total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+			else if (type == HOST_ROUTER_BW_ALLOCATED)
+				total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+		}
+	}
+
+	return total_bw;
+}
+/*
+ * Cleanup function for when the dpia is unplugged: reset the BW allocation
+ * struct and release any BW that is still allocated
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: true if de-allocation succeeded (or there was nothing to do)
+ */
+static bool dpia_bw_alloc_unplug(struct dc_link *link)
+{
+	bool ret = false;
+
+	if (!link)
+		return true;
+
+	return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+			link->dpia_bw_alloc_config.sink_allocated_bw, link);
+}
+static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+{
+	uint8_t requested_bw;
+	uint32_t temp;
+
+	// 1. Add check for this corner case #1
+	if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+		req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+	temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+	requested_bw = temp / Kbps_TO_Gbps;
+
+	// Round up so the integer division never undershoots the request
+	if (temp % Kbps_TO_Gbps)
+		++requested_bw;
+
+	// 2. 
Add check for this corner case #2 +	req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); +	if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) +		return; + +	if (core_link_write_dpcd( +		link, +		REQUESTED_BW, +		&requested_bw, +		sizeof(uint8_t)) != DC_OK) +		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, REQUESTED_BW); +	else +		link->dpia_bw_alloc_config.response_ready = false; // Reset flag +} +/* + * Return the response_ready flag from dc_link struct + * + * @link: pointer to the dc_link struct instance + * + * return: response_ready flag from dc_link struct + */ +static bool get_cm_response_ready_flag(struct dc_link *link) +{ +	return link->dpia_bw_alloc_config.response_ready; +} +// ------------------------------------------------------------------ +//					PUBLIC FUNCTIONS +// ------------------------------------------------------------------ +bool set_dptx_usb4_bw_alloc_support(struct dc_link *link) +{ +	bool ret = false; +	uint8_t response = 0, +			bw_support_dpia = 0, +			bw_support_cm = 0; + +	if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status)) +		goto out; + +	if (core_link_read_dpcd( +		link, +		DP_TUNNELING_CAPABILITIES, +		&response, +		sizeof(uint8_t)) != DC_OK) +		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES); + +	bw_support_dpia = (response >> 7) & 1; + +	if (core_link_read_dpcd( +		link, +		USB4_DRIVER_BW_CAPABILITY, +		&response, +		sizeof(uint8_t)) != DC_OK) +		dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES); + +	bw_support_cm = (response >> 7) & 1; + +	/* Send request acknowledgment to Turn ON DPTX support */ +	if (bw_support_cm && bw_support_dpia) { + +		response = 0x80; +		if (core_link_write_dpcd( +				link, +				DPTX_BW_ALLOCATION_MODE_CONTROL, +				&response, +				sizeof(uint8_t)) != DC_OK) +			dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", +					"**** FAILURE Enabling DPtx BW Allocation Mode Support ***\n", +					__func__, DP_TUNNELING_CAPABILITIES); +		else { + +			// SUCCESS Enabled DPtx BW Allocation Mode Support +			link->dpia_bw_alloc_config.bw_alloc_enabled = true; +			dm_output_to_console("**** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n"); + +			ret = true; +			init_usb4_bw_struct(link); +		} +	} + +out: +	return ret; +} +void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result) +{ +	if (!get_bw_alloc_proceed_flag((link))) +		return; + +	switch (result) { + +	case DPIA_BW_REQ_FAILED: + +		dm_output_to_console("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__); + +		// Update the new Estimated BW value updated by CM +		link->dpia_bw_alloc_config.estimated_bw = +				bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + +		dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw); +		link->dpia_bw_alloc_config.response_ready = false; + +		/* +		 * If FAIL then it is either: +		 * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally +		 *    => get estimated and allocate that +		 * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then +		 *    CM will have to update 0xE0023 with new ESTIMATED BW value. +		 */ +		break; + +	case DPIA_BW_REQ_SUCCESS: + +		dm_output_to_console("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__); + +		// 1. SUCCESS 1st time before any Pruning is done +		// 2. SUCCESS after prev. FAIL before any Pruning is done +		// 3. 
SUCCESS after Pruning is done but before enabling link + +		int needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + +		// 1. +		if (!link->dpia_bw_alloc_config.sink_allocated_bw) { + +			allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, needed, link); +			link->dpia_bw_alloc_config.sink_verified_bw = +					link->dpia_bw_alloc_config.sink_allocated_bw; + +			// SUCCESS from first attempt +			if (link->dpia_bw_alloc_config.sink_allocated_bw > +			link->dpia_bw_alloc_config.sink_max_bw) +				link->dpia_bw_alloc_config.sink_verified_bw = +						link->dpia_bw_alloc_config.sink_max_bw; +		} +		// 3. +		else if (link->dpia_bw_alloc_config.sink_allocated_bw) { + +			// Find out how much do we need to de-alloc +			if (link->dpia_bw_alloc_config.sink_allocated_bw > needed) +				deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, +						link->dpia_bw_alloc_config.sink_allocated_bw - needed, link); +			else +				allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, +						needed - link->dpia_bw_alloc_config.sink_allocated_bw, link); +		} + +		// 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA +		// => check if estimated_bw changed + +		link->dpia_bw_alloc_config.response_ready = true; +		break; + +	case DPIA_EST_BW_CHANGED: + +		dm_output_to_console("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__); + +		int available = 0, estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); +		int host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED); + +		// 1. If due to unplug of other sink +		if (estimated == host_router_total_estimated_bw) { + +			// First update the estimated & max_bw fields +			if (link->dpia_bw_alloc_config.estimated_bw < estimated) { +				available = estimated - link->dpia_bw_alloc_config.estimated_bw; +				link->dpia_bw_alloc_config.estimated_bw = estimated; +			} +		} +		// 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw +		else { + +			// We took from another unplugged/problematic sink to give to us +			if (link->dpia_bw_alloc_config.estimated_bw < estimated) +				available = estimated - link->dpia_bw_alloc_config.estimated_bw; + +			// We lost estimated bw usually due to plug event of other dpia +			link->dpia_bw_alloc_config.estimated_bw = estimated; +		} +		break; + +	case DPIA_BW_ALLOC_CAPS_CHANGED: + +		dm_output_to_console("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__); +		link->dpia_bw_alloc_config.bw_alloc_enabled = false; +		break; +	} +} +int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw) +{ +	int ret = 0; +	uint8_t timeout = 10; + +	if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type +			&& link->dpia_bw_alloc_config.bw_alloc_enabled)) +		goto out; + +	//1. Hot Plug +	if (link->hpd_status && peak_bw > 0) { + +		// If DP over USB4 then we need to check BW allocation +		link->dpia_bw_alloc_config.sink_max_bw = peak_bw; +		dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw); + +		do { +			if (!timeout > 0) +				timeout--; +			else +				break; +			udelay(10 * 1000); +		} while (!get_cm_response_ready_flag(link)); + +		if (!timeout) +			ret = 0;// ERROR TIMEOUT waiting for response for allocating bw +		else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) +			ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); +	} +	//2. 
Cold Unplug +	else if (!link->hpd_status) +		dpia_bw_alloc_unplug(link); + +out: +	return ret; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h new file mode 100644 index 000000000000..c2c3049adcd1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -0,0 +1,47 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DC_INC_LINK_DP_DPIA_BW_H_ +#define DC_INC_LINK_DP_DPIA_BW_H_ + +/* + * Host Router BW type + */ +enum bw_type { +	HOST_ROUTER_BW_ESTIMATED, +	HOST_ROUTER_BW_ALLOCATED, +	HOST_ROUTER_BW_INVALID, +}; + +/* + * Enable BW Allocation Mode Support from the DP-Tx side + * + * @link: pointer to the dc_link struct instance + * + * return: SUCCESS or FAILURE + */ +bool set_dptx_usb4_bw_alloc_support(struct dc_link *link); + +#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c new file mode 100644 index 000000000000..9d80427520cf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -0,0 +1,389 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
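A self-contained rendering of the unit conversion used by get_estimated_bw() and dc_link_set_usb4_req_bw_req() in link_dp_dpia_bw.c above, assuming bandwidth is tracked in Kbps and one DPCD unit equals (1,000,000 / bw_granularity) Kbps; the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define KBPS_PER_GBPS (1000 * 1000)	/* same factor as Kbps_TO_Gbps above */

int main(void)
{
	uint32_t bw_granularity = 4;	/* DPCD granularity field 0 decodes to 4, i.e. 0.25 Gbps per unit */
	uint32_t kbps_per_unit = KBPS_PER_GBPS / bw_granularity;	/* 250000 Kbps */

	/* DPCD ESTIMATED_BW raw value -> Kbps, as in get_estimated_bw() */
	uint8_t estimated_raw = 0x50;	/* 80 units */
	uint32_t estimated_kbps = estimated_raw * kbps_per_unit;	/* 20,000,000 Kbps = 20 Gbps */

	/* Kbps -> DPCD REQUESTED_BW units, rounded up; arithmetically the same as
	 * the temp / Kbps_TO_Gbps computation in dc_link_set_usb4_req_bw_req()
	 */
	uint32_t req_kbps = 17600000;	/* 17.6 Gbps */
	uint32_t requested_raw = req_kbps / kbps_per_unit;
	if (req_kbps % kbps_per_unit)
		++requested_raw;	/* 70.4 -> 71 units */

	printf("estimated = %u Kbps, REQUESTED_BW = 0x%02x\n",
	       (unsigned int)estimated_kbps, (unsigned int)requested_raw);
	return 0;
}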
+ * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements DP HPD short pulse handling sequence according to DP + * specifications + * + */ + +#include "link_dp_irq_handler.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "link/accessories/link_dp_trace.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" + +#define DC_LOGGER_INIT(logger) + +bool dc_link_check_link_loss_status( +	struct dc_link *link, +	union hpd_irq_data *hpd_irq_dpcd_data) +{ +	uint8_t irq_reg_rx_power_state = 0; +	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; +	union lane_status lane_status; +	uint32_t lane; +	bool sink_status_changed; +	bool return_code; + +	sink_status_changed = false; +	return_code = false; + +	if (link->cur_link_settings.lane_count == 0) +		return return_code; + +	/*1. Check that Link Status changed, before re-training.*/ + +	/*parse lane status*/ +	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { +		/* check status of lanes 0,1 +		 * changed DpcdAddress_Lane01Status (0x202) +		 */ +		lane_status.raw = dp_get_nibble_at_index( +			&hpd_irq_dpcd_data->bytes.lane01_status.raw, +			lane); + +		if (!lane_status.bits.CHANNEL_EQ_DONE_0 || +			!lane_status.bits.CR_DONE_0 || +			!lane_status.bits.SYMBOL_LOCKED_0) { +			/* if one of the channel equalization, clock +			 * recovery or symbol lock is dropped +			 * consider it as (link has been +			 * dropped) dp sink status has changed +			 */ +			sink_status_changed = true; +			break; +		} +	} + +	/* Check interlane align.*/ +	if (sink_status_changed || +		!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + +		DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); + +		return_code = true; + +		/*2. Check that we can handle interrupt: Not in FS DOS, +		 *  Not in "Display Timeout" state, Link is trained. 
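The per-lane parsing above relies on the DPCD layout of LANE0_1_STATUS (0x202) and LANE2_3_STATUS (0x203): each byte carries two lanes, four bits per lane (bit 0 CR_DONE, bit 1 CHANNEL_EQ_DONE, bit 2 SYMBOL_LOCKED). A small standalone sketch of the nibble extraction done by dp_get_nibble_at_index(), with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* mirrors dp_get_nibble_at_index(): lane N lives in buf[N / 2], low or high nibble */
static uint8_t nibble_at(const uint8_t *buf, uint32_t index)
{
	uint8_t nibble = buf[index / 2];
	return (index % 2) ? (nibble >> 4) : (nibble & 0x0F);
}

int main(void)
{
	/* illustrative 0x202/0x203 contents: lanes 0-2 fully trained, lane 3 lost clock recovery */
	uint8_t lane_status[2] = { 0x77, 0x67 };
	uint32_t lane;

	for (lane = 0; lane < 4; lane++) {
		uint8_t s = nibble_at(lane_status, lane);
		printf("lane %u: CR=%d EQ=%d SYMBOL_LOCKED=%d\n",
		       (unsigned int)lane, s & 1, (s >> 1) & 1, (s >> 2) & 1);
	}
	return 0;
}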
+		 */ +		dpcd_result = core_link_read_dpcd(link, +			DP_SET_POWER, +			&irq_reg_rx_power_state, +			sizeof(irq_reg_rx_power_state)); + +		if (dpcd_result != DC_OK) { +			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", +				__func__); +		} else { +			if (irq_reg_rx_power_state != DP_SET_POWER_D0) +				return_code = false; +		} +	} + +	return return_code; +} + +static bool handle_hpd_irq_psr_sink(struct dc_link *link) +{ +	union dpcd_psr_configuration psr_configuration; + +	if (!link->psr_settings.psr_feature_enabled) +		return false; + +	dm_helpers_dp_read_dpcd( +		link->ctx, +		link, +		368,/*DpcdAddress_PSR_Enable_Cfg*/ +		&psr_configuration.raw, +		sizeof(psr_configuration.raw)); + +	if (psr_configuration.bits.ENABLE) { +		unsigned char dpcdbuf[3] = {0}; +		union psr_error_status psr_error_status; +		union psr_sink_psr_status psr_sink_psr_status; + +		dm_helpers_dp_read_dpcd( +			link->ctx, +			link, +			0x2006, /*DpcdAddress_PSR_Error_Status*/ +			(unsigned char *) dpcdbuf, +			sizeof(dpcdbuf)); + +		/*DPCD 2006h   ERROR STATUS*/ +		psr_error_status.raw = dpcdbuf[0]; +		/*DPCD 2008h   SINK PANEL SELF REFRESH STATUS*/ +		psr_sink_psr_status.raw = dpcdbuf[2]; + +		if (psr_error_status.bits.LINK_CRC_ERROR || +				psr_error_status.bits.RFB_STORAGE_ERROR || +				psr_error_status.bits.VSC_SDP_ERROR) { +			bool allow_active; + +			/* Acknowledge and clear error bits */ +			dm_helpers_dp_write_dpcd( +				link->ctx, +				link, +				8198,/*DpcdAddress_PSR_Error_Status*/ +				&psr_error_status.raw, +				sizeof(psr_error_status.raw)); + +			/* PSR error, disable and re-enable PSR */ +			if (link->psr_settings.psr_allow_active) { +				allow_active = false; +				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); +				allow_active = true; +				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); +			} + +			return true; +		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == +				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){ +			/* No error is detect, PSR is active. +			 * We should return with IRQ_HPD handled without +			 * checking for loss of sync since PSR would have +			 * powered down main link. +			 */ +			return true; +		} +	} +	return false; +} + +void dc_link_dp_handle_link_loss(struct dc_link *link) +{ +	struct pipe_ctx *pipes[MAX_PIPES]; +	struct dc_state *state = link->dc->current_state; +	uint8_t count; +	int i; + +	link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + +	for (i = 0; i < count; i++) +		link_set_dpms_off(pipes[i]); + +	for (i = count - 1; i >= 0; i--) { +		// Always use max settings here for DP 1.4a LL Compliance CTS +		if (link->is_automated) { +			pipes[i]->link_config.dp_link_settings.lane_count = +					link->verified_link_cap.lane_count; +			pipes[i]->link_config.dp_link_settings.link_rate = +					link->verified_link_cap.link_rate; +			pipes[i]->link_config.dp_link_settings.link_spread = +					link->verified_link_cap.link_spread; +		} +		link_set_dpms_on(link->dc->current_state, pipes[i]); +	} +} + +enum dc_status dc_link_dp_read_hpd_rx_irq_data( +	struct dc_link *link, +	union hpd_irq_data *irq_data) +{ +	static enum dc_status retval; + +	/* The HW reads 16 bytes from 200h on HPD, +	 * but if we get an AUX_DEFER, the HW cannot retry +	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to +	 * fail, so we now explicitly read 6 bytes which is +	 * the req from the above mentioned test cases. +	 * +	 * For DP 1.4 we need to read those from 2002h range. 
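For the DP 1.4 path described above, the single 14-byte read starts at DP_SINK_COUNT_ESI (0x2002) and each field is then indexed by its distance from that base. A standalone sketch of the index arithmetic, using the standard DRM DPCD ESI addresses (worth cross-checking against the DRM DP headers):

#include <stdio.h>

/* standard DPCD ESI addresses, as used by dc_link_dp_read_hpd_rx_irq_data() above */
#define DP_SINK_COUNT_ESI                 0x2002
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003
#define DP_LANE0_1_STATUS_ESI             0x200c
#define DP_LANE2_3_STATUS_ESI             0x200d
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI  0x200e
#define DP_SINK_STATUS_ESI                0x200f

int main(void)
{
	/* one AUX read of this length covers every field the IRQ handler needs */
	printf("read length = %d bytes\n", DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1);	/* 14 */
	printf("sink_cnt            -> tmp[%d]\n", DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI);
	printf("device_service_irq  -> tmp[%d]\n", DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI);
	printf("lane01_status       -> tmp[%d]\n", DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI);	/* 10 */
	printf("lane23_status       -> tmp[%d]\n", DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI);
	printf("lane_status_updated -> tmp[%d]\n", DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI);
	printf("sink_status         -> tmp[%d]\n", DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI);	/* 13 */
	return 0;
}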
+	 */ +	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) +		retval = core_link_read_dpcd( +			link, +			DP_SINK_COUNT, +			irq_data->raw, +			sizeof(union hpd_irq_data)); +	else { +		/* Read 14 bytes in a single read and then copy only the required fields. +		 * This is more efficient than doing it in two separate AUX reads. */ + +		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + +		retval = core_link_read_dpcd( +			link, +			DP_SINK_COUNT_ESI, +			tmp, +			sizeof(tmp)); + +		if (retval != DC_OK) +			return retval; + +		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; +		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; +		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; +		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; +		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; +		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; +	} + +	return retval; +} + +/*************************Short Pulse IRQ***************************/ +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) +{ +	/* +	 * Don't handle RX IRQ unless one of following is met: +	 * 1) The link is established (cur_link_settings != unknown) +	 * 2) We know we're dealing with a branch device, SST or MST +	 */ + +	if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || +		is_dp_branch_device(link)) +		return true; + +	return false; +} + +bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, +							bool defer_handling, bool *has_left_work) +{ +	union hpd_irq_data hpd_irq_dpcd_data = {0}; +	union device_service_irq device_service_clear = {0}; +	enum dc_status result; +	bool status = false; + +	if (out_link_loss) +		*out_link_loss = false; + +	if (has_left_work) +		*has_left_work = false; +	/* For use cases related to down stream connection status change, +	 * PSR and device auto test, refer to function handle_sst_hpd_irq +	 * in DAL2.1*/ + +	DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n", +		__func__, link->link_index); + + +	 /* All the "handle_hpd_irq_xxx()" methods +		 * should be called only after +		 * dal_dpsst_ls_read_hpd_irq_data +		 * Order of calls is important too +		 */ +	result = dc_link_dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data); +	if (out_hpd_irq_dpcd_data) +		*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; + +	if (result != DC_OK) { +		DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n", +			__func__); +		return false; +	} + +	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { +		// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC +		link->is_automated = true; +		device_service_clear.bits.AUTOMATED_TEST = 1; +		core_link_write_dpcd( +			link, +			DP_DEVICE_SERVICE_IRQ_VECTOR, +			&device_service_clear.raw, +			sizeof(device_service_clear.raw)); +		device_service_clear.raw = 0; +		if (defer_handling && has_left_work) +			*has_left_work = true; +		else +			dc_link_dp_handle_automated_test(link); +		return false; +	} + +	if (!dc_link_dp_allow_hpd_rx_irq(link)) { +		DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", +			__func__, link->link_index); +		return false; +	} + +	if (handle_hpd_irq_psr_sink(link)) +		/* PSR-related error was detected and handled */ +		return true; + +	/* If PSR-related error handled, Main link 
may be off, +	 * so do not handle as a normal sink status change interrupt. +	 */ + +	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { +		if (defer_handling && has_left_work) +			*has_left_work = true; +		return true; +	} + +	/* check if we have MST msg and return since we poll for it */ +	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { +		if (defer_handling && has_left_work) +			*has_left_work = true; +		return false; +	} + +	/* For now we only handle 'Downstream port status' case. +	 * If we got sink count changed it means +	 * Downstream port status changed, +	 * then DM should call DC to do the detection. +	 * NOTE: Do not handle link loss on eDP since it is internal link*/ +	if ((link->connector_signal != SIGNAL_TYPE_EDP) && +		dc_link_check_link_loss_status( +			link, +			&hpd_irq_dpcd_data)) { +		/* Connectivity log: link loss */ +		CONN_DATA_LINK_LOSS(link, +					hpd_irq_dpcd_data.raw, +					sizeof(hpd_irq_dpcd_data), +					"Status: "); + +		if (defer_handling && has_left_work) +			*has_left_work = true; +		else +			dc_link_dp_handle_link_loss(link); + +		status = false; +		if (out_link_loss) +			*out_link_loss = true; + +		dp_trace_link_loss_increment(link); +	} + +	if (link->type == dc_connection_sst_branch && +		hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT +			!= link->dpcd_sink_count) +		status = true; + +	/* reasons for HPD RX: +	 * 1. Link Loss - ie Re-train the Link +	 * 2. MST sideband message +	 * 3. Automated Test - ie. Internal Commit +	 * 4. CP (copy protection) - (not interesting for DM???) +	 * 5. DRR +	 * 6. Downstream Port status changed +	 * -ie. Detect - this the only one +	 * which is interesting for DM because +	 * it must call dc_link_detect. +	 */ +	return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h index ea8d9760132f..39b2e51ea79d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h @@ -22,13 +22,10 @@   * Authors: AMD   *   */ -#ifndef __LINK_HWSS_HPO_FRL_H__ -#define __LINK_HWSS_HPO_FRL_H__ -#include "link_hwss.h" +#ifndef __DC_LINK_DP_IRQ_HANDLER_H__ +#define __DC_LINK_DP_IRQ_HANDLER_H__ -bool can_use_hpo_frl_link_hwss(const struct dc_link *link, -		const struct link_resource *link_res); -const struct link_hwss *get_hpo_frl_link_hwss(void); +#include "link.h" -#endif /* __LINK_HWSS_HPO_FRL_H__ */ +#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c new file mode 100644 index 000000000000..cd9fb8126bcf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -0,0 +1,208 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements basic dp phy functionality such as enable/disable phy + * output and set lane/drive settings. This file is responsible for maintaining + * and update software state representing current phy status such as current + * link settings. + */ + +#include "link_dp_phy.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "clk_mgr.h" +#include "resource.h" +#include "link_enc_cfg.h" +#define DC_LOGGER \ +	link->ctx->logger + +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on) +{ +	uint8_t state; + +	state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3; + +	if (link->sync_lt_in_progress) +		return; + +	core_link_write_dpcd(link, DP_SET_POWER, &state, +						 sizeof(state)); + +} + +void dp_enable_link_phy( +	struct dc_link *link, +	const struct link_resource *link_res, +	enum signal_type signal, +	enum clock_source_id clock_source, +	const struct dc_link_settings *link_settings) +{ +	link->cur_link_settings = *link_settings; +	link->dc->hwss.enable_dp_link_output(link, link_res, signal, +			clock_source, link_settings); +	dc_link_dp_receiver_power_ctrl(link, true); +} + +void dp_disable_link_phy(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type signal) +{ +	struct dc  *dc = link->ctx->dc; + +	if (!link->wa_flags.dp_keep_receiver_powered) +		dc_link_dp_receiver_power_ctrl(link, false); + +	dc->hwss.disable_link_output(link, link_res, signal); +	/* Clear current link setting.*/ +	memset(&link->cur_link_settings, 0, +			sizeof(link->cur_link_settings)); + +	if (dc->clk_mgr->funcs->notify_link_rate_change) +		dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ +	return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == +			offset); +} + +void dp_set_hw_lane_settings( +	struct dc_link *link, +	const struct link_resource *link_res, +	const struct link_training_settings *link_settings, +	uint32_t offset) +{ +	const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + +	if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && +			!is_immediate_downstream(link, offset)) +		return; + +	if (link_hwss->ext.set_dp_lane_settings) +		link_hwss->ext.set_dp_lane_settings(link, link_res, +				&link_settings->link_settings, +				link_settings->hw_lane_settings); + +	memmove(link->cur_lane_setting, +			link_settings->hw_lane_settings, +			sizeof(link->cur_lane_setting)); +} + +void dp_set_drive_settings( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings) +{ +	/* program ASIC PHY settings*/ +	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + +	dp_hw_to_dpcd_lane_settings(lt_settings, +			lt_settings->hw_lane_settings, +			lt_settings->dpcd_lane_settings); + +	/* Notify DP sink the PHY settings from source */ +	
dpcd_set_lane_settings(link, lt_settings, DPRX); +} + +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready) +{ +	/* FEC has to be "set ready" before the link training. +	 * The policy is to always train with FEC +	 * if the sink supports it and leave it enabled on link. +	 * If FEC is not supported, disable it. +	 */ +	struct link_encoder *link_enc = NULL; +	enum dc_status status = DC_OK; +	uint8_t fec_config = 0; + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	if (!dc_link_should_enable_fec(link)) +		return status; + +	if (link_enc->funcs->fec_set_ready && +			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { +		if (ready) { +			fec_config = 1; +			status = core_link_write_dpcd(link, +					DP_FEC_CONFIGURATION, +					&fec_config, +					sizeof(fec_config)); +			if (status == DC_OK) { +				link_enc->funcs->fec_set_ready(link_enc, true); +				link->fec_state = dc_link_fec_ready; +			} else { +				link_enc->funcs->fec_set_ready(link_enc, false); +				link->fec_state = dc_link_fec_not_ready; +				dm_error("dpcd write failed to set fec_ready"); +			} +		} else if (link->fec_state == dc_link_fec_ready) { +			fec_config = 0; +			status = core_link_write_dpcd(link, +					DP_FEC_CONFIGURATION, +					&fec_config, +					sizeof(fec_config)); +			link_enc->funcs->fec_set_ready(link_enc, false); +			link->fec_state = dc_link_fec_not_ready; +		} +	} + +	return status; +} + +void dp_set_fec_enable(struct dc_link *link, bool enable) +{ +	struct link_encoder *link_enc = NULL; + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); + +	if (!dc_link_should_enable_fec(link)) +		return; + +	if (link_enc->funcs->fec_set_enable && +			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { +		if (link->fec_state == dc_link_fec_ready && enable) { +			/* Accord to DP spec, FEC enable sequence can first +			 * be transmitted anytime after 1000 LL codes have +			 * been transmitted on the link after link training +			 * completion. Using 1 lane RBR should have the maximum +			 * time for transmitting 1000 LL codes which is 6.173 us. +			 * So use 7 microseconds delay instead. +			 */ +			udelay(7); +			link_enc->funcs->fec_set_enable(link_enc, true); +			link->fec_state = dc_link_fec_enabled; +		} else if (link->fec_state == dc_link_fec_enabled && !enable) { +			link_enc->funcs->fec_set_enable(link_enc, false); +			link->fec_state = dc_link_fec_ready; +		} +	} +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h new file mode 100644 index 000000000000..dba1f29df319 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h @@ -0,0 +1,56 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
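The 6.173 us figure quoted in dp_set_fec_enable() above follows directly from the stated worst case, one lane at RBR: 1000 link-layer codes of 10 bits each (8b/10b symbols) at 1.62 Gbit/s. A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	double bits = 1000.0 * 10.0;		/* 1000 LL codes, 10 bits per 8b/10b symbol */
	double rbr_lane_rate_bps = 1.62e9;	/* RBR wire rate per lane */

	printf("%.3f us\n", bits / rbr_lane_rate_bps * 1e6);	/* ~6.173, hence the 7 us delay */
	return 0;
}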
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_PHY_H__ +#define __DC_LINK_DP_PHY_H__ + +#include "link.h" +void dp_enable_link_phy( +	struct dc_link *link, +	const struct link_resource *link_res, +	enum signal_type signal, +	enum clock_source_id clock_source, +	const struct dc_link_settings *link_settings); + +void dp_disable_link_phy(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type signal); + +void dp_set_hw_lane_settings( +		struct dc_link *link, +		const struct link_resource *link_res, +		const struct link_training_settings *link_settings, +		uint32_t offset); + +void dp_set_drive_settings( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings); + +enum dc_status dp_set_fec_ready(struct dc_link *link, +		const struct link_resource *link_res, bool ready); +void dp_set_fec_enable(struct dc_link *link, bool enable); + +#endif /* __DC_LINK_DP_PHY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c new file mode 100644 index 000000000000..b48d4d822991 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -0,0 +1,1701 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements all generic dp link training helper functions and top + * level generic training sequence. All variations of dp link training sequence + * should be called inside the top level training functions in this file to + * ensure the integrity of our overall training procedure across different types + * of link encoding and back end hardware. 
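One concrete case of what the pattern-mapping helpers that follow feed into, assuming the TRAINING_PATTERN_SET register layout from the DisplayPort spec (bits 3:0 pattern, bit 5 SCRAMBLING_DISABLE; the register assembly itself is not shown in this hunk): TPS2 with scrambling disabled ends up as 0x22 at DPCD 0x102.

#include <stdint.h>
#include <stdio.h>

#define DP_TRAINING_PATTERN_SET  0x102	/* DPCD address, per the DP spec */
#define SCRAMBLING_DISABLE_SHIFT 5

int main(void)
{
	uint8_t dpcd_pattern = 0x2;		/* DPCD_TRAINING_PATTERN_2, from the mapping below */
	uint8_t scrambling_disable = 1;		/* dp_initialize_scrambling_data_symbols() returns 1 for TPS1-TPS3 */
	uint8_t val = (uint8_t)(dpcd_pattern | (scrambling_disable << SCRAMBLING_DISABLE_SHIFT));

	printf("write 0x%02x to DPCD 0x%03x\n", val, DP_TRAINING_PATTERN_SET);	/* 0x22 */
	return 0;
}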
+ */ +#include "link_dp_training.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_auxless.h" +#include "link_dp_training_dpia.h" +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dpcd.h" +#include "link/accessories/link_dp_trace.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "link_edp_panel_control.h" +#include "atomfirmware.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dm_helpers.h" + +#define DC_LOGGER \ +	link->ctx->logger + +#define POST_LT_ADJ_REQ_LIMIT 6 +#define POST_LT_ADJ_REQ_TIMEOUT 200 +#define LINK_TRAINING_RETRY_DELAY 50 /* ms */ + +void dp_log_training_result( +	struct dc_link *link, +	const struct link_training_settings *lt_settings, +	enum link_training_result status) +{ +	char *link_rate = "Unknown"; +	char *lt_result = "Unknown"; +	char *lt_spread = "Disabled"; + +	switch (lt_settings->link_settings.link_rate) { +	case LINK_RATE_LOW: +		link_rate = "RBR"; +		break; +	case LINK_RATE_RATE_2: +		link_rate = "R2"; +		break; +	case LINK_RATE_RATE_3: +		link_rate = "R3"; +		break; +	case LINK_RATE_HIGH: +		link_rate = "HBR"; +		break; +	case LINK_RATE_RBR2: +		link_rate = "RBR2"; +		break; +	case LINK_RATE_RATE_6: +		link_rate = "R6"; +		break; +	case LINK_RATE_HIGH2: +		link_rate = "HBR2"; +		break; +	case LINK_RATE_HIGH3: +		link_rate = "HBR3"; +		break; +	case LINK_RATE_UHBR10: +		link_rate = "UHBR10"; +		break; +	case LINK_RATE_UHBR13_5: +		link_rate = "UHBR13.5"; +		break; +	case LINK_RATE_UHBR20: +		link_rate = "UHBR20"; +		break; +	default: +		break; +	} + +	switch (status) { +	case LINK_TRAINING_SUCCESS: +		lt_result = "pass"; +		break; +	case LINK_TRAINING_CR_FAIL_LANE0: +		lt_result = "CR failed lane0"; +		break; +	case LINK_TRAINING_CR_FAIL_LANE1: +		lt_result = "CR failed lane1"; +		break; +	case LINK_TRAINING_CR_FAIL_LANE23: +		lt_result = "CR failed lane23"; +		break; +	case LINK_TRAINING_EQ_FAIL_CR: +		lt_result = "CR failed in EQ"; +		break; +	case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: +		lt_result = "CR failed in EQ partially"; +		break; +	case LINK_TRAINING_EQ_FAIL_EQ: +		lt_result = "EQ failed"; +		break; +	case LINK_TRAINING_LQA_FAIL: +		lt_result = "LQA failed"; +		break; +	case LINK_TRAINING_LINK_LOSS: +		lt_result = "Link loss"; +		break; +	case DP_128b_132b_LT_FAILED: +		lt_result = "LT_FAILED received"; +		break; +	case DP_128b_132b_MAX_LOOP_COUNT_REACHED: +		lt_result = "max loop count reached"; +		break; +	case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: +		lt_result = "channel EQ timeout"; +		break; +	case DP_128b_132b_CDS_DONE_TIMEOUT: +		lt_result = "CDS timeout"; +		break; +	default: +		break; +	} + +	switch (lt_settings->link_settings.link_spread) { +	case LINK_SPREAD_DISABLED: +		lt_spread = "Disabled"; +		break; +	case LINK_SPREAD_05_DOWNSPREAD_30KHZ: +		lt_spread = "0.5% 30KHz"; +		break; +	case LINK_SPREAD_05_DOWNSPREAD_33KHZ: +		lt_spread = "0.5% 33KHz"; +		break; +	default: +		break; +	} + +	/* Connectivity log: link training */ + +	/* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ + +	CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", +				link_rate, +				lt_settings->link_settings.lane_count, +				lt_result, +				lt_settings->hw_lane_settings[0].VOLTAGE_SWING, +				lt_settings->hw_lane_settings[0].PRE_EMPHASIS, +				lt_spread); +} + +uint8_t dp_initialize_scrambling_data_symbols( +	struct dc_link *link, +	enum dc_dp_training_pattern pattern) +{ +	uint8_t disable_scrabled_data_symbols = 0; + +	switch (pattern) { +	case 
DP_TRAINING_PATTERN_SEQUENCE_1: +	case DP_TRAINING_PATTERN_SEQUENCE_2: +	case DP_TRAINING_PATTERN_SEQUENCE_3: +		disable_scrabled_data_symbols = 1; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_4: +	case DP_128b_132b_TPS1: +	case DP_128b_132b_TPS2: +		disable_scrabled_data_symbols = 0; +		break; +	default: +		ASSERT(0); +		DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", +			__func__, pattern); +		break; +	} +	return disable_scrabled_data_symbols; +} + +enum dpcd_training_patterns +	dp_training_pattern_to_dpcd_training_pattern( +	struct dc_link *link, +	enum dc_dp_training_pattern pattern) +{ +	enum dpcd_training_patterns dpcd_tr_pattern = +	DPCD_TRAINING_PATTERN_VIDEOIDLE; + +	switch (pattern) { +	case DP_TRAINING_PATTERN_SEQUENCE_1: +		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_2: +		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_3: +		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_4: +		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; +		break; +	case DP_128b_132b_TPS1: +		dpcd_tr_pattern = DPCD_128b_132b_TPS1; +		break; +	case DP_128b_132b_TPS2: +		dpcd_tr_pattern = DPCD_128b_132b_TPS2; +		break; +	case DP_128b_132b_TPS2_CDS: +		dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; +		break; +	case DP_TRAINING_PATTERN_VIDEOIDLE: +		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; +		break; +	default: +		ASSERT(0); +		DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", +			__func__, pattern); +		break; +	} + +	return dpcd_tr_pattern; +} + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, +	uint32_t index) +{ +	uint8_t nibble; +	nibble = buf[index / 2]; + +	if (index % 2) +		nibble >>= 4; +	else +		nibble &= 0x0F; + +	return nibble; +} + +void dp_wait_for_training_aux_rd_interval( +	struct dc_link *link, +	uint32_t wait_in_micro_secs) +{ +	if (wait_in_micro_secs > 1000) +		msleep(wait_in_micro_secs/1000); +	else +		udelay(wait_in_micro_secs); + +	DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", +		__func__, +		wait_in_micro_secs); +} + +/* maximum pre emphasis level allowed for each voltage swing level*/ +static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { +		PRE_EMPHASIS_LEVEL3, +		PRE_EMPHASIS_LEVEL2, +		PRE_EMPHASIS_LEVEL1, +		PRE_EMPHASIS_DISABLED }; + +static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( +	enum dc_voltage_swing voltage) +{ +	enum dc_pre_emphasis pre_emphasis; +	pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; + +	if (voltage <= VOLTAGE_SWING_MAX_LEVEL) +		pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; + +	return pre_emphasis; + +} + +static void maximize_lane_settings(const struct link_training_settings *lt_settings, +		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ +	uint32_t lane; +	struct dc_lane_settings max_requested; + +	max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; +	max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; +	max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; + +	/* Determine what the maximum of the requested settings are*/ +	for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { +		if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) +			max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; + +		if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) +			max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; +		if (lane_settings[lane].FFE_PRESET.settings.level > +				
max_requested.FFE_PRESET.settings.level) +			max_requested.FFE_PRESET.settings.level = +					lane_settings[lane].FFE_PRESET.settings.level; +	} + +	/* make sure the requested settings are +	 * not higher than maximum settings*/ +	if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) +		max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; + +	if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) +		max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; +	if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) +		max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; + +	/* make sure the pre-emphasis matches the voltage swing*/ +	if (max_requested.PRE_EMPHASIS > +		get_max_pre_emphasis_for_voltage_swing( +			max_requested.VOLTAGE_SWING)) +		max_requested.PRE_EMPHASIS = +		get_max_pre_emphasis_for_voltage_swing( +			max_requested.VOLTAGE_SWING); + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; +		lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; +		lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; +	} +} + +void dp_hw_to_dpcd_lane_settings( +		const struct link_training_settings *lt_settings, +		const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], +		union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ +	uint8_t lane = 0; + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		if (link_dp_get_encoding_format(<_settings->link_settings) == +				DP_8b_10b_ENCODING) { +			dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = +					(uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); +			dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = +					(uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); +			dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = +					(hw_lane_settings[lane].VOLTAGE_SWING == +							VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); +			dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = +					(hw_lane_settings[lane].PRE_EMPHASIS == +							PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0); +		} else if (link_dp_get_encoding_format(<_settings->link_settings) == +				DP_128b_132b_ENCODING) { +			dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = +					hw_lane_settings[lane].FFE_PRESET.settings.level; +		} +	} +} + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ +	uint8_t link_rate = 0; +	enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings); + +	if (encoding == DP_128b_132b_ENCODING) +		switch (link_settings->link_rate) { +		case LINK_RATE_UHBR10: +			link_rate = 0x1; +			break; +		case LINK_RATE_UHBR20: +			link_rate = 0x2; +			break; +		case LINK_RATE_UHBR13_5: +			link_rate = 0x4; +			break; +		default: +			link_rate = 0; +			break; +		} +	else if (encoding == DP_8b_10b_ENCODING) +		link_rate = (uint8_t) link_settings->link_rate; +	else +		link_rate = 0; + +	return link_rate; +} + +/* Only used for channel equalization */ +uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ +	unsigned int aux_rd_interval_us = 400; + +	switch (dpcd_aux_read_interval) { +	case 0x01: +		aux_rd_interval_us = 4000; +		break; +	case 0x02: +		aux_rd_interval_us = 8000; +		break; +	case 0x03: +		aux_rd_interval_us = 12000; +		break; +	case 0x04: +		aux_rd_interval_us = 16000; +		break; +	case 0x05: +		aux_rd_interval_us = 32000; +		break; +	case 0x06: +		aux_rd_interval_us = 64000; +		break; +	default: +		break; +	} + +	return aux_rd_interval_us; +} + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, +					union lane_status *dpcd_lane_status) +{ +	enum link_training_result result = LINK_TRAINING_SUCCESS; + +	if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) +		result = LINK_TRAINING_CR_FAIL_LANE0; +	else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) +		result = LINK_TRAINING_CR_FAIL_LANE1; +	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) +		result = LINK_TRAINING_CR_FAIL_LANE23; +	else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) +		result = LINK_TRAINING_CR_FAIL_LANE23; +	return result; +} + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) +{ +	return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); +} + +bool dp_is_max_vs_reached( +	const struct link_training_settings *lt_settings) +{ +	uint32_t lane; +	for (lane = 0; lane < +		(uint32_t)(lt_settings->link_settings.lane_count); +		lane++) { +		if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET +			== VOLTAGE_SWING_MAX_LEVEL) +			return true; +	} +	return false; + +} + +bool dp_is_cr_done(enum dc_lane_count ln_count, +	union lane_status *dpcd_lane_status) +{ +	bool done = true; +	uint32_t lane; +	/*LANEx_CR_DONE bits All 1's?*/ +	for (lane = 0; lane < (uint32_t)(ln_count); lane++) { +		if (!dpcd_lane_status[lane].bits.CR_DONE_0) +			done = false; +	} +	return done; + +} + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, +		union lane_status *dpcd_lane_status) +{ +	bool done = true; +	uint32_t lane; +	for (lane = 0; lane < (uint32_t)(ln_count); lane++) +		if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) +			done = false; +	return done; +} + +bool dp_is_symbol_locked(enum dc_lane_count ln_count, +		union lane_status *dpcd_lane_status) +{ +	bool locked = true; +	uint32_t lane; +	for (lane = 0; lane < (uint32_t)(ln_count); lane++) +		if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) +			locked = false; +	return locked; +} + +bool dp_is_interlane_aligned(union 
lane_align_status_updated align_status) +{ +	return align_status.bits.INTERLANE_ALIGN_DONE == 1; +} + +enum link_training_result dp_check_link_loss_status( +	struct dc_link *link, +	const struct link_training_settings *link_training_setting) +{ +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	union lane_status lane_status; +	uint8_t dpcd_buf[6] = {0}; +	uint32_t lane; + +	core_link_read_dpcd( +			link, +			DP_SINK_COUNT, +			(uint8_t *)(dpcd_buf), +			sizeof(dpcd_buf)); + +	/*parse lane status*/ +	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { +		/* +		 * check lanes status +		 */ +		lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + +		if (!lane_status.bits.CHANNEL_EQ_DONE_0 || +			!lane_status.bits.CR_DONE_0 || +			!lane_status.bits.SYMBOL_LOCKED_0) { +			/* if one of the channel equalization, clock +			 * recovery or symbol lock is dropped +			 * consider it as (link has been +			 * dropped) dp sink status has changed +			 */ +			status = LINK_TRAINING_LINK_LOSS; +			break; +		} +	} + +	return status; +} + +enum dc_status dp_get_lane_status_and_lane_adjust( +	struct dc_link *link, +	const struct link_training_settings *link_training_setting, +	union lane_status ln_status[LANE_COUNT_DP_MAX], +	union lane_align_status_updated *ln_align, +	union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], +	uint32_t offset) +{ +	unsigned int lane01_status_address = DP_LANE0_1_STATUS; +	uint8_t lane_adjust_offset = 4; +	unsigned int lane01_adjust_address; +	uint8_t dpcd_buf[6] = {0}; +	uint32_t lane; +	enum dc_status status; + +	if (is_repeater(link_training_setting, offset)) { +		lane01_status_address = +				DP_LANE0_1_STATUS_PHY_REPEATER1 + +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +		lane_adjust_offset = 3; +	} + +	status = core_link_read_dpcd( +		link, +		lane01_status_address, +		(uint8_t *)(dpcd_buf), +		sizeof(dpcd_buf)); + +	if (status != DC_OK) { +		DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," +			" keep current lane status and lane adjust unchanged", +			__func__, +			lane01_status_address); +		return status; +	} + +	for (lane = 0; lane < +		(uint32_t)(link_training_setting->link_settings.lane_count); +		lane++) { + +		ln_status[lane].raw = +			dp_get_nibble_at_index(&dpcd_buf[0], lane); +		ln_adjust[lane].raw = +			dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); +	} + +	ln_align->raw = dpcd_buf[2]; + +	if (is_repeater(link_training_setting, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", +			__func__, +			offset, +			lane01_status_address, dpcd_buf[0], +			lane01_status_address + 1, dpcd_buf[1]); + +		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", +					__func__, +					offset, +					lane01_adjust_address, +					dpcd_buf[lane_adjust_offset], +					lane01_adjust_address + 1, +					dpcd_buf[lane_adjust_offset + 1]); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", +			__func__, +			lane01_status_address, dpcd_buf[0], +			lane01_status_address + 1, dpcd_buf[1]); + +		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + +		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", +			__func__, +			
lane01_adjust_address, +			dpcd_buf[lane_adjust_offset], +			lane01_adjust_address + 1, +			dpcd_buf[lane_adjust_offset + 1]); +	} + +	return status; +} + +static void override_lane_settings(const struct link_training_settings *lt_settings, +		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ +	uint32_t lane; + +	if (lt_settings->voltage_swing == NULL && +			lt_settings->pre_emphasis == NULL && +			lt_settings->ffe_preset == NULL && +			lt_settings->post_cursor2 == NULL) + +		return; + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		if (lt_settings->voltage_swing) +			lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; +		if (lt_settings->pre_emphasis) +			lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; +		if (lt_settings->post_cursor2) +			lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; +		if (lt_settings->ffe_preset) +			lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; +	} +} + +void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) +{ +	if (!dp_is_lttpr_present(link)) +		return; + +	if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { +		*override = LTTPR_MODE_TRANSPARENT; +	} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { +		*override = LTTPR_MODE_NON_TRANSPARENT; +	} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { +		*override = LTTPR_MODE_NON_LTTPR; +	} +	DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); +} + +void override_training_settings( +		struct dc_link *link, +		const struct dc_link_training_overrides *overrides, +		struct link_training_settings *lt_settings) +{ +	uint32_t lane; + +	/* Override link spread */ +	if (!link->dp_ss_off && overrides->downspread != NULL) +		lt_settings->link_settings.link_spread = *overrides->downspread ? +				LINK_SPREAD_05_DOWNSPREAD_30KHZ +				: LINK_SPREAD_DISABLED; + +	/* Override lane settings */ +	if (overrides->voltage_swing != NULL) +		lt_settings->voltage_swing = overrides->voltage_swing; +	if (overrides->pre_emphasis != NULL) +		lt_settings->pre_emphasis = overrides->pre_emphasis; +	if (overrides->post_cursor2 != NULL) +		lt_settings->post_cursor2 = overrides->post_cursor2; +	if (overrides->ffe_preset != NULL) +		lt_settings->ffe_preset = overrides->ffe_preset; +	/* Override HW lane settings with BIOS forced values if present */ +	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +		lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; +		lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; +		lt_settings->always_match_dpcd_with_hw_lane_settings = false; +	} +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = +			lt_settings->voltage_swing != NULL ? +			*lt_settings->voltage_swing : +			VOLTAGE_SWING_LEVEL0; +		lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = +			lt_settings->pre_emphasis != NULL ? +			*lt_settings->pre_emphasis +			: PRE_EMPHASIS_DISABLED; +		lt_settings->hw_lane_settings[lane].POST_CURSOR2 = +			lt_settings->post_cursor2 != NULL ? 
+			*lt_settings->post_cursor2 +			: POST_CURSOR2_DISABLED; +	} + +	if (lt_settings->always_match_dpcd_with_hw_lane_settings) +		dp_hw_to_dpcd_lane_settings(lt_settings, +				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + +	/* Override training timings */ +	if (overrides->cr_pattern_time != NULL) +		lt_settings->cr_pattern_time = *overrides->cr_pattern_time; +	if (overrides->eq_pattern_time != NULL) +		lt_settings->eq_pattern_time = *overrides->eq_pattern_time; +	if (overrides->pattern_for_cr != NULL) +		lt_settings->pattern_for_cr = *overrides->pattern_for_cr; +	if (overrides->pattern_for_eq != NULL) +		lt_settings->pattern_for_eq = *overrides->pattern_for_eq; +	if (overrides->enhanced_framing != NULL) +		lt_settings->enhanced_framing = *overrides->enhanced_framing; +	if (link->preferred_training_settings.fec_enable != NULL) +		lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* Check DP tunnel LTTPR mode debug option. */ +	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) +		lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; + +#endif +	dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode); + +} + +enum dc_dp_training_pattern decide_cr_training_pattern( +		const struct dc_link_settings *link_settings) +{ +	switch (link_dp_get_encoding_format(link_settings)) { +	case DP_8b_10b_ENCODING: +	default: +		return DP_TRAINING_PATTERN_SEQUENCE_1; +	case DP_128b_132b_ENCODING: +		return DP_128b_132b_TPS1; +	} +} + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, +		const struct dc_link_settings *link_settings) +{ +	struct link_encoder *link_enc; +	struct encoder_feature_support *enc_caps; +	struct dpcd_caps *rx_caps = &link->dpcd_caps; +	enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + +	link_enc = link_enc_cfg_get_link_enc(link); +	ASSERT(link_enc); +	enc_caps = &link_enc->features; + +	switch (link_dp_get_encoding_format(link_settings)) { +	case DP_8b_10b_ENCODING: +		if (enc_caps->flags.bits.IS_TPS4_CAPABLE && +				rx_caps->max_down_spread.bits.TPS4_SUPPORTED) +			pattern = DP_TRAINING_PATTERN_SEQUENCE_4; +		else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && +				rx_caps->max_ln_count.bits.TPS3_SUPPORTED) +			pattern = DP_TRAINING_PATTERN_SEQUENCE_3; +		else +			pattern = DP_TRAINING_PATTERN_SEQUENCE_2; +		break; +	case DP_128b_132b_ENCODING: +		pattern = DP_128b_132b_TPS2; +		break; +	default: +		pattern = DP_TRAINING_PATTERN_SEQUENCE_2; +		break; +	} +	return pattern; +} + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, +		struct dc_link_settings *link_setting) +{ +	enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting); + +	if (encoding == DP_8b_10b_ENCODING) +		return dp_decide_8b_10b_lttpr_mode(link); +	else if (encoding == DP_128b_132b_ENCODING) +		return dp_decide_128b_132b_lttpr_mode(link); + +	ASSERT(0); +	return LTTPR_MODE_NON_LTTPR; +} + +void dp_decide_lane_settings( +		const struct link_training_settings *lt_settings, +		const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], +		struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], +		union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ +	uint32_t lane; + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_8b_10b_ENCODING) { +			hw_lane_settings[lane].VOLTAGE_SWING = +					(enum 
dc_voltage_swing)(ln_adjust[lane].bits. +							VOLTAGE_SWING_LANE); +			hw_lane_settings[lane].PRE_EMPHASIS = +					(enum dc_pre_emphasis)(ln_adjust[lane].bits. +							PRE_EMPHASIS_LANE); +		} else if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_128b_132b_ENCODING) { +			hw_lane_settings[lane].FFE_PRESET.raw = +					ln_adjust[lane].tx_ffe.PRESET_VALUE; +		} +	} +	dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + +	if (lt_settings->disallow_per_lane_settings) { +		/* we find the maximum of the requested settings across all lanes*/ +		/* and set this maximum for all lanes*/ +		maximize_lane_settings(lt_settings, hw_lane_settings); +		override_lane_settings(lt_settings, hw_lane_settings); + +		if (lt_settings->always_match_dpcd_with_hw_lane_settings) +			dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); +	} + +} + +void dp_decide_training_settings( +		struct dc_link *link, +		const struct dc_link_settings *link_settings, +		struct link_training_settings *lt_settings) +{ +	if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) +		decide_8b_10b_training_settings(link, link_settings, lt_settings); +	else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) +		decide_128b_132b_training_settings(link, link_settings, lt_settings); +} + + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +{ +	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + +	DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); +	return core_link_write_dpcd(link, +			DP_PHY_REPEATER_MODE, +			(uint8_t *)&repeater_mode, +			sizeof(repeater_mode)); +} + +static enum dc_status configure_lttpr_mode_non_transparent( +		struct dc_link *link, +		const struct link_training_settings *lt_settings) +{ +	/* aux timeout is already set to extended */ +	/* RESET/SET lttpr mode to enable non transparent mode */ +	uint8_t repeater_cnt; +	uint32_t aux_interval_address; +	uint8_t repeater_id; +	enum dc_status result = DC_ERROR_UNEXPECTED; +	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + +	enum dp_link_encoding encoding = link_dp_get_encoding_format(&lt_settings->link_settings); + +	if (encoding == DP_8b_10b_ENCODING) { +		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); +		result = core_link_write_dpcd(link, +				DP_PHY_REPEATER_MODE, +				(uint8_t *)&repeater_mode, +				sizeof(repeater_mode)); + +	} + +	if (result == DC_OK) { +		link->dpcd_caps.lttpr_caps.mode = repeater_mode; +	} + +	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + +		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + +		repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; +		result = core_link_write_dpcd(link, +				DP_PHY_REPEATER_MODE, +				(uint8_t *)&repeater_mode, +				sizeof(repeater_mode)); + +		if (result == DC_OK) { +			link->dpcd_caps.lttpr_caps.mode = repeater_mode; +		} + +		if (encoding == DP_8b_10b_ENCODING) { +			repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + +			/* Driver does not need to train the first hop. Skip DPCD read and clear +			 * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
+			 */ +			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +				link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + +			for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { +				aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + +						((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); +				core_link_read_dpcd( +						link, +						aux_interval_address, +						(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], +						sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); +				link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; +			} +		} +	} + +	return result; +} + +enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) +{ +	enum dc_status status = DC_OK; + +	if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) +		status = configure_lttpr_mode_transparent(link); + +	else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) +		status = configure_lttpr_mode_non_transparent(link, lt_settings); + +	return status; +} + +void repeater_training_done(struct dc_link *link, uint32_t offset) +{ +	union dpcd_training_pattern dpcd_pattern = {0}; + +	const uint32_t dpcd_base_lt_offset = +			DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	/* Set training not in progress*/ +	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + +	core_link_write_dpcd( +		link, +		dpcd_base_lt_offset, +		&dpcd_pattern.raw, +		1); + +	DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", +		__func__, +		offset, +		dpcd_base_lt_offset, +		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +{ +	uint8_t sink_status = 0; +	uint8_t i; + +	/* clear training pattern set */ +	dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + +	if (encoding == DP_128b_132b_ENCODING) { +		/* poll for intra-hop disable */ +		for (i = 0; i < 10; i++) { +			if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && +					(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) +				break; +			udelay(1000); +		} +	} +} + +enum dc_status dpcd_configure_channel_coding(struct dc_link *link, +		struct link_training_settings *lt_settings) +{ +	enum dp_link_encoding encoding = +			link_dp_get_encoding_format( +					&lt_settings->link_settings); +	enum dc_status status; + +	status = core_link_write_dpcd( +			link, +			DP_MAIN_LINK_CHANNEL_CODING_SET, +			(uint8_t *) &encoding, +			1); +	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", +					__func__, +					DP_MAIN_LINK_CHANNEL_CODING_SET, +					encoding); + +	return status; +} + +void dpcd_set_training_pattern( +	struct dc_link *link, +	enum dc_dp_training_pattern training_pattern) +{ +	union dpcd_training_pattern dpcd_pattern = {0}; + +	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = +			dp_training_pattern_to_dpcd_training_pattern( +					link, training_pattern); + +	core_link_write_dpcd( +		link, +		DP_TRAINING_PATTERN_SET, +		&dpcd_pattern.raw, +		1); + +	DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", +		__func__, +		DP_TRAINING_PATTERN_SET, +		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +enum dc_status dpcd_set_link_settings( +	struct dc_link *link, +	const struct link_training_settings *lt_settings) +{ +	uint8_t rate; +	enum dc_status status; + +	union down_spread_ctrl downspread = {0}; +	union 
lane_count_set lane_count_set = {0}; + +	downspread.raw = (uint8_t) +	(lt_settings->link_settings.link_spread); + +	lane_count_set.bits.LANE_COUNT_SET = +	lt_settings->link_settings.lane_count; + +	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; +	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + +	if (link->ep_type == DISPLAY_ENDPOINT_PHY && +			lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { +		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = +				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; +	} + +	status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, +		&downspread.raw, sizeof(downspread)); + +	status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, +		&lane_count_set.raw, 1); + +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && +			lt_settings->link_settings.use_link_rate_set == true) { +		rate = 0; +		/* WA for some MUX chips that will power down with eDP and lose supported +		 * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure +		 * MUX chip gets link rate set back before link training. +		 */ +		if (link->connector_signal == SIGNAL_TYPE_EDP) { +			uint8_t supported_link_rates[16]; + +			core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, +					supported_link_rates, sizeof(supported_link_rates)); +		} +		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); +		status = core_link_write_dpcd(link, DP_LINK_RATE_SET, +				&lt_settings->link_settings.link_rate_set, 1); +	} else { +		rate = get_dpcd_link_rate(&lt_settings->link_settings); + +		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); +	} + +	if (rate) { +		DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", +			__func__, +			DP_LINK_BW_SET, +			lt_settings->link_settings.link_rate, +			DP_LANE_COUNT_SET, +			lt_settings->link_settings.lane_count, +			lt_settings->enhanced_framing, +			DP_DOWNSPREAD_CTRL, +			lt_settings->link_settings.link_spread); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", +			__func__, +			DP_LINK_RATE_SET, +			lt_settings->link_settings.link_rate_set, +			DP_LANE_COUNT_SET, +			lt_settings->link_settings.lane_count, +			lt_settings->enhanced_framing, +			DP_DOWNSPREAD_CTRL, +			lt_settings->link_settings.link_spread); +	} + +	return status; +} + +enum dc_status dpcd_set_lane_settings( +	struct dc_link *link, +	const struct link_training_settings *link_training_setting, +	uint32_t offset) +{ +	unsigned int lane0_set_address; +	enum dc_status status; +	lane0_set_address = DP_TRAINING_LANE0_SET; + +	if (is_repeater(link_training_setting, offset)) +		lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + +		((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +	status = core_link_write_dpcd(link, +		lane0_set_address, +		(uint8_t *)(link_training_setting->dpcd_lane_settings), +		link_training_setting->link_settings.lane_count); + +	if (is_repeater(link_training_setting, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" +				" 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			offset, +			lane0_set_address, +			link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, +			link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, +			link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, +			link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + +	} else { +		DC_LOG_HW_LINK_TRAINING("%s\n 
0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			lane0_set_address, +			link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, +			link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, +			link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, +			link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); +	} + +	return status; +} + +void dpcd_set_lt_pattern_and_lane_settings( +	struct dc_link *link, +	const struct link_training_settings *lt_settings, +	enum dc_dp_training_pattern pattern, +	uint32_t offset) +{ +	uint32_t dpcd_base_lt_offset; +	uint8_t dpcd_lt_buffer[5] = {0}; +	union dpcd_training_pattern dpcd_pattern = {0}; +	uint32_t size_in_bytes; +	bool edp_workaround = false; /* TODO link_prop.INTERNAL */ +	dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + +	if (is_repeater(lt_settings, offset)) +		dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + +			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +	/***************************************************************** +	* DpcdAddress_TrainingPatternSet +	*****************************************************************/ +	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = +		dp_training_pattern_to_dpcd_training_pattern(link, pattern); + +	dpcd_pattern.v1_4.SCRAMBLING_DISABLE = +		dp_initialize_scrambling_data_symbols(link, pattern); + +	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] +		= dpcd_pattern.raw; + +	if (is_repeater(lt_settings, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", +			__func__, +			offset, +			dpcd_base_lt_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", +			__func__, +			dpcd_base_lt_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +	} + +	/* concatenate everything into one buffer*/ +	size_in_bytes = lt_settings->link_settings.lane_count * +			sizeof(lt_settings->dpcd_lane_settings[0]); + +	 // 0x00103 - 0x00102 +	memmove( +		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], +		lt_settings->dpcd_lane_settings, +		size_in_bytes); + +	if (is_repeater(lt_settings, offset)) { +		if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_128b_132b_ENCODING) +			DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +					" 0x%X TX_FFE_PRESET_VALUE = %x\n", +					__func__, +					offset, +					dpcd_base_lt_offset, +					lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); +		else if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_8b_10b_ENCODING) +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X VS set = %x PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			offset, +			dpcd_base_lt_offset, +			lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, +			lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, +			lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, +			lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); +	} else { +		if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_128b_132b_ENCODING) +			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", +					__func__, +					dpcd_base_lt_offset, +					lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); +		else if (link_dp_get_encoding_format(&lt_settings->link_settings) == +				DP_8b_10b_ENCODING) +			DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x 
max VS Reached = %x  max PE Reached = %x\n", +					__func__, +					dpcd_base_lt_offset, +					lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, +					lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, +					lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, +					lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); +	} +	if (edp_workaround) { +		/* for eDP write in 2 parts because the 5-byte burst is +		* causing issues on some eDP panels (EPR#366724) +		*/ +		core_link_write_dpcd( +			link, +			DP_TRAINING_PATTERN_SET, +			&dpcd_pattern.raw, +			sizeof(dpcd_pattern.raw)); + +		core_link_write_dpcd( +			link, +			DP_TRAINING_LANE0_SET, +			(uint8_t *)(lt_settings->dpcd_lane_settings), +			size_in_bytes); + +	} else if (link_dp_get_encoding_format(&lt_settings->link_settings) == +			DP_128b_132b_ENCODING) { +		core_link_write_dpcd( +				link, +				dpcd_base_lt_offset, +				dpcd_lt_buffer, +				sizeof(dpcd_lt_buffer)); +	} else +		/* write it all in (1 + number-of-lanes)-byte burst*/ +		core_link_write_dpcd( +				link, +				dpcd_base_lt_offset, +				dpcd_lt_buffer, +				size_in_bytes + sizeof(dpcd_pattern.raw)); +} + +void start_clock_recovery_pattern_early(struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings, +		uint32_t offset) +{ +	DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", +			__func__); +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); +	dp_set_hw_lane_settings(link, link_res, lt_settings, offset); +	udelay(400); +} + +void dp_set_hw_test_pattern( +	struct dc_link *link, +	const struct link_resource *link_res, +	enum dp_test_pattern test_pattern, +	uint8_t *custom_pattern, +	uint32_t custom_pattern_size) +{ +	const struct link_hwss *link_hwss = get_link_hwss(link, link_res); +	struct encoder_set_dp_phy_pattern_param pattern_param = {0}; + +	pattern_param.dp_phy_pattern = test_pattern; +	pattern_param.custom_pattern = custom_pattern; +	pattern_param.custom_pattern_size = custom_pattern_size; +	pattern_param.dp_panel_mode = dp_get_panel_mode(link); + +	if (link_hwss->ext.set_dp_link_test_pattern) +		link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); +} + +bool dp_set_hw_training_pattern( +	struct dc_link *link, +	const struct link_resource *link_res, +	enum dc_dp_training_pattern pattern, +	uint32_t offset) +{ +	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; + +	switch (pattern) { +	case DP_TRAINING_PATTERN_SEQUENCE_1: +		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_2: +		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_3: +		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; +		break; +	case DP_TRAINING_PATTERN_SEQUENCE_4: +		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; +		break; +	case DP_128b_132b_TPS1: +		test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; +		break; +	case DP_128b_132b_TPS2: +		test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; +		break; +	default: +		break; +	} + +	dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); + +	return true; +} + +static bool perform_post_lt_adj_req_sequence( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	enum dc_lane_count lane_count = +	lt_settings->link_settings.lane_count; + +	uint32_t adj_req_count; +	uint32_t adj_req_timer; +	bool req_drv_setting_changed; +	
uint32_t lane; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +	union lane_align_status_updated dpcd_lane_status_updated = {0}; +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +	req_drv_setting_changed = false; +	for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; +	adj_req_count++) { + +		req_drv_setting_changed = false; + +		for (adj_req_timer = 0; +			adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; +			adj_req_timer++) { + +			dp_get_lane_status_and_lane_adjust( +				link, +				lt_settings, +				dpcd_lane_status, +				&dpcd_lane_status_updated, +				dpcd_lane_adjust, +				DPRX); + +			if (dpcd_lane_status_updated.bits. +					POST_LT_ADJ_REQ_IN_PROGRESS == 0) +				return true; + +			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) +				return false; + +			if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || +					!dp_is_symbol_locked(lane_count, dpcd_lane_status) || +					!dp_is_interlane_aligned(dpcd_lane_status_updated)) +				return false; + +			for (lane = 0; lane < (uint32_t)(lane_count); lane++) { + +				if (lt_settings-> +				dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != +				dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || +				lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != +				dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { + +					req_drv_setting_changed = true; +					break; +				} +			} + +			if (req_drv_setting_changed) { +				dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +						lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + +				dp_set_drive_settings(link, +						link_res, +						lt_settings); +				break; +			} + +			msleep(1); +		} + +		if (!req_drv_setting_changed) { +			DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", +				__func__); + +			ASSERT(0); +			return true; +		} +	} +	DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", +		__func__); + +	ASSERT(0); +	return true; + +} + +static enum link_training_result dp_transition_to_video_idle( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings, +	enum link_training_result status) +{ +	union lane_count_set lane_count_set = {0}; + +	/* 4. mainlink output idle pattern*/ +	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + +	/* +	 * 5. post training adjust if required +	 * If the upstream DPTX and downstream DPRX both support TPS4, +	 * TPS4 must be used instead of POST_LT_ADJ_REQ. +	 */ +	if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || +			lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { +		/* delay 5ms after Main Link output idle pattern and then check +		 * DPCD 0202h. 
+		 */ +		if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { +			msleep(5); +			status = dp_check_link_loss_status(link, lt_settings); +		} +		return status; +	} + +	if (status == LINK_TRAINING_SUCCESS && +		perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) +		status = LINK_TRAINING_LQA_FAIL; + +	lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; +	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; +	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + +	core_link_write_dpcd( +		link, +		DP_LANE_COUNT_SET, +		&lane_count_set.raw, +		sizeof(lane_count_set)); + +	return status; +} + +enum link_training_result dp_perform_link_training( +	struct dc_link *link, +	const struct link_resource *link_res, +	const struct dc_link_settings *link_settings, +	bool skip_video_pattern) +{ +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	struct link_training_settings lt_settings = {0}; +	enum dp_link_encoding encoding = +			link_dp_get_encoding_format(link_settings); + +	/* decide training settings */ +	dp_decide_training_settings( +			link, +			link_settings, +			&lt_settings); + +	override_training_settings( +			link, +			&link->preferred_training_settings, +			&lt_settings); + +	/* reset previous training states */ +	dpcd_exit_training_mode(link, encoding); + +	/* configure link prior to entering training mode */ +	dpcd_configure_lttpr_mode(link, &lt_settings); +	dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); +	dpcd_configure_channel_coding(link, &lt_settings); + +	/* enter training mode: +	 * Per DP specs starting from here, DPTX device shall not issue +	 * Non-LT AUX transactions inside training mode. +	 */ +	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) +		status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); +	else if (encoding == DP_8b_10b_ENCODING) +		status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings); +	else if (encoding == DP_128b_132b_ENCODING) +		status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings); +	else +		ASSERT(0); + +	/* exit training mode */ +	dpcd_exit_training_mode(link, encoding); + +	/* switch to video idle */ +	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) +		status = dp_transition_to_video_idle(link, +				link_res, +				&lt_settings, +				status); + +	/* dump debug data */ +	dp_log_training_result(link, &lt_settings, status); +	if (status != LINK_TRAINING_SUCCESS) +		link->ctx->dc->debug_data.ltFailCount++; +	return status; +} + +bool perform_link_training_with_retries( +	const struct dc_link_settings *link_setting, +	bool skip_video_pattern, +	int attempts, +	struct pipe_ctx *pipe_ctx, +	enum signal_type signal, +	bool do_fallback) +{ +	int j; +	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	enum dp_panel_mode panel_mode = dp_get_panel_mode(link); +	enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; +	struct dc_link_settings cur_link_settings = *link_setting; +	struct dc_link_settings max_link_settings = *link_setting; +	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); +	int fail_count = 0; +	bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ +	bool is_link_bw_min = /* RBR x 1 */ +		(cur_link_settings.link_rate <= LINK_RATE_LOW) && +		(cur_link_settings.lane_count <= 
LANE_COUNT_ONE); + +	dp_trace_commit_lt_init(link); + + +	if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) +		/* We need to do this before the link training to ensure the idle +		 * pattern in SST mode will be sent right after the link training +		 */ +		link_hwss->setup_stream_encoder(pipe_ctx); + +	dp_trace_set_lt_start_timestamp(link, false); +	j = 0; +	while (j < attempts && fail_count < (attempts * 10)) { + +		DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n", +			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, +			cur_link_settings.lane_count); + +		dp_enable_link_phy( +			link, +			&pipe_ctx->link_res, +			signal, +			pipe_ctx->clock_source->id, +			&cur_link_settings); + +		if (stream->sink_patches.dppowerup_delay > 0) { +			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + +			msleep(delay_dp_power_up_in_ms); +		} + +#ifdef CONFIG_DRM_AMD_DC_HDCP +		if (panel_mode == DP_PANEL_MODE_EDP) { +			struct cp_psp *cp_psp = &stream->ctx->cp_psp; + +			if (cp_psp && cp_psp->funcs.enable_assr) { +				/* ASSR is bound to fail with unsigned PSP +				 * verstage used during devlopment phase. +				 * Report and continue with eDP panel mode to +				 * perform eDP link training with right settings +				 */ +				bool result; +				result = cp_psp->funcs.enable_assr(cp_psp->handle, link); +			} +		} +#endif + +		dp_set_panel_mode(link, panel_mode); + +		if (link->aux_access_disabled) { +			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); +			return true; +		} else { +			/** @todo Consolidate USB4 DP and DPx.x training. */ +			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { +				status = dc_link_dpia_perform_link_training( +						link, +						&pipe_ctx->link_res, +						&cur_link_settings, +						skip_video_pattern); + +				/* Transmit idle pattern once training successful. */ +				if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { +					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); +					// Update verified link settings to current one +					// Because DPIA LT might fallback to lower link setting. 
+					if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +						link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; +						link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; +						dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); +					} +				} +			} else { +				status = dp_perform_link_training( +						link, +						&pipe_ctx->link_res, +						&cur_link_settings, +						skip_video_pattern); +			} + +			dp_trace_lt_total_count_increment(link, false); +			dp_trace_lt_result_update(link, status, false); +			dp_trace_set_lt_end_timestamp(link, false); +			if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) +				return true; +		} + +		fail_count++; +		dp_trace_lt_fail_count_update(link, fail_count, false); +		if (link->ep_type == DISPLAY_ENDPOINT_PHY) { +			/* latest link training still fail or link training is aborted +			 * skip delay and keep PHY on +			 */ +			if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) +				break; +		} + +		DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n", +			__func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, +			cur_link_settings.lane_count, status); + +		dp_disable_link_phy(link, &pipe_ctx->link_res, signal); + +		/* Abort link training if failure due to sink being unplugged. */ +		if (status == LINK_TRAINING_ABORT) { +			enum dc_connection_type type = dc_connection_none; + +			dc_link_detect_connection_type(link, &type); +			if (type == dc_connection_none) { +				DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); +				break; +			} +		} + +		/* Try to train again at original settings if: +		 * - not falling back between training attempts; +		 * - aborted previous attempt due to reasons other than sink unplug; +		 * - successfully trained but at a link rate lower than that required by stream; +		 * - reached minimum link bandwidth. +		 */ +		if (!do_fallback || (status == LINK_TRAINING_ABORT) || +				(status == LINK_TRAINING_SUCCESS && is_link_bw_low) || +				is_link_bw_min) { +			j++; +			cur_link_settings = *link_setting; +			delay_between_attempts += LINK_TRAINING_RETRY_DELAY; +			is_link_bw_low = false; +			is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && +				(cur_link_settings.lane_count <= LANE_COUNT_ONE); + +		} else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ +			uint32_t req_bw; +			uint32_t link_bw; + +			decide_fallback_link_setting(link, &max_link_settings, +					&cur_link_settings, status); +			/* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to +			 * minimum link bandwidth. 
+			 */ +			req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); +			link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); +			is_link_bw_low = (req_bw > link_bw); +			is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && +				(cur_link_settings.lane_count <= LANE_COUNT_ONE)); + +			if (is_link_bw_low) +				DC_LOG_WARNING( +					"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", +					__func__, link->link_index, req_bw, link_bw); +		} + +		msleep(delay_between_attempts); +	} + +	return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h new file mode 100644 index 000000000000..a04948635369 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -0,0 +1,182 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_H__ +#define __DC_LINK_DP_TRAINING_H__ +#include "link.h" + +bool perform_link_training_with_retries( +	const struct dc_link_settings *link_setting, +	bool skip_video_pattern, +	int attempts, +	struct pipe_ctx *pipe_ctx, +	enum signal_type signal, +	bool do_fallback); + +enum link_training_result dp_perform_link_training( +		struct dc_link *link, +		const struct link_resource *link_res, +		const struct dc_link_settings *link_settings, +		bool skip_video_pattern); + +bool dp_set_hw_training_pattern( +		struct dc_link *link, +		const struct link_resource *link_res, +		enum dc_dp_training_pattern pattern, +		uint32_t offset); + +void dp_set_hw_test_pattern( +		struct dc_link *link, +		const struct link_resource *link_res, +		enum dp_test_pattern test_pattern, +		uint8_t *custom_pattern, +		uint32_t custom_pattern_size); + +void dpcd_set_training_pattern( +	struct dc_link *link, +	enum dc_dp_training_pattern training_pattern); + +/* Write DPCD drive settings. */ +enum dc_status dpcd_set_lane_settings( +	struct dc_link *link, +	const struct link_training_settings *link_training_setting, +	uint32_t offset); + +/* Write DPCD link configuration data. 
*/ +enum dc_status dpcd_set_link_settings( +	struct dc_link *link, +	const struct link_training_settings *lt_settings); + +void dpcd_set_lt_pattern_and_lane_settings( +	struct dc_link *link, +	const struct link_training_settings *lt_settings, +	enum dc_dp_training_pattern pattern, +	uint32_t offset); + +/* Read training status and adjustment requests from DPCD. */ +enum dc_status dp_get_lane_status_and_lane_adjust( +	struct dc_link *link, +	const struct link_training_settings *link_training_setting, +	union lane_status ln_status[LANE_COUNT_DP_MAX], +	union lane_align_status_updated *ln_align, +	union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], +	uint32_t offset); + +enum dc_status dpcd_configure_lttpr_mode( +		struct dc_link *link, +		struct link_training_settings *lt_settings); + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link); + +enum dc_status dpcd_configure_channel_coding( +		struct dc_link *link, +		struct link_training_settings *lt_settings); + +void repeater_training_done(struct dc_link *link, uint32_t offset); + +void start_clock_recovery_pattern_early(struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings, +		uint32_t offset); + +void dp_decide_training_settings( +		struct dc_link *link, +		const struct dc_link_settings *link_settings, +		struct link_training_settings *lt_settings); + +void dp_decide_lane_settings( +	const struct link_training_settings *lt_settings, +	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], +	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], +	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +enum dc_dp_training_pattern decide_cr_training_pattern( +		const struct dc_link_settings *link_settings); + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, +		const struct dc_link_settings *link_settings); + +void dp_get_lttpr_mode_override(struct dc_link *link, +		enum lttpr_mode *override); + +void override_training_settings( +		struct dc_link *link, +		const struct dc_link_training_overrides *overrides, +		struct link_training_settings *lt_settings); + +/* Check DPCD training status registers to detect link loss. 
*/ +enum link_training_result dp_check_link_loss_status( +		struct dc_link *link, +		const struct link_training_settings *link_training_setting); + +bool dp_is_cr_done(enum dc_lane_count ln_count, +	union lane_status *dpcd_lane_status); + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, +	union lane_status *dpcd_lane_status); +bool dp_is_symbol_locked(enum dc_lane_count ln_count, +	union lane_status *dpcd_lane_status); +bool dp_is_interlane_aligned(union lane_align_status_updated align_status); + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset); + +bool dp_is_max_vs_reached( +	const struct link_training_settings *lt_settings); + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings); + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, +	union lane_status *dpcd_lane_status); + +void dp_hw_to_dpcd_lane_settings( +	const struct link_training_settings *lt_settings, +	const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], +	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +void dp_wait_for_training_aux_rd_interval( +	struct dc_link *link, +	uint32_t wait_in_micro_secs); + +enum dpcd_training_patterns +	dp_training_pattern_to_dpcd_training_pattern( +	struct dc_link *link, +	enum dc_dp_training_pattern pattern); + +uint8_t dp_initialize_scrambling_data_symbols( +	struct dc_link *link, +	enum dc_dp_training_pattern pattern); + +void dp_log_training_result( +	struct dc_link *link, +	const struct link_training_settings *lt_settings, +	enum link_training_result status); + +uint32_t dp_translate_training_aux_read_interval( +		uint32_t dpcd_aux_read_interval); + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, +	uint32_t index); +#endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c new file mode 100644 index 000000000000..23d380f09a21 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c @@ -0,0 +1,259 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 128b/132b link training software policies and + * sequences. 
+ */ +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ +	link->ctx->logger + +static enum dc_status dpcd_128b_132b_set_lane_settings( +		struct dc_link *link, +		const struct link_training_settings *link_training_setting) +{ +	enum dc_status status = core_link_write_dpcd(link, +			DP_TRAINING_LANE0_SET, +			(uint8_t *)(link_training_setting->dpcd_lane_settings), +			sizeof(link_training_setting->dpcd_lane_settings)); + +	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", +			__func__, +			DP_TRAINING_LANE0_SET, +			link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); +	return status; +} + +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, +		uint32_t *interval_in_us) +{ +	union dp_128b_132b_training_aux_rd_interval dpcd_interval; +	uint32_t interval_unit = 0; + +	dpcd_interval.raw = 0; +	core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL, +			&dpcd_interval.raw, sizeof(dpcd_interval.raw)); +	interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ +	/* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * +	 * INTERVAL_UNIT. The maximum is 256 ms +	 */ +	*interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	uint8_t loop_count; +	uint32_t aux_rd_interval = 0; +	uint32_t wait_time = 0; +	union lane_align_status_updated dpcd_lane_status_updated = {0}; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; +	enum dc_status status = DC_OK; +	enum link_training_result result = LINK_TRAINING_SUCCESS; + +	/* Transmit 128b/132b_TPS1 over Main-Link */ +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); + +	/* Set TRAINING_PATTERN_SET to 01h */ +	dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + +	/* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ +	dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); +	dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, +			&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); +	dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); + +	/* Set loop counter to start from 1 */ +	loop_count = 1; + +	/* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ +	dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, +			lt_settings->pattern_for_eq, DPRX); + +	/* poll for channel EQ done */ +	while (result == LINK_TRAINING_SUCCESS) { +		dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); +		wait_time += aux_rd_interval; +		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, +				&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); +		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +		dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); +		if (status != DC_OK) { +			result = LINK_TRAINING_ABORT; +		} else if 
(dp_is_ch_eq_done(lt_settings->link_settings.lane_count, +				dpcd_lane_status)) { +			/* pass */ +			break; +		} else if (loop_count >= lt_settings->eq_loop_count_limit) { +			result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; +		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { +			result = DP_128b_132b_LT_FAILED; +		} else { +			dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); +			dpcd_128b_132b_set_lane_settings(link, lt_settings); +		} +		loop_count++; +	} + +	/* poll for EQ interlane align done */ +	while (result == LINK_TRAINING_SUCCESS) { +		if (status != DC_OK) { +			result = LINK_TRAINING_ABORT; +		} else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { +			/* pass */ +			break; +		} else if (wait_time >= lt_settings->eq_wait_time_limit) { +			result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; +		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { +			result = DP_128b_132b_LT_FAILED; +		} else { +			dp_wait_for_training_aux_rd_interval(link, +					lt_settings->eq_pattern_time); +			wait_time += lt_settings->eq_pattern_time; +			status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, +					&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); +		} +	} + +	return result; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	/* Assumption: assume hardware has transmitted eq pattern */ +	enum dc_status status = DC_OK; +	enum link_training_result result = LINK_TRAINING_SUCCESS; +	union lane_align_status_updated dpcd_lane_status_updated = {0}; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; +	uint32_t wait_time = 0; + +	/* initiate CDS done sequence */ +	dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); + +	/* poll for CDS interlane align done and symbol lock */ +	while (result == LINK_TRAINING_SUCCESS) { +		dp_wait_for_training_aux_rd_interval(link, +				lt_settings->cds_pattern_time); +		wait_time += lt_settings->cds_pattern_time; +		status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, +						&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); +		if (status != DC_OK) { +			result = LINK_TRAINING_ABORT; +		} else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && +				dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { +			/* pass */ +			break; +		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { +			result = DP_128b_132b_LT_FAILED; +		} else if (wait_time >= lt_settings->cds_wait_time_limit) { +			result = DP_128b_132b_CDS_DONE_TIMEOUT; +		} +	} + +	return result; +} + +enum link_training_result dp_perform_128b_132b_link_training( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	enum link_training_result result = LINK_TRAINING_SUCCESS; + +	/* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ +	if (link->dc->debug.legacy_dp2_lt) { +		struct link_training_settings legacy_settings; + +		decide_8b_10b_training_settings(link, +				&lt_settings->link_settings, +				&legacy_settings); +		return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); +	} + +	dpcd_set_link_settings(link, lt_settings); + +	if (result == LINK_TRAINING_SUCCESS) +		result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, 
lt_settings); + +	if (result == LINK_TRAINING_SUCCESS) +		result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + +	return result; +} + +void decide_128b_132b_training_settings(struct dc_link *link, +		const struct dc_link_settings *link_settings, +		struct link_training_settings *lt_settings) +{ +	memset(lt_settings, 0, sizeof(*lt_settings)); + +	lt_settings->link_settings = *link_settings; +	/* TODO: should decide link spread when populating link_settings */ +	lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : +			LINK_SPREAD_05_DOWNSPREAD_30KHZ; + +	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); +	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); +	lt_settings->eq_pattern_time = 2500; +	lt_settings->eq_wait_time_limit = 400000; +	lt_settings->eq_loop_count_limit = 20; +	lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; +	lt_settings->cds_pattern_time = 2500; +	lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; +	lt_settings->disallow_per_lane_settings = true; +	lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); +	dp_hw_to_dpcd_lane_settings(lt_settings, +			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) +{ +	enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; + +	if (dp_is_lttpr_present(link)) +		mode = LTTPR_MODE_NON_TRANSPARENT; + +	DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); +	return mode; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h new file mode 100644 index 000000000000..2147f24efc8b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__ +#define __DC_LINK_DP_TRAINING_128B_132B_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_128b_132b_link_training( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings); + +void decide_128b_132b_training_settings(struct dc_link *link, +		const struct dc_link_settings *link_settings, +		struct link_training_settings *lt_settings); + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); + +#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c new file mode 100644 index 000000000000..14b98e096d39 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -0,0 +1,414 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 8b/10b link training software policies and + * sequences. 
+ */ +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ +	link->ctx->logger + +static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, +		const struct dc_link_settings *link_settings) +{ +	union training_aux_rd_interval training_rd_interval; +	uint32_t wait_in_micro_secs = 100; + +	memset(&training_rd_interval, 0, sizeof(training_rd_interval)); +	if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && +			link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +		core_link_read_dpcd( +				link, +				DP_TRAINING_AUX_RD_INTERVAL, +				(uint8_t *)&training_rd_interval, +				sizeof(training_rd_interval)); +		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) +			wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; +	} +	return wait_in_micro_secs; +} + +static uint32_t get_eq_training_aux_rd_interval( +	struct dc_link *link, +	const struct dc_link_settings *link_settings) +{ +	union training_aux_rd_interval training_rd_interval; + +	memset(&training_rd_interval, 0, sizeof(training_rd_interval)); +	if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { +		core_link_read_dpcd( +				link, +				DP_128B132B_TRAINING_AUX_RD_INTERVAL, +				(uint8_t *)&training_rd_interval, +				sizeof(training_rd_interval)); +	} else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && +			link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +		core_link_read_dpcd( +				link, +				DP_TRAINING_AUX_RD_INTERVAL, +				(uint8_t *)&training_rd_interval, +				sizeof(training_rd_interval)); +	} + +	switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { +	case 0: return 400; +	case 1: return 4000; +	case 2: return 8000; +	case 3: return 12000; +	case 4: return 16000; +	case 5: return 32000; +	case 6: return 64000; +	default: return 400; +	} +} + +void decide_8b_10b_training_settings( +	 struct dc_link *link, +	const struct dc_link_settings *link_setting, +	struct link_training_settings *lt_settings) +{ +	memset(lt_settings, '\0', sizeof(struct link_training_settings)); + +	/* Initialize link settings */ +	lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; +	lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; +	lt_settings->link_settings.link_rate = link_setting->link_rate; +	lt_settings->link_settings.lane_count = link_setting->lane_count; +	/* TODO hard coded to SS for now +	 * lt_settings.link_settings.link_spread = +	 * dal_display_path_is_ss_supported( +	 * path_mode->display_path) ? +	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ : +	 * LINK_SPREAD_DISABLED; +	 */ +	lt_settings->link_settings.link_spread = link->dp_ss_off ? 
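The two helpers above translate the DPCD-reported training AUX read interval into microseconds differently for the CR and EQ phases. A standalone restatement of that mapping, provided as an illustrative sketch rather than as part of the patch:

#include <stdint.h>

/* CR phase (get_cr_training_aux_rd_interval above): a non-zero DPCD value N
 * means N * 4 ms; zero falls back to the 100 us default. */
static uint32_t sketch_cr_interval_us(uint8_t dpcd_val)
{
	return dpcd_val ? dpcd_val * 4000u : 100u;
}

/* EQ phase (get_eq_training_aux_rd_interval above): enumerated table, with
 * 400 us used for out-of-range values. */
static uint32_t sketch_eq_interval_us(uint8_t dpcd_val)
{
	static const uint32_t table[] = {400, 4000, 8000, 12000, 16000, 32000, 64000};

	return dpcd_val < 7 ? table[dpcd_val] : 400u;
}

/* Example: a reported value of 2 yields 8 ms for both phases. */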
+			LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; +	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); +	lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); +	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); +	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); +	lt_settings->enhanced_framing = 1; +	lt_settings->should_set_fec_ready = true; +	lt_settings->disallow_per_lane_settings = true; +	lt_settings->always_match_dpcd_with_hw_lane_settings = true; +	lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); +	dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) +{ +	bool is_lttpr_present = dp_is_lttpr_present(link); +	bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; +	bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; + +	if (!is_lttpr_present) +		return LTTPR_MODE_NON_LTTPR; + +	if (vbios_lttpr_aware) { +		if (vbios_lttpr_force_non_transparent) { +			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); +			return LTTPR_MODE_NON_TRANSPARENT; +		} else { +			DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); +			return LTTPR_MODE_TRANSPARENT; +		} +	} + +	if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && +			link->dc->caps.extended_aux_timeout_support) { +		DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); +		return LTTPR_MODE_NON_TRANSPARENT; +	} + +	DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); +	return LTTPR_MODE_NON_LTTPR; +} + +enum link_training_result perform_8b_10b_clock_recovery_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings, +	uint32_t offset) +{ +	uint32_t retries_cr; +	uint32_t retry_count; +	uint32_t wait_time_microsec; +	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; +	union lane_align_status_updated dpcd_lane_status_updated; +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +	retries_cr = 0; +	retry_count = 0; + +	memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); +	memset(&dpcd_lane_status_updated, '\0', +	sizeof(dpcd_lane_status_updated)); + +	if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) +		dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + +	/* najeeb - The synaptics MST hub can put the LT in +	* infinite loop by switching the VS +	*/ +	/* between level 0 and level 1 continuously, here +	* we try for CR lock for LinkTrainingMaxCRRetry count*/ +	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && +		(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + +		/* 1. call HWSS to set lane settings*/ +		dp_set_hw_lane_settings( +				link, +				link_res, +				lt_settings, +				offset); + +		/* 2. update DPCD of the receiver*/ +		if (!retry_count) +			/* EPR #361076 - write as a 5-byte burst, +			 * but only for the 1-st iteration.*/ +			dpcd_set_lt_pattern_and_lane_settings( +					link, +					lt_settings, +					lt_settings->pattern_for_cr, +					offset); +		else +			dpcd_set_lane_settings( +					link, +					lt_settings, +					offset); + +		/* 3. 
wait receiver to lock-on*/ +		wait_time_microsec = lt_settings->cr_pattern_time; + +		dp_wait_for_training_aux_rd_interval( +				link, +				wait_time_microsec); + +		/* 4. Read lane status and requested drive +		* settings as set by the sink +		*/ +		dp_get_lane_status_and_lane_adjust( +				link, +				lt_settings, +				dpcd_lane_status, +				&dpcd_lane_status_updated, +				dpcd_lane_adjust, +				offset); + +		/* 5. check CR done*/ +		if (dp_is_cr_done(lane_count, dpcd_lane_status)) +			return LINK_TRAINING_SUCCESS; + +		/* 6. max VS reached*/ +		if ((link_dp_get_encoding_format(<_settings->link_settings) == +				DP_8b_10b_ENCODING) && +				dp_is_max_vs_reached(lt_settings)) +			break; + +		/* 7. same lane settings*/ +		/* Note: settings are the same for all lanes, +		 * so comparing first lane is sufficient*/ +		if ((link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && +				lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == +						dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) +			retries_cr++; +		else if ((link_dp_get_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && +				lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == +						dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) +			retries_cr++; +		else +			retries_cr = 0; + +		/* 8. update VS/PE/PC2 in lt_settings*/ +		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +		retry_count++; +	} + +	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { +		ASSERT(0); +		DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", +			__func__, +			LINK_TRAINING_MAX_CR_RETRY); + +	} + +	return dp_get_cr_failure(lane_count, dpcd_lane_status); +} + +enum link_training_result perform_8b_10b_channel_equalization_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings, +	uint32_t offset) +{ +	enum dc_dp_training_pattern tr_pattern; +	uint32_t retries_ch_eq; +	uint32_t wait_time_microsec; +	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +	union lane_align_status_updated dpcd_lane_status_updated = {0}; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +	/* Note: also check that TPS4 is a supported feature*/ +	tr_pattern = lt_settings->pattern_for_eq; + +	if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) +		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + +	dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); + +	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; +		retries_ch_eq++) { + +		dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + +		/* 2. update DPCD*/ +		if (!retries_ch_eq) +			/* EPR #361076 - write as a 5-byte burst, +			 * but only for the 1-st iteration +			 */ + +			dpcd_set_lt_pattern_and_lane_settings( +				link, +				lt_settings, +				tr_pattern, offset); +		else +			dpcd_set_lane_settings(link, lt_settings, offset); + +		/* 3. wait for receiver to lock-on*/ +		wait_time_microsec = lt_settings->eq_pattern_time; + +		if (is_repeater(lt_settings, offset)) +			wait_time_microsec = +					dp_translate_training_aux_read_interval( +						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + +		dp_wait_for_training_aux_rd_interval( +				link, +				wait_time_microsec); + +		/* 4. 
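The clock recovery loop above gives up either when the sink keeps requesting the same voltage swing or after an absolute attempt cap. A compact restatement of that guard, as an illustrative sketch (not part of the patch); the limits match LINK_TRAINING_MAX_RETRY_COUNT and LINK_TRAINING_MAX_CR_RETRY defined later in link_dp_training_8b_10b.h:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_SAME_SETTINGS 5U	/* LINK_TRAINING_MAX_RETRY_COUNT */
#define SKETCH_MAX_CR_ATTEMPTS 100U	/* LINK_TRAINING_MAX_CR_RETRY */

/* Another CR iteration is allowed only while the sink has not requested the
 * same drive settings five times in a row (the Synaptics MST hub workaround
 * noted above) and fewer than 100 attempts have been made in total. */
static bool sketch_cr_may_retry(uint32_t same_settings_streak, uint32_t total_attempts)
{
	return same_settings_streak < SKETCH_MAX_SAME_SETTINGS &&
	       total_attempts < SKETCH_MAX_CR_ATTEMPTS;
}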
Read lane status and requested +		 * drive settings as set by the sink*/ + +		dp_get_lane_status_and_lane_adjust( +			link, +			lt_settings, +			dpcd_lane_status, +			&dpcd_lane_status_updated, +			dpcd_lane_adjust, +			offset); + +		/* 5. check CR done*/ +		if (!dp_is_cr_done(lane_count, dpcd_lane_status)) +			return dpcd_lane_status[0].bits.CR_DONE_0 ? +					LINK_TRAINING_EQ_FAIL_CR_PARTIAL : +					LINK_TRAINING_EQ_FAIL_CR; + +		/* 6. check CHEQ done*/ +		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && +				dp_is_symbol_locked(lane_count, dpcd_lane_status) && +				dp_is_interlane_aligned(dpcd_lane_status_updated)) +			return LINK_TRAINING_SUCCESS; + +		/* 7. update VS/PE/PC2 in lt_settings*/ +		dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +				lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +	} + +	return LINK_TRAINING_EQ_FAIL_EQ; + +} + +enum link_training_result dp_perform_8b_10b_link_training( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	enum link_training_result status = LINK_TRAINING_SUCCESS; + +	uint8_t repeater_cnt; +	uint8_t repeater_id; +	uint8_t lane = 0; + +	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) +		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + +	/* 1. set link rate, lane count and spread. */ +	dpcd_set_link_settings(link, lt_settings); + +	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + +		/* 2. perform link training (set link training done +		 *  to false is done as well) +		 */ +		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + +		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); +				repeater_id--) { +			status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) { +				repeater_training_done(link, repeater_id); +				break; +			} + +			status = perform_8b_10b_channel_equalization_sequence(link, +					link_res, +					lt_settings, +					repeater_id); + +			repeater_training_done(link, repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) +				break; + +			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +				lt_settings->dpcd_lane_settings[lane].raw = 0; +				lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; +				lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; +			} +		} +	} + +	if (status == LINK_TRAINING_SUCCESS) { +		status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); +		if (status == LINK_TRAINING_SUCCESS) { +			status = perform_8b_10b_channel_equalization_sequence(link, +					link_res, +					lt_settings, +					DPRX); +		} +	} + +	return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h new file mode 100644 index 000000000000..d26de15ce954 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
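For orientation, the non-transparent branch of dp_perform_8b_10b_link_training above runs a full CR plus EQ pass per hop before finally training the sink; the following is an illustrative sketch of that ordering (not part of the patch), and the real code additionally clears the stored lane settings after each repeater:

#include <stdint.h>
#include <stdio.h>

static void sketch_8b_10b_training_order(uint8_t repeater_cnt)
{
	unsigned int hop;

	/* Repeaters first, highest repeater offset down to 1 ... */
	for (hop = repeater_cnt; hop > 0; hop--)
		printf("LTTPR offset %u: clock recovery, then channel EQ\n", hop);
	/* ... then the sink itself (offset DPRX == 0). */
	printf("DPRX: clock recovery, then channel EQ\n");
}

int main(void)
{
	sketch_8b_10b_training_order(2);	/* two parsed repeaters */
	return 0;
}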
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__ +#define __DC_LINK_DP_TRAINING_8B_10B_H__ +#include "link_dp_training.h" + +/* to avoid infinite loop where-in the receiver + * switches between different VS + */ +#define LINK_TRAINING_MAX_CR_RETRY 100 +#define LINK_TRAINING_MAX_RETRY_COUNT 5 + +enum link_training_result dp_perform_8b_10b_link_training( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings); + +enum link_training_result perform_8b_10b_clock_recovery_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings, +	uint32_t offset); + +enum link_training_result perform_8b_10b_channel_equalization_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings, +	uint32_t offset); + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); + +void decide_8b_10b_training_settings( +	 struct dc_link *link, +	const struct dc_link_settings *link_setting, +	struct link_training_settings *lt_settings); + +#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c new file mode 100644 index 000000000000..e50ec5012559 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + */ +#include "link_dp_training_auxless.h" +#include "link_dp_phy.h" +#define DC_LOGGER \ +	link->ctx->logger +bool dc_link_dp_perform_link_training_skip_aux( +	struct dc_link *link, +	const struct link_resource *link_res, +	const struct dc_link_settings *link_setting) +{ +	struct link_training_settings lt_settings = {0}; + +	dp_decide_training_settings( +			link, +			link_setting, +			<_settings); +	override_training_settings( +			link, +			&link->preferred_training_settings, +			<_settings); + +	/* 1. Perform_clock_recovery_sequence. */ + +	/* transmit training pattern for clock recovery */ +	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); + +	/* call HWSS to set lane settings*/ +	dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); + +	/* wait receiver to lock-on*/ +	dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); + +	/* 2. Perform_channel_equalization_sequence. */ + +	/* transmit training pattern for channel equalization. */ +	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); + +	/* call HWSS to set lane settings*/ +	dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); + +	/* wait receiver to lock-on. */ +	dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); + +	/* 3. Perform_link_training_int. */ + +	/* Mainlink output idle pattern. */ +	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + +	dp_log_training_result(link, <_settings, LINK_TRAINING_SUCCESS); + +	return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h new file mode 100644 index 000000000000..413999cd03c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__ +#define __DC_LINK_DP_TRAINING_AUXLESS_H__ +#include "link_dp_training.h" + +bool dc_link_dp_perform_link_training_skip_aux( +	struct dc_link *link, +	const struct link_resource *link_res, +	const struct dc_link_settings *link_setting); +#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 74e36b34d3f7..e60da0532c53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -1,6 +1,5 @@ -// SPDX-License-Identifier: MIT  /* - * Copyright 2021 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc.   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), @@ -24,76 +23,72 @@   *   */ +/* FILE POLICY AND INTENDED USAGE: + * This module implements functionality for training DPIA links. + */ +#include "link_dp_training_dpia.h"  #include "dc.h" -#include "dc_link_dpia.h"  #include "inc/core_status.h"  #include "dc_link.h" -#include "dc_link_dp.h"  #include "dpcd_defs.h" + +#include "link_dp_dpia.h"  #include "link_hwss.h"  #include "dm_helpers.h"  #include "dmub/inc/dmub_cmd.h" -#include "inc/link_dpcd.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_capability.h"  #include "dc_dmub_srv.h" -  #define DC_LOGGER \  	link->ctx->logger -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) -{ -	enum dc_status status = DC_OK; -	uint8_t dpcd_dp_tun_data[3] = {0}; -	uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; -	uint8_t i = 0; - -	status = core_link_read_dpcd(link, -			DP_TUNNELING_CAPABILITIES_SUPPORT, -			dpcd_dp_tun_data, -			sizeof(dpcd_dp_tun_data)); - -	status = core_link_read_dpcd(link, -			DP_USB4_ROUTER_TOPOLOGY_ID, -			dpcd_topology_data, -			sizeof(dpcd_topology_data)); - -	link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = -			dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - -					 DP_TUNNELING_CAPABILITIES_SUPPORT]; -	link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = -			dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; -	link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = -			dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; - -	for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) -		link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; - -	return status; -} - -bool dc_link_dpia_query_hpd_status(struct dc_link *link) -{ -	union dmub_rb_cmd cmd = {0}; -	struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; -	bool is_hpd_high = false; +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + +/* Extend interval between training status checks for manual testing. */ +#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 + +#define TRAINING_AUX_RD_INTERVAL 100 //us + +/* SET_CONFIG message types sent by driver. */ +enum dpia_set_config_type { +	DPIA_SET_CFG_SET_LINK = 0x01, +	DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, +	DPIA_SET_CFG_SET_TRAINING = 0x18, +	DPIA_SET_CFG_SET_VSPE = 0x19 +}; + +/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ +enum dpia_set_config_ts { +	DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. 
*/ +	DPIA_TS_TPS1 = 0x01, +	DPIA_TS_TPS2 = 0x02, +	DPIA_TS_TPS3 = 0x03, +	DPIA_TS_TPS4 = 0x07, +	DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ +}; + +/* SET_CONFIG message data associated with messages sent by driver. */ +union dpia_set_config_data { +	struct { +		uint8_t mode : 1; +		uint8_t reserved : 7; +	} set_link; +	struct { +		uint8_t stage; +	} set_training; +	struct { +		uint8_t swing : 2; +		uint8_t max_swing_reached : 1; +		uint8_t pre_emph : 2; +		uint8_t max_pre_emph_reached : 1; +		uint8_t reserved : 2; +	} set_vspe; +	uint8_t raw; +}; -	/* prepare QUERY_HPD command */ -	cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; -	cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; -	cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - -	/* Return HPD status reported by DMUB if query successfully executed. */ -	if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) -		is_hpd_high = cmd.query_hpd.data.result; - -	DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", -		__func__, -		link->link_index, -		link->link_id.enum_id - ENUM_ID_1, -		cmd.query_hpd.data.status, -		cmd.query_hpd.data.result); - -	return is_hpd_high; -}  /* Configure link as prescribed in link_setting; set LTTPR mode; and   * Initialize link training settings. @@ -113,11 +108,12 @@ static enum link_training_result dpia_configure_link(  	bool fec_enable;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", -				__func__, -				link->link_id.enum_id - ENUM_ID_1, -				lt_settings->lttpr_mode); +		__func__, +		link->link_id.enum_id - ENUM_ID_1, +		lt_settings->lttpr_mode); -	dp_decide_training_settings(link, +	dp_decide_training_settings( +		link,  		link_setting,  		lt_settings); @@ -137,7 +133,7 @@ static enum link_training_result dpia_configure_link(  	if (status != DC_OK && link->is_hpd_pending)  		return LINK_TRAINING_ABORT; -	if (link->preferred_training_settings.fec_enable) +	if (link->preferred_training_settings.fec_enable != NULL)  		fec_enable = *link->preferred_training_settings.fec_enable;  	else  		fec_enable = true; @@ -148,7 +144,8 @@ static enum link_training_result dpia_configure_link(  	return LINK_TRAINING_SUCCESS;  } -static enum dc_status core_link_send_set_config(struct dc_link *link, +static enum dc_status core_link_send_set_config( +	struct dc_link *link,  	uint8_t msg_type,  	uint8_t msg_data)  { @@ -160,8 +157,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,  	payload.msg_data = msg_data;  	if (!link->ddc->ddc_pin && !link->aux_access_disabled && -	    (dm_helpers_dmub_set_config_sync(link->ctx, link, -					     &payload, &set_config_result) == -1)) { +			(dm_helpers_dmub_set_config_sync(link->ctx, +			link, &payload, &set_config_result) == -1)) {  		return DC_ERROR_UNEXPECTED;  	} @@ -170,7 +167,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,  }  /* Build SET_CONFIG message data payload for specified message type. 
*/ -static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, +static uint8_t dpia_build_set_config_data( +		enum dpia_set_config_type type,  		struct dc_link *link,  		struct link_training_settings *lt_settings)  { @@ -189,11 +187,9 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,  		data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING;  		data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS;  		data.set_vspe.max_swing_reached = -			lt_settings->hw_lane_settings[0].VOLTAGE_SWING == -			VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; +				lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;  		data.set_vspe.max_pre_emph_reached = -			lt_settings->hw_lane_settings[0].PRE_EMPHASIS == -			PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; +				lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;  		break;  	default:  		ASSERT(false); /* Message type not supported by helper function. */ @@ -235,7 +231,8 @@ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern t  }  /* Write training pattern to DPCD. */ -static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, +static enum dc_status dpcd_set_lt_pattern( +	struct dc_link *link,  	enum dc_dp_training_pattern pattern,  	uint32_t hop)  { @@ -249,28 +246,29 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,  	/* DpcdAddress_TrainingPatternSet */  	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = -		dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); +		dp_training_pattern_to_dpcd_training_pattern(link, pattern);  	dpcd_pattern.v1_4.SCRAMBLING_DISABLE = -		dc_dp_initialize_scrambling_data_symbols(link, pattern); +		dp_initialize_scrambling_data_symbols(link, pattern);  	if (hop != DPRX) {  		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", -					__func__, -					hop, -					dpcd_tps_offset, -					dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +			__func__, +			hop, +			dpcd_tps_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);  	} else {  		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", -					__func__, -					dpcd_tps_offset, -					dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +			__func__, +			dpcd_tps_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);  	} -	status = core_link_write_dpcd(link, -				      dpcd_tps_offset, -				      &dpcd_pattern.raw, -				      sizeof(dpcd_pattern.raw)); +	status = core_link_write_dpcd( +			link, +			dpcd_tps_offset, +			&dpcd_pattern.raw, +			sizeof(dpcd_pattern.raw));  	return status;  } @@ -284,7 +282,7 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,   *   * @param link DPIA link being trained.   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */  static enum link_training_result dpia_training_cr_non_transparent(  		struct dc_link *link, @@ -297,8 +295,7 @@ static enum link_training_result dpia_training_cr_non_transparent(  	enum dc_status status;  	uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */  	uint32_t retry_count = 0; -	/* From DP spec, CR read interval is always 100us. */ -	uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; +	uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. 
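dpia_build_set_config_data above packs the first lane's drive settings into the single SET_VSPE payload byte through the bit-fields declared in union dpia_set_config_data. A standalone sketch of that byte, assuming the LSB-first bit-field layout the driver relies on (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Assumed layout (LSB first): bits 0-1 swing, bit 2 max swing reached,
 * bits 3-4 pre-emphasis, bit 5 max pre-emphasis reached, bits 6-7 reserved. */
static uint8_t sketch_set_vspe_byte(uint8_t swing, uint8_t pre_emph,
				    int max_swing, int max_pre_emph)
{
	return (uint8_t)((swing & 0x3) |
			 ((max_swing ? 1 : 0) << 2) |
			 ((pre_emph & 0x3) << 3) |
			 ((max_pre_emph ? 1 : 0) << 5));
}

int main(void)
{
	/* Swing level 2, pre-emphasis level 1, neither at maximum: 0x0a. */
	assert(sketch_set_vspe_byte(2, 1, 0, 0) == 0x0a);
	return 0;
}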
*/  	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;  	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};  	union lane_align_status_updated dpcd_lane_status_updated = {0}; @@ -306,7 +303,7 @@ static enum link_training_result dpia_training_cr_non_transparent(  	uint8_t set_cfg_data;  	enum dpia_set_config_ts ts; -	repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);  	/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.  	 * Fix inherited from perform_clock_recovery_sequence() - @@ -316,17 +313,20 @@ static enum link_training_result dpia_training_cr_non_transparent(  	 * continuously.  	 */  	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && -	       (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { +			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { +  		/* DPTX-to-DPIA */  		if (hop == repeater_cnt) {  			/* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that  			 * non-transparent link training has started.  			 * This also enables the transmission of clk_sync packets.  			 */ -			set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK, +			set_cfg_data = dpia_build_set_config_data( +					DPIA_SET_CFG_SET_LINK,  					link,  					lt_settings); -			status = core_link_send_set_config(link, +			status = core_link_send_set_config( +					link,  					DPIA_SET_CFG_SET_LINK,  					set_cfg_data);  			/* CR for this hop is considered successful as long as @@ -347,6 +347,14 @@ static enum link_training_result dpia_training_cr_non_transparent(  				result = LINK_TRAINING_ABORT;  				break;  			} +			status = core_link_send_set_config( +					link, +					DPIA_SET_CFG_SET_TRAINING, +					ts); +			if (status != DC_OK) { +				result = LINK_TRAINING_ABORT; +				break; +			}  			status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);  			if (status != DC_OK) {  				result = LINK_TRAINING_ABORT; @@ -358,10 +366,12 @@ static enum link_training_result dpia_training_cr_non_transparent(  		 * drive settings for hops immediately downstream.  		 */  		if (hop == repeater_cnt - 1) { -			set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, +			set_cfg_data = dpia_build_set_config_data( +					DPIA_SET_CFG_SET_VSPE,  					link,  					lt_settings); -			status = core_link_send_set_config(link, +			status = core_link_send_set_config( +					link,  					DPIA_SET_CFG_SET_VSPE,  					set_cfg_data);  			if (status != DC_OK) { @@ -468,7 +478,8 @@ static enum link_training_result dpia_training_cr_transparent(  	 * continuously.  	 */  	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && -	       (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { +			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { +  		/* Write TPS1 (not VS or PE) to DPCD to start CR phase.  		 * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to  		 * start link training. @@ -529,8 +540,7 @@ static enum link_training_result dpia_training_cr_transparent(  	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT; -	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" -		" -hop(%d)\n - result(%d)\n - retries(%d)\n", +	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n",  		__func__,  		link->link_id.enum_id - ENUM_ID_1,  		DPRX, @@ -545,7 +555,7 @@ static enum link_training_result dpia_training_cr_transparent(   *   * @param link DPIA link being trained.   
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */  static enum link_training_result dpia_training_cr_phase(  		struct dc_link *link, @@ -564,7 +574,8 @@ static enum link_training_result dpia_training_cr_phase(  }  /* Return status read interval during equalization phase. */ -static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, +static uint32_t dpia_get_eq_aux_rd_interval( +		const struct dc_link *link,  		const struct link_training_settings *lt_settings,  		uint32_t hop)  { @@ -590,12 +601,11 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,   * - TPSx is transmitted for any hops downstream of DPOA.   * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.   * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. - * - DPRX EQ only reported successful when both DPRX and DPIA requirements - * (clk sync packets sent) fulfilled. + * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled.   *   * @param link DPIA link being trained.   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */  static enum link_training_result dpia_training_eq_non_transparent(  		struct dc_link *link, @@ -624,9 +634,10 @@ static enum link_training_result dpia_training_eq_non_transparent(  	else  		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -	repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);  	for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { +  		/* DPTX-to-DPIA equalization always successful. */  		if (hop == repeater_cnt) {  			result = LINK_TRAINING_SUCCESS; @@ -640,7 +651,8 @@ static enum link_training_result dpia_training_eq_non_transparent(  				result = LINK_TRAINING_ABORT;  				break;  			} -			status = core_link_send_set_config(link, +			status = core_link_send_set_config( +					link,  					DPIA_SET_CFG_SET_TRAINING,  					ts);  			if (status != DC_OK) { @@ -658,12 +670,14 @@ static enum link_training_result dpia_training_eq_non_transparent(  		 * drive settings for hop immediately downstream.  		 */  		if (hop == repeater_cnt - 1) { -			set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, -								  link, -								  lt_settings); -			status = core_link_send_set_config(link, -							   DPIA_SET_CFG_SET_VSPE, -							   set_cfg_data); +			set_cfg_data = dpia_build_set_config_data( +					DPIA_SET_CFG_SET_VSPE, +					link, +					lt_settings); +			status = core_link_send_set_config( +					link, +					DPIA_SET_CFG_SET_VSPE, +					set_cfg_data);  			if (status != DC_OK) {  				result = LINK_TRAINING_ABORT;  				break; @@ -679,7 +693,7 @@ static enum link_training_result dpia_training_eq_non_transparent(  		 * ensure clock sync packets have been sent.  		 
*/  		if (hop == DPRX && retries_eq == 1) -			wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY); +			wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY);  		else  			wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop); @@ -705,8 +719,8 @@ static enum link_training_result dpia_training_eq_non_transparent(  		}  		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && -		    dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && -		    dp_is_interlane_aligned(dpcd_lane_status_updated)) { +				dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && +				dp_is_interlane_aligned(dpcd_lane_status_updated)) {  			result =  LINK_TRAINING_SUCCESS;  			break;  		} @@ -741,7 +755,7 @@ static enum link_training_result dpia_training_eq_non_transparent(   *   * @param link DPIA link being trained.   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */  static enum link_training_result dpia_training_eq_transparent(  		struct dc_link *link, @@ -761,6 +775,7 @@ static enum link_training_result dpia_training_eq_transparent(  	wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);  	for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { +  		if (retries_eq == 0) {  			status = dpcd_set_lt_pattern(link, tr_pattern, DPRX);  			if (status != DC_OK) { @@ -791,10 +806,14 @@ static enum link_training_result dpia_training_eq_transparent(  		}  		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && -		    dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && -		    dp_is_interlane_aligned(dpcd_lane_status_updated)) { -			result =  LINK_TRAINING_SUCCESS; -			break; +				dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status)) { +			/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 +			 * has to share encoders unlike DP and USBC +			 */ +			if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { +				result =  LINK_TRAINING_SUCCESS; +				break; +			}  		}  		/* Update VS/PE. */ @@ -806,8 +825,7 @@ static enum link_training_result dpia_training_eq_transparent(  	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT; -	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" -		" - hop(%d)\n - result(%d)\n - retries(%d)\n", +	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n",  		__func__,  		link->link_id.enum_id - ENUM_ID_1,  		DPRX, @@ -822,7 +840,7 @@ static enum link_training_result dpia_training_eq_transparent(   *   * @param link DPIA link being trained.   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */  static enum link_training_result dpia_training_eq_phase(  		struct dc_link *link, @@ -841,7 +859,9 @@ static enum link_training_result dpia_training_eq_phase(  }  /* End training of specified hop in display path. 
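One detail worth noting from the equalization code above: on the DPRX hop, the wait before the retries_eq == 1 status read is padded to DPIA_CLK_SYNC_DELAY (16 ms) so the DPIA has had time to transmit its USB4 clock sync packets. An illustrative sketch of that wait selection (not part of the patch):

#include <stdint.h>

#define SKETCH_DPIA_CLK_SYNC_DELAY_US 16000u	/* ~time to send 9 USB4 DP clock sync packets */

/* Pad the AUX read interval to at least 16 ms on the pass that must observe
 * clock sync packet transmission; otherwise just use the reported interval. */
static uint32_t sketch_dpia_eq_wait_us(int needs_clk_sync_pad, uint32_t aux_rd_interval_us)
{
	if (needs_clk_sync_pad && aux_rd_interval_us < SKETCH_DPIA_CLK_SYNC_DELAY_US)
		return SKETCH_DPIA_CLK_SYNC_DELAY_US;
	return aux_rd_interval_us;
}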
*/ -static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) +static enum dc_status dpcd_clear_lt_pattern( +	struct dc_link *link, +	uint32_t hop)  {  	union dpcd_training_pattern dpcd_pattern = {0};  	uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; @@ -851,7 +871,8 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)  		dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +  			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); -	status = core_link_write_dpcd(link, +	status = core_link_write_dpcd( +			link,  			dpcd_tps_offset,  			&dpcd_pattern.raw,  			sizeof(dpcd_pattern.raw)); @@ -869,9 +890,10 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)   * (DPTX-to-DPIA) and last hop (DPRX).   *   * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   */ -static enum link_training_result dpia_training_end(struct dc_link *link, +static enum link_training_result dpia_training_end( +		struct dc_link *link,  		struct link_training_settings *lt_settings,  		uint32_t hop)  { @@ -880,13 +902,15 @@ static enum link_training_result dpia_training_end(struct dc_link *link,  	enum dc_status status;  	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { -		repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + +		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);  		if (hop == repeater_cnt) { /* DPTX-to-DPIA */  			/* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that  			 * DPTX-to-DPIA hop trained. No DPCD write needed for first hop.  			 */ -			status = core_link_send_set_config(link, +			status = core_link_send_set_config( +					link,  					DPIA_SET_CFG_SET_TRAINING,  					DPIA_TS_UFP_DONE);  			if (status != DC_OK) @@ -900,7 +924,8 @@ static enum link_training_result dpia_training_end(struct dc_link *link,  		/* Notify DPOA that non-transparent link training of DPRX done. */  		if (hop == DPRX && result != LINK_TRAINING_ABORT) { -			status = core_link_send_set_config(link, +			status = core_link_send_set_config( +					link,  					DPIA_SET_CFG_SET_TRAINING,  					DPIA_TS_DPRX_DONE);  			if (status != DC_OK) @@ -908,18 +933,20 @@ static enum link_training_result dpia_training_end(struct dc_link *link,  		}  	} else { /* non-LTTPR or transparent LTTPR. */ +  		/* Write 0x0 to TRAINING_PATTERN_SET */  		status = dpcd_clear_lt_pattern(link, hop);  		if (status != DC_OK)  			result = LINK_TRAINING_ABORT; +  	}  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n", -				__func__, -				link->link_id.enum_id - ENUM_ID_1, -				hop, -				result, -				lt_settings->lttpr_mode); +		__func__, +		link->link_id.enum_id - ENUM_ID_1, +		hop, +		result, +		lt_settings->lttpr_mode);  	return result;  } @@ -929,20 +956,21 @@ static enum link_training_result dpia_training_end(struct dc_link *link,   * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.   *   * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0.   
*/ -static void dpia_training_abort(struct dc_link *link, -	struct link_training_settings *lt_settings, -	uint32_t hop) +static void dpia_training_abort( +		struct dc_link *link, +		struct link_training_settings *lt_settings, +		uint32_t hop)  {  	uint8_t data = 0;  	uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n", -				__func__, -				link->link_id.enum_id - ENUM_ID_1, -				lt_settings->lttpr_mode, -				link->is_hpd_pending); +		__func__, +		link->link_id.enum_id - ENUM_ID_1, +		lt_settings->lttpr_mode, +		link->is_hpd_pending);  	/* Abandon clean-up if sink unplugged. */  	if (link->is_hpd_pending) @@ -971,7 +999,7 @@ enum link_training_result dc_link_dpia_perform_link_training(  	struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in -	lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings); +	lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings);  	/* Configure link as prescribed in link_setting and set LTTPR mode. */  	result = dpia_configure_link(link, link_res, link_setting, <_settings); @@ -979,7 +1007,7 @@ enum link_training_result dc_link_dpia_perform_link_training(  		return result;  	if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) -		repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);  	/* Train each hop in turn starting with the one closest to DPTX.  	 * In transparent or non-LTTPR mode, train only the final hop (DPRX). @@ -1008,11 +1036,12 @@ enum link_training_result dc_link_dpia_perform_link_training(  	 */  	if (result == LINK_TRAINING_SUCCESS) {  		msleep(5); -		result = dp_check_link_loss_status(link, <_settings); -	} else if (result == LINK_TRAINING_ABORT) { +		if (!link->is_automated) +			result = dp_check_link_loss_status(link, <_settings); +	} else if (result == LINK_TRAINING_ABORT)  		dpia_training_abort(link, <_settings, repeater_id); -	} else { +	else  		dpia_training_end(link, <_settings, repeater_id); -	} +  	return result;  } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h new file mode 100644 index 000000000000..0150f2916421 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
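dc_link_dpia_perform_link_training above trains each hop starting with the one closest to the DPTX, using the numbering established earlier in this file: hop == repeater_cnt is the DPTX-to-DPIA hop (driven purely by SET_CONFIG messages) and hop == 0 is the DPRX. An illustrative sketch of that walk (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Non-transparent training walks from repeater_cnt down to 0; transparent or
 * non-LTTPR mode trains only the DPRX, per the comments in the code above. */
static void sketch_dpia_training_order(uint8_t repeater_cnt, int non_transparent)
{
	int hop;

	if (!non_transparent) {
		printf("hop 0 (DPRX) only\n");
		return;
	}
	for (hop = repeater_cnt; hop >= 0; hop--)
		printf("hop %d%s\n", hop,
		       hop == repeater_cnt ? " (DPTX-to-DPIA)" :
		       hop == 0 ? " (DPRX)" : " (LTTPR)");
}

int main(void)
{
	sketch_dpia_training_order(2, 1);
	return 0;
}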
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_DPIA_H__ +#define __DC_LINK_DP_TRAINING_DPIA_H__ +#include "link_dp_training.h" + +/* Train DP tunneling link for USB4 DPIA display endpoint. + * DPIA equivalent of dc_link_dp_perfrorm_link_training. + * Aborts link training upon detection of sink unplug. + */ +enum link_training_result dc_link_dpia_perform_link_training( +	struct dc_link *link, +	const struct link_resource *link_res, +	const struct dc_link_settings *link_setting, +	bool skip_video_pattern); + +#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c new file mode 100644 index 000000000000..a4071d2959a0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -0,0 +1,579 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements 8b/10b link training specially modified to support an + * embedded retimer chip. This retimer chip is referred as fixed vs pe retimer. + * Unlike native dp connection this chip requires a modified link training + * protocol based on 8b/10b link training. Since this is a non standard sequence + * and we must support this hardware, we decided to isolate it in its own + * training sequence inside its own file. 
+ */ +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ +	link->ctx->logger + +void dp_fixed_vs_pe_read_lane_adjust( +	struct dc_link *link, +	union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ +	const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; +	const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; +	const uint8_t offset = dp_parse_lttpr_repeater_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	uint32_t vendor_lttpr_write_address = 0xF004F; +	uint32_t vendor_lttpr_read_address = 0xF0053; +	uint8_t dprx_vs = 0; +	uint8_t dprx_pe = 0; +	uint8_t lane; + +	if (offset != 0xFF) { +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +		vendor_lttpr_read_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	} + +	/* W/A to read lane settings requested by DPRX */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_vs[0], +			sizeof(vendor_lttpr_write_data_vs)); +	core_link_read_dpcd( +			link, +			vendor_lttpr_read_address, +			&dprx_vs, +			1); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); +	core_link_read_dpcd( +			link, +			vendor_lttpr_read_address, +			&dprx_pe, +			1); + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET  = (dprx_vs >> (2 * lane)) & 0x3; +		dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; +	} +} + + +void dp_fixed_vs_pe_set_retimer_lane_settings( +	struct dc_link *link, +	const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], +	uint8_t lane_count) +{ +	const uint8_t offset = dp_parse_lttpr_repeater_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; +	uint32_t vendor_lttpr_write_address = 0xF004F; +	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; +	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; +	uint8_t lane = 0; + +	if (offset != 0xFF) { +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	} + +	for (lane = 0; lane < lane_count; lane++) { +		vendor_lttpr_write_data_vs[3] |= +				dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +		vendor_lttpr_write_data_pe[3] |= +				dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +	} + +	/* Force LTTPR to output desired VS and PE */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_reset[0], +			sizeof(vendor_lttpr_write_data_reset)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_vs[0], +			sizeof(vendor_lttpr_write_data_vs)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); +} + +static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( +		struct dc_link *link, +		const struct link_resource *link_res, +		struct link_training_settings *lt_settings) +{ +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	uint8_t lane = 0; +	uint8_t toggle_rate = 0x6; +	uint8_t target_rate = 0x6; +	bool apply_toggle_rate_wa = false; +	uint8_t repeater_cnt; +	
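dp_fixed_vs_pe_set_retimer_lane_settings above packs the per-lane swing and pre-emphasis levels into the final byte of the vendor-specific LTTPR write, two bits per lane starting with lane 0. A standalone restatement of that packing with a worked value (illustrative sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Two bits per lane: lane 0 in bits 0-1, lane 1 in bits 2-3, and so on,
 * matching the shift-by-(2 * lane) loop in the function above. */
static uint8_t sketch_pack_per_lane(const uint8_t levels[4], uint8_t lane_count)
{
	uint8_t packed = 0;
	uint8_t lane;

	for (lane = 0; lane < lane_count; lane++)
		packed |= (uint8_t)((levels[lane] & 0x3) << (2 * lane));
	return packed;
}

int main(void)
{
	/* All four lanes at level 1 packs to 0x55. */
	const uint8_t vs[4] = {1, 1, 1, 1};

	assert(sketch_pack_per_lane(vs, 4) == 0x55);
	return 0;
}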
uint8_t repeater_id; + +	/* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ +	if (lt_settings->cr_pattern_time < 16000) +		lt_settings->cr_pattern_time = 16000; + +	/* Fixed VS/PE specific: Toggle link rate */ +	apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); +	target_rate = get_dpcd_link_rate(<_settings->link_settings); +	toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; + +	if (apply_toggle_rate_wa) +		lt_settings->link_settings.link_rate = toggle_rate; + +	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) +		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + +	/* 1. set link rate, lane count and spread. */ +	dpcd_set_link_settings(link, lt_settings); + +	/* Fixed VS/PE specific: Toggle link rate back*/ +	if (apply_toggle_rate_wa) { +		core_link_write_dpcd( +				link, +				DP_LINK_BW_SET, +				&target_rate, +				1); +	} + +	link->vendor_specific_lttpr_link_rate_wa = target_rate; + +	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + +		/* 2. perform link training (set link training done +		 *  to false is done as well) +		 */ +		repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + +		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); +				repeater_id--) { +			status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) { +				repeater_training_done(link, repeater_id); +				break; +			} + +			status = perform_8b_10b_channel_equalization_sequence(link, +					link_res, +					lt_settings, +					repeater_id); + +			repeater_training_done(link, repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) +				break; + +			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +				lt_settings->dpcd_lane_settings[lane].raw = 0; +				lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; +				lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; +			} +		} +	} + +	if (status == LINK_TRAINING_SUCCESS) { +		status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); +		if (status == LINK_TRAINING_SUCCESS) { +			status = perform_8b_10b_channel_equalization_sequence(link, +								       link_res, +								       lt_settings, +								       DPRX); +		} +	} + +	return status; +} + + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings) +{ +	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; +	const uint8_t offset = dp_parse_lttpr_repeater_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; +	const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; +	uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; +	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; +	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; +	uint32_t vendor_lttpr_write_address = 0xF004F; +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	uint8_t lane = 0; +	union down_spread_ctrl downspread = {0}; +	union lane_count_set lane_count_set = {0}; +	uint8_t toggle_rate; +	uint8_t rate; + +	/* Only 8b/10b is supported */ +	ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == +			DP_8b_10b_ENCODING); + +	if (lt_settings->lttpr_mode == 
LTTPR_MODE_NON_TRANSPARENT) { +		status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); +		return status; +	} + +	if (offset != 0xFF) { +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +		/* Certain display and cable configuration require extra delay */ +		if (offset > 2) +			pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; +	} + +	/* Vendor specific: Reset lane settings */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_reset[0], +			sizeof(vendor_lttpr_write_data_reset)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_vs[0], +			sizeof(vendor_lttpr_write_data_vs)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); + +	/* Vendor specific: Enable intercept */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_intercept_en[0], +			sizeof(vendor_lttpr_write_data_intercept_en)); + +	/* 1. set link rate, lane count and spread. */ + +	downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + +	lane_count_set.bits.LANE_COUNT_SET = +	lt_settings->link_settings.lane_count; + +	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; +	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + +	if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { +		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = +				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; +	} + +	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, +		&downspread.raw, sizeof(downspread)); + +	core_link_write_dpcd(link, DP_LANE_COUNT_SET, +		&lane_count_set.raw, 1); + +	rate = get_dpcd_link_rate(&lt_settings->link_settings); + +	/* Vendor specific: Toggle link rate */ +	toggle_rate = (rate == 0x6) ? 0xA : 0x6; + +	if (link->vendor_specific_lttpr_link_rate_wa == rate) { +		core_link_write_dpcd( +				link, +				DP_LINK_BW_SET, +				&toggle_rate, +				1); +	} + +	link->vendor_specific_lttpr_link_rate_wa = rate; + +	core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + +	DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", +		__func__, +		DP_LINK_BW_SET, +		lt_settings->link_settings.link_rate, +		DP_LANE_COUNT_SET, +		lt_settings->link_settings.lane_count, +		lt_settings->enhanced_framing, +		DP_DOWNSPREAD_CTRL, +		lt_settings->link_settings.link_spread); + +	/* 2. Perform link training */ + +	/* Perform Clock Recovery Sequence */ +	if (status == LINK_TRAINING_SUCCESS) { +		const uint8_t max_vendor_dpcd_retries = 10; +		uint32_t retries_cr; +		uint32_t retry_count; +		uint32_t wait_time_microsec; +		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; +		union lane_align_status_updated dpcd_lane_status_updated; +		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; +		enum dc_status dpcd_status = DC_OK; +		uint8_t i = 0; + +		retries_cr = 0; +		retry_count = 0; + +		memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); +		memset(&dpcd_lane_status_updated, '\0', +		sizeof(dpcd_lane_status_updated)); + +		while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && +			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + +			/* 1.
call HWSS to set lane settings */ +			dp_set_hw_lane_settings( +					link, +					link_res, +					lt_settings, +					0); + +			/* 2. update DPCD of the receiver */ +			if (!retry_count) { +				/* EPR #361076 - write as a 5-byte burst, +				 * but only for the 1-st iteration. +				 */ +				dpcd_set_lt_pattern_and_lane_settings( +						link, +						lt_settings, +						lt_settings->pattern_for_cr, +						0); +				/* Vendor specific: Disable intercept */ +				for (i = 0; i < max_vendor_dpcd_retries; i++) { +					msleep(pre_disable_intercept_delay_ms); +					dpcd_status = core_link_write_dpcd( +							link, +							vendor_lttpr_write_address, +							&vendor_lttpr_write_data_intercept_dis[0], +							sizeof(vendor_lttpr_write_data_intercept_dis)); + +					if (dpcd_status == DC_OK) +						break; + +					core_link_write_dpcd( +							link, +							vendor_lttpr_write_address, +							&vendor_lttpr_write_data_intercept_en[0], +							sizeof(vendor_lttpr_write_data_intercept_en)); +				} +			} else { +				vendor_lttpr_write_data_vs[3] = 0; +				vendor_lttpr_write_data_pe[3] = 0; + +				for (lane = 0; lane < lane_count; lane++) { +					vendor_lttpr_write_data_vs[3] |= +							lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +					vendor_lttpr_write_data_pe[3] |= +							lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +				} + +				/* Vendor specific: Update VS and PE to DPRX requested value */ +				core_link_write_dpcd( +						link, +						vendor_lttpr_write_address, +						&vendor_lttpr_write_data_vs[0], +						sizeof(vendor_lttpr_write_data_vs)); +				core_link_write_dpcd( +						link, +						vendor_lttpr_write_address, +						&vendor_lttpr_write_data_pe[0], +						sizeof(vendor_lttpr_write_data_pe)); + +				dpcd_set_lane_settings( +						link, +						lt_settings, +						0); +			} + +			/* 3. wait receiver to lock-on*/ +			wait_time_microsec = lt_settings->cr_pattern_time; + +			dp_wait_for_training_aux_rd_interval( +					link, +					wait_time_microsec); + +			/* 4. Read lane status and requested drive +			 * settings as set by the sink +			 */ +			dp_get_lane_status_and_lane_adjust( +					link, +					lt_settings, +					dpcd_lane_status, +					&dpcd_lane_status_updated, +					dpcd_lane_adjust, +					0); + +			/* 5. check CR done*/ +			if (dp_is_cr_done(lane_count, dpcd_lane_status)) { +				status = LINK_TRAINING_SUCCESS; +				break; +			} + +			/* 6. max VS reached*/ +			if (dp_is_max_vs_reached(lt_settings)) +				break; + +			/* 7. same lane settings */ +			/* Note: settings are the same for all lanes, +			 * so comparing first lane is sufficient +			 */ +			if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == +					dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) +				retries_cr++; +			else +				retries_cr = 0; + +			/* 8. update VS/PE/PC2 in lt_settings*/ +			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +			retry_count++; +		} + +		if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { +			ASSERT(0); +			DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", +				__func__, +				LINK_TRAINING_MAX_CR_RETRY); + +		} + +		status = dp_get_cr_failure(lane_count, dpcd_lane_status); +	} + +	/* Perform Channel EQ Sequence */ +	if (status == LINK_TRAINING_SUCCESS) { +		enum dc_dp_training_pattern tr_pattern; +		uint32_t retries_ch_eq; +		uint32_t wait_time_microsec; +		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +		union lane_align_status_updated dpcd_lane_status_updated = {0}; +		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +		/* Note: also check that TPS4 is a supported feature*/ +		tr_pattern = lt_settings->pattern_for_eq; + +		dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + +		status = LINK_TRAINING_EQ_FAIL_EQ; + +		for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; +			retries_ch_eq++) { + +			dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + +			vendor_lttpr_write_data_vs[3] = 0; +			vendor_lttpr_write_data_pe[3] = 0; + +			for (lane = 0; lane < lane_count; lane++) { +				vendor_lttpr_write_data_vs[3] |= +						lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +				vendor_lttpr_write_data_pe[3] |= +						lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +			} + +			/* Vendor specific: Update VS and PE to DPRX requested value */ +			core_link_write_dpcd( +					link, +					vendor_lttpr_write_address, +					&vendor_lttpr_write_data_vs[0], +					sizeof(vendor_lttpr_write_data_vs)); +			core_link_write_dpcd( +					link, +					vendor_lttpr_write_address, +					&vendor_lttpr_write_data_pe[0], +					sizeof(vendor_lttpr_write_data_pe)); + +			/* 2. update DPCD*/ +			if (!retries_ch_eq) +				/* EPR #361076 - write as a 5-byte burst, +				 * but only for the 1-st iteration +				 */ + +				dpcd_set_lt_pattern_and_lane_settings( +					link, +					lt_settings, +					tr_pattern, 0); +			else +				dpcd_set_lane_settings(link, lt_settings, 0); + +			/* 3. wait for receiver to lock-on*/ +			wait_time_microsec = lt_settings->eq_pattern_time; + +			dp_wait_for_training_aux_rd_interval( +					link, +					wait_time_microsec); + +			/* 4. Read lane status and requested +			 * drive settings as set by the sink +			 */ +			dp_get_lane_status_and_lane_adjust( +				link, +				lt_settings, +				dpcd_lane_status, +				&dpcd_lane_status_updated, +				dpcd_lane_adjust, +				0); + +			/* 5. check CR done*/ +			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { +				status = LINK_TRAINING_EQ_FAIL_CR; +				break; +			} + +			/* 6. check CHEQ done*/ +			if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && +					dp_is_symbol_locked(lane_count, dpcd_lane_status) && +					dp_is_interlane_aligned(dpcd_lane_status_updated)) { +				status = LINK_TRAINING_SUCCESS; +				break; +			} + +			/* 7. update VS/PE/PC2 in lt_settings*/ +			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +		} +	} + +	return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h new file mode 100644 index 000000000000..e61970e27661 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -0,0 +1,45 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings); + +void dp_fixed_vs_pe_set_retimer_lane_settings( +	struct dc_link *link, +	const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], +	uint8_t lane_count); + +void dp_fixed_vs_pe_read_lane_adjust( +	struct dc_link *link, +	union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]); + +#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index af110bf9470f..5c9a30211c10 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -23,11 +23,14 @@   *   */ -#include <inc/core_status.h> -#include <dc_link.h> -#include <inc/link_hwss.h> -#include <inc/link_dpcd.h> -#include <dc_dp_types.h> +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements basic dpcd read/write functionality. It also does basic + * dpcd range check to ensure that every dpcd request is compliant with specs + * range requirements. + */ + +#include "link_dpcd.h"  #include <drm/display/drm_dp_helper.h>  #include "dm_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h index d561f86d503c..08d787a1e451 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h @@ -25,9 +25,8 @@  #ifndef __LINK_DPCD_H__  #define __LINK_DPCD_H__ -#include <inc/core_status.h> -#include <dc_link.h> -#include <dc_link_dp.h> +#include "link.h" +#include "dpcd_defs.h"  enum dc_status core_link_read_dpcd(  		struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c new file mode 100644 index 000000000000..97e02b5b21ae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -0,0 +1,833 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements retrieval and configuration of eDP panel features such + * as PSR and ABM and it also manages specs defined eDP panel power sequences. + */ + +#include "link_edp_panel_control.h" +#include "link_dpcd.h" +#include "link_dp_capability.h" +#include "dm_helpers.h" +#include "dal_asic_id.h" +#include "dce/dmub_psr.h" +#include "abm.h" +#define DC_LOGGER_INIT(logger) + +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) +{ +	union dpcd_edp_config edp_config_set; +	bool panel_mode_edp = false; + +	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); + +	if (panel_mode != DP_PANEL_MODE_DEFAULT) { + +		switch (panel_mode) { +		case DP_PANEL_MODE_EDP: +		case DP_PANEL_MODE_SPECIAL: +			panel_mode_edp = true; +			break; + +		default: +				break; +		} + +		/*set edp panel mode in receiver*/ +		core_link_read_dpcd( +			link, +			DP_EDP_CONFIGURATION_SET, +			&edp_config_set.raw, +			sizeof(edp_config_set.raw)); + +		if (edp_config_set.bits.PANEL_MODE_EDP +			!= panel_mode_edp) { +			enum dc_status result; + +			edp_config_set.bits.PANEL_MODE_EDP = +			panel_mode_edp; +			result = core_link_write_dpcd( +				link, +				DP_EDP_CONFIGURATION_SET, +				&edp_config_set.raw, +				sizeof(edp_config_set.raw)); + +			ASSERT(result == DC_OK); +		} +	} +	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " +		 "eDP panel mode enabled: %d \n", +		 link->link_index, +		 link->dpcd_caps.panel_mode_edp, +		 panel_mode_edp); +} + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) +{ +	/* We need to explicitly check that connector +	 * is not DP. Some Travis_VGA get reported +	 * by video bios as DP. +	 */ +	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) { + +		switch (link->dpcd_caps.branch_dev_id) { +		case DP_BRANCH_DEVICE_ID_0022B9: +			/* alternate scrambler reset is required for Travis +			 * for the case when external chip does not +			 * provide sink device id, alternate scrambler +			 * scheme will  be overriden later by querying +			 * Encoder features +			 */ +			if (strncmp( +				link->dpcd_caps.branch_dev_name, +				DP_VGA_LVDS_CONVERTER_ID_2, +				sizeof( +				link->dpcd_caps. 
+				branch_dev_name)) == 0) { +					return DP_PANEL_MODE_SPECIAL; +			} +			break; +		case DP_BRANCH_DEVICE_ID_00001A: +			/* alternate scrambler reset is required for Travis +			 * for the case when external chip does not provide +			 * sink device id, alternate scrambler scheme will +			 * be overriden later by querying Encoder feature +			 */ +			if (strncmp(link->dpcd_caps.branch_dev_name, +				DP_VGA_LVDS_CONVERTER_ID_3, +				sizeof( +				link->dpcd_caps. +				branch_dev_name)) == 0) { +					return DP_PANEL_MODE_SPECIAL; +			} +			break; +		default: +			break; +		} +	} + +	if (link->dpcd_caps.panel_mode_edp && +		(link->connector_signal == SIGNAL_TYPE_EDP || +		 (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && +		  link->is_internal_display))) { +		return DP_PANEL_MODE_EDP; +	} + +	return DP_PANEL_MODE_DEFAULT; +} + +bool dc_link_set_backlight_level_nits(struct dc_link *link, +		bool isHDR, +		uint32_t backlight_millinits, +		uint32_t transition_time_in_ms) +{ +	struct dpcd_source_backlight_set dpcd_backlight_set; +	uint8_t backlight_control = isHDR ? 1 : 0; + +	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && +			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) +		return false; + +	// OLEDs have no PWM, they can only use AUX +	if (link->dpcd_sink_ext_caps.bits.oled == 1) +		backlight_control = 1; + +	*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; +	*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + +	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, +			(uint8_t *)(&dpcd_backlight_set), +			sizeof(dpcd_backlight_set)) != DC_OK) +		return false; + +	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, +			&backlight_control, 1) != DC_OK) +		return false; + +	return true; +} + +bool dc_link_get_backlight_level_nits(struct dc_link *link, +		uint32_t *backlight_millinits_avg, +		uint32_t *backlight_millinits_peak) +{ +	union dpcd_source_backlight_get dpcd_backlight_get; + +	memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); + +	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && +			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) +		return false; + +	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, +			dpcd_backlight_get.raw, +			sizeof(union dpcd_source_backlight_get))) +		return false; + +	*backlight_millinits_avg = +		dpcd_backlight_get.bytes.backlight_millinits_avg; +	*backlight_millinits_peak = +		dpcd_backlight_get.bytes.backlight_millinits_peak; + +	/* On non-supported panels dpcd_read usually succeeds with 0 returned */ +	if (*backlight_millinits_avg == 0 || +			*backlight_millinits_avg > *backlight_millinits_peak) +		return false; + +	return true; +} + +bool link_backlight_enable_aux(struct dc_link *link, bool enable) +{ +	uint8_t backlight_enable = enable ? 
1 : 0; + +	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && +		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) +		return false; + +	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, +		&backlight_enable, 1) != DC_OK) +		return false; + +	return true; +} + +// we read default from 0x320 because we expect BIOS wrote it there +// regular get_backlight_nit reads from panel set at 0x326 +static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) +{ +	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && +		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) +		return false; + +	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, +		(uint8_t *) backlight_millinits, +		sizeof(uint32_t))) +		return false; + +	return true; +} + +bool set_default_brightness_aux(struct dc_link *link) +{ +	uint32_t default_backlight; + +	if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { +		if (!read_default_bl_aux(link, &default_backlight)) +			default_backlight = 150000; +		// if < 5 nits or > 5000, it might be wrong readback +		if (default_backlight < 5000 || default_backlight > 5000000) +			default_backlight = 150000; // + +		return dc_link_set_backlight_level_nits(link, true, +				default_backlight, 0); +	} +	return false; +} + +bool link_is_edp_ilr_optimization_required(struct dc_link *link, +		struct dc_crtc_timing *crtc_timing) +{ +	struct dc_link_settings link_setting; +	uint8_t link_bw_set; +	uint8_t link_rate_set; +	uint32_t req_bw; +	union lane_count_set lane_count_set = {0}; + +	ASSERT(link || crtc_timing); // invalid input + +	if (link->dpcd_caps.edp_supported_link_rates_count == 0 || +			!link->panel_config.ilr.optimize_edp_link_rate) +		return false; + + +	// Read DPCD 00100h to find if standard link rates are set +	core_link_read_dpcd(link, DP_LINK_BW_SET, +				&link_bw_set, sizeof(link_bw_set)); + +	if (link_bw_set) { +		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); +		return true; +	} + +	// Read DPCD 00115h to find the edp link rate set used +	core_link_read_dpcd(link, DP_LINK_RATE_SET, +			    &link_rate_set, sizeof(link_rate_set)); + +	// Read DPCD 00101h to find out the number of lanes currently set +	core_link_read_dpcd(link, DP_LANE_COUNT_SET, +				&lane_count_set.raw, sizeof(lane_count_set)); + +	req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + +	if (!crtc_timing->flags.DSC) +		dc_link_decide_edp_link_settings(link, &link_setting, req_bw); +	else +		decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); + +	if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || +			lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { +		DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); +		return true; +	} + +	DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); +	return false; +} + +void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) +{ +	if (link->connector_signal != SIGNAL_TYPE_EDP) +		return; + +	link->dc->hwss.edp_power_control(link, true); +	if (wait_for_hpd) +		link->dc->hwss.edp_wait_for_hpd_ready(link, true); +	if (link->dc->hwss.edp_backlight_control) +		link->dc->hwss.edp_backlight_control(link, true); +} + +bool dc_link_wait_for_t12(struct dc_link *link) +{ +	if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { +		link->dc->hwss.edp_wait_for_T12(link); + +		
return true; +	} + +	return false; +} + +void link_edp_add_delay_for_T9(struct dc_link *link) +{ +	if (link && link->panel_config.pps.extra_delay_backlight_off > 0) +		udelay(link->panel_config.pps.extra_delay_backlight_off * 1000); +} + +bool link_edp_receiver_ready_T9(struct dc_link *link) +{ +	unsigned int tries = 0; +	unsigned char sinkstatus = 0; +	unsigned char edpRev = 0; +	enum dc_status result = DC_OK; + +	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + +	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ +	if (result == DC_OK && edpRev >= DP_EDP_12) { +		do { +			sinkstatus = 1; +			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); +			if (sinkstatus == 0) +				break; +			if (result != DC_OK) +				break; +			udelay(100); //MAx T9 +		} while (++tries < 50); +	} + +	return result; +} + +bool link_edp_receiver_ready_T7(struct dc_link *link) +{ +	unsigned char sinkstatus = 0; +	unsigned char edpRev = 0; +	enum dc_status result = DC_OK; + +	/* use absolute time stamp to constrain max T7*/ +	unsigned long long enter_timestamp = 0; +	unsigned long long finish_timestamp = 0; +	unsigned long long time_taken_in_ns = 0; + +	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + +	if (result == DC_OK && edpRev >= DP_EDP_12) { +		/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ +		enter_timestamp = dm_get_timestamp(link->ctx); +		do { +			sinkstatus = 0; +			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); +			if (sinkstatus == 1) +				break; +			if (result != DC_OK) +				break; +			udelay(25); +			finish_timestamp = dm_get_timestamp(link->ctx); +			time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); +		} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms +	} + +	if (link && link->panel_config.pps.extra_t7_ms > 0) +		udelay(link->panel_config.pps.extra_t7_ms * 1000); + +	return result; +} + +bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable) +{ +	bool ret = false; +	union dpcd_alpm_configuration alpm_config; + +	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { +		memset(&alpm_config, 0, sizeof(alpm_config)); + +		alpm_config.bits.ENABLE = (enable ? 
true : false); +		ret = dm_helpers_dp_write_dpcd(link->ctx, link, +				DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, +				sizeof(alpm_config.raw)); +	} +	return ret; +} + +static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) +{ +	int i; +	struct dc *dc = link->ctx->dc; +	struct pipe_ctx *pipe_ctx = NULL; + +	for (i = 0; i < MAX_PIPES; i++) { +		if (dc->current_state->res_ctx.pipe_ctx[i].stream) { +			if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { +				pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; +				break; +			} +		} +	} + +	return pipe_ctx; +} + +bool dc_link_set_backlight_level(const struct dc_link *link, +		uint32_t backlight_pwm_u16_16, +		uint32_t frame_ramp) +{ +	struct dc  *dc = link->ctx->dc; + +	DC_LOGGER_INIT(link->ctx->logger); +	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", +			backlight_pwm_u16_16, backlight_pwm_u16_16); + +	if (dc_is_embedded_signal(link->connector_signal)) { +		struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + +		if (pipe_ctx) { +			/* Disable brightness ramping when the display is blanked +			 * as it can hang the DMCU +			 */ +			if (pipe_ctx->plane_state == NULL) +				frame_ramp = 0; +		} else { +			return false; +		} + +		dc->hwss.set_backlight_level( +				pipe_ctx, +				backlight_pwm_u16_16, +				frame_ramp); +	} +	return true; +} + +bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active, +		bool wait, bool force_static, const unsigned int *power_opts) +{ +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu; +	struct dmub_psr *psr = dc->res_pool->psr; +	unsigned int panel_inst; + +	if (psr == NULL && force_static) +		return false; + +	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) +		return false; + +	if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) { +		// Don't enter PSR if panel is not connected +		return false; +	} + +	/* Set power optimization flag */ +	if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { +		link->psr_settings.psr_power_opt = *power_opts; + +		if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) +			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); +	} + +	if (psr != NULL && link->psr_settings.psr_feature_enabled && +			force_static && psr->funcs->psr_force_static) +		psr->funcs->psr_force_static(psr, panel_inst); + +	/* Enable or Disable PSR */ +	if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { +		link->psr_settings.psr_allow_active = *allow_active; + +		if (!link->psr_settings.psr_allow_active) +			dc_z10_restore(dc); + +		if (psr != NULL && link->psr_settings.psr_feature_enabled) { +			psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); +		} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && +			link->psr_settings.psr_feature_enabled) +			dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); +		else +			return false; +	} +	return true; +} + +bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) +{ +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu; +	struct dmub_psr *psr = dc->res_pool->psr; +	unsigned int panel_inst; + +	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) +		return false; + +	if (psr != NULL && link->psr_settings.psr_feature_enabled) +		psr->funcs->psr_get_state(psr, state, panel_inst); +	else if (dmcu != NULL && 
link->psr_settings.psr_feature_enabled) +		dmcu->funcs->get_psr_state(dmcu, state); + +	return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(struct dc_link *link) +{ +	struct dc_context *dc_ctx = link->ctx; +	enum transmitter transmitter_value = link->link_enc->transmitter; + +	switch (transmitter_value) { +	case TRANSMITTER_UNIPHY_A: +		return PHYLD_0; +	case TRANSMITTER_UNIPHY_B: +		return PHYLD_1; +	case TRANSMITTER_UNIPHY_C: +		return PHYLD_2; +	case TRANSMITTER_UNIPHY_D: +		return PHYLD_3; +	case TRANSMITTER_UNIPHY_E: +		return PHYLD_4; +	case TRANSMITTER_UNIPHY_F: +		return PHYLD_5; +	case TRANSMITTER_NUTMEG_CRT: +		return PHYLD_6; +	case TRANSMITTER_TRAVIS_CRT: +		return PHYLD_7; +	case TRANSMITTER_TRAVIS_LCD: +		return PHYLD_8; +	case TRANSMITTER_UNIPHY_G: +		return PHYLD_9; +	case TRANSMITTER_COUNT: +		return PHYLD_COUNT; +	case TRANSMITTER_UNKNOWN: +		return PHYLD_UNKNOWN; +	default: +		DC_ERROR("Unknown transmitter value %d\n", transmitter_value); +		return PHYLD_UNKNOWN; +	} +} + +bool dc_link_setup_psr(struct dc_link *link, +		const struct dc_stream_state *stream, struct psr_config *psr_config, +		struct psr_context *psr_context) +{ +	struct dc *dc; +	struct dmcu *dmcu; +	struct dmub_psr *psr; +	int i; +	unsigned int panel_inst; +	/* updateSinkPsrDpcdConfig*/ +	union dpcd_psr_configuration psr_configuration; +	union dpcd_sink_active_vtotal_control_mode vtotal_control = {0}; + +	psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + +	if (!link) +		return false; + +	dc = link->ctx->dc; +	dmcu = dc->res_pool->dmcu; +	psr = dc->res_pool->psr; + +	if (!dmcu && !psr) +		return false; + +	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) +		return false; + + +	memset(&psr_configuration, 0, sizeof(psr_configuration)); + +	psr_configuration.bits.ENABLE                    = 1; +	psr_configuration.bits.CRC_VERIFICATION          = 1; +	psr_configuration.bits.FRAME_CAPTURE_INDICATION  = +			psr_config->psr_frame_capture_indication_req; + +	/* Check for PSR v2*/ +	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { +		/* For PSR v2 selective update. +		 * Indicates whether sink should start capturing +		 * immediately following active scan line, +		 * or starting with the 2nd active scan line. +		 */ +		psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; +		/*For PSR v2, determines whether Sink should generate +		 * IRQ_HPD when CRC mismatch is detected. +		 */ +		psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR    = 1; +		/* For PSR v2, set the bit when the Source device will +		 * be enabling PSR2 operation. +		 */ +		psr_configuration.bits.ENABLE_PSR2    = 1; +		/* For PSR v2, the Sink device must be able to receive +		 * SU region updates early in the frame time. 
+		 */ +		psr_configuration.bits.EARLY_TRANSPORT_ENABLE    = 1; +	} + +	dm_helpers_dp_write_dpcd( +		link->ctx, +		link, +		368, +		&psr_configuration.raw, +		sizeof(psr_configuration.raw)); + +	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { +		link_power_alpm_dpcd_enable(link, true); +		psr_context->su_granularity_required = +			psr_config->su_granularity_required; +		psr_context->su_y_granularity = +			psr_config->su_y_granularity; +		psr_context->line_time_in_us = psr_config->line_time_in_us; + +		/* linux must be able to expose AMD Source DPCD definition +		 * in order to support FreeSync PSR +		 */ +		if (link->psr_settings.psr_vtotal_control_support) { +			psr_context->rate_control_caps = psr_config->rate_control_caps; +			vtotal_control.bits.ENABLE = true; +			core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE, +							&vtotal_control.raw, sizeof(vtotal_control.raw)); +		} +	} + +	psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; +	psr_context->transmitterId = link->link_enc->transmitter; +	psr_context->engineId = link->link_enc->preferred_engine; + +	for (i = 0; i < MAX_PIPES; i++) { +		if (dc->current_state->res_ctx.pipe_ctx[i].stream +				== stream) { +			/* dmcu -1 for all controller id values, +			 * therefore +1 here +			 */ +			psr_context->controllerId = +				dc->current_state->res_ctx. +				pipe_ctx[i].stream_res.tg->inst + 1; +			break; +		} +	} + +	/* Hardcoded for now.  Can be Pcie or Uniphy (or Unknown)*/ +	psr_context->phyType = PHY_TYPE_UNIPHY; +	/*PhyId is associated with the transmitter id*/ +	psr_context->smuPhyId = transmitter_to_phy_id(link); + +	psr_context->crtcTimingVerticalTotal = stream->timing.v_total; +	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> +					timing.pix_clk_100hz * 100), +					stream->timing.v_total), +					stream->timing.h_total); + +	psr_context->psrSupportedDisplayConfig = true; +	psr_context->psrExitLinkTrainingRequired = +		psr_config->psr_exit_link_training_required; +	psr_context->sdpTransmitLineNumDeadline = +		psr_config->psr_sdp_transmit_line_num_deadline; +	psr_context->psrFrameCaptureIndicationReq = +		psr_config->psr_frame_capture_indication_req; + +	psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + +	psr_context->numberOfControllers = +			link->dc->res_pool->timing_generator_count; + +	psr_context->rfb_update_auto_en = true; + +	/* 2 frames before enter PSR. */ +	psr_context->timehyst_frames = 2; +	/* half a frame +	 * (units in 100 lines, i.e. a value of 1 represents 100 lines) +	 */ +	psr_context->hyst_lines = stream->timing.v_total / 2 / 100; +	psr_context->aux_repeats = 10; + +	psr_context->psr_level.u32all = 0; + +	/*skip power down the single pipe since it blocks the cstate*/ +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (link->ctx->asic_id.chip_family >= FAMILY_RV) { +		switch (link->ctx->asic_id.chip_family) { +		case FAMILY_YELLOW_CARP: +		case AMDGPU_FAMILY_GC_10_3_6: +		case AMDGPU_FAMILY_GC_11_0_1: +			if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable) +				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +			break; +		default: +			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +			break; +		} +	} +#else +	if (link->ctx->asic_id.chip_family >= FAMILY_RV) +		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + +	/* SMU will perform additional powerdown sequence. +	 * For unsupported ASICs, set psr_level flag to skip PSR +	 *  static screen notification to SMU. 
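The vsync_rate_hz calculation above reduces to the pixel clock divided by the total frame size (v_total * h_total), with the divisions ordered so the intermediate values stay in 64-bit integer math. A standalone check with an illustrative 1920x1080 timing; the numbers below are assumptions for the example, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative 1920x1080@60 timing. */
	uint64_t pix_clk_100hz = 1485000;	/* 148.50 MHz expressed in 100 Hz units */
	uint64_t v_total = 1125;
	uint64_t h_total = 2200;

	/* Same ordering as the div64_u64() chain used for vsync_rate_hz above. */
	uint64_t vsync_rate_hz = ((pix_clk_100hz * 100) / v_total) / h_total;

	printf("vsync_rate_hz = %llu\n", (unsigned long long)vsync_rate_hz);	/* prints 60 */
	return 0;
}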
+	 *  (Always set for DAL2, did not check ASIC) +	 */ +	psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; +	psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; + +	/* Complete PSR entry before aborting to prevent intermittent +	 * freezes on certain eDPs +	 */ +	psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + +	/* Disable ALPM first for compatible non-ALPM panel now */ +	psr_context->psr_level.bits.DISABLE_ALPM = 0; +	psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; + +	/* Controls additional delay after remote frame capture before +	 * continuing power down, default = 0 +	 */ +	psr_context->frame_delay = 0; + +	psr_context->dsc_slice_height = psr_config->dsc_slice_height; + +	if (psr) { +		link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, +			link, psr_context, panel_inst); +		link->psr_settings.psr_power_opt = 0; +		link->psr_settings.psr_allow_active = 0; +	} else { +		link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); +	} + +	/* psr_enabled == 0 indicates setup_psr did not succeed, but this +	 * should not happen since firmware should be running at this point +	 */ +	if (link->psr_settings.psr_feature_enabled == 0) +		ASSERT(0); + +	return true; + +} + +void link_get_psr_residency(const struct dc_link *link, uint32_t *residency) +{ +	struct dc  *dc = link->ctx->dc; +	struct dmub_psr *psr = dc->res_pool->psr; +	unsigned int panel_inst; + +	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) +		return; + +	// PSR residency measurements only supported on DMCUB +	if (psr != NULL && link->psr_settings.psr_feature_enabled) +		psr->funcs->psr_get_residency(psr, residency, panel_inst); +	else +		*residency = 0; +} +bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) +{ +	struct dc *dc = link->ctx->dc; +	struct dmub_psr *psr = dc->res_pool->psr; + +	if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) +		return false; + +	psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); + +	return true; +} + +static struct abm *get_abm_from_stream_res(const struct dc_link *link) +{ +	int i; +	struct dc *dc = link->ctx->dc; +	struct abm *abm = NULL; + +	for (i = 0; i < MAX_PIPES; i++) { +		struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; +		struct dc_stream_state *stream = pipe_ctx.stream; + +		if (stream && stream->link == link) { +			abm = pipe_ctx.stream_res.abm; +			break; +		} +	} +	return abm; +} + +int dc_link_get_backlight_level(const struct dc_link *link) +{ +	struct abm *abm = get_abm_from_stream_res(link); +	struct panel_cntl *panel_cntl = link->panel_cntl; +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu; +	bool fw_set_brightness = true; + +	if (dmcu) +		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); + +	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) +		return panel_cntl->funcs->get_current_backlight(panel_cntl); +	else if (abm != NULL && abm->funcs->get_current_backlight != NULL) +		return (int) abm->funcs->get_current_backlight(abm); +	else +		return DC_ERROR_UNEXPECTED; +} + +int dc_link_get_target_backlight_pwm(const struct dc_link *link) +{ +	struct abm *abm = get_abm_from_stream_res(link); + +	if (abm == NULL || abm->funcs->get_target_backlight == NULL) +		return DC_ERROR_UNEXPECTED; + +	return (int) 
abm->funcs->get_target_backlight(abm); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h new file mode 100644 index 000000000000..7f91a564b089 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__ +#define __DC_LINK_EDP_PANEL_CONTROL_H__ +#include "link.h" + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); +bool set_default_brightness_aux(struct dc_link *link); +#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c new file mode 100644 index 000000000000..5f39dfe06e9a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -0,0 +1,240 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements functions that manage basic HPD components such as gpio. + * It also provides wrapper functions to execute HPD related programming. 
This + * file only manages basic HPD functionality. It doesn't manage detection or + * feature or signal specific HPD behaviors. + */ +#include "link_hpd.h" +#include "gpio_service_interface.h" + +bool dc_link_get_hpd_state(struct dc_link *dc_link) +{ +	uint32_t state; + +	dal_gpio_lock_pin(dc_link->hpd_gpio); +	dal_gpio_get_value(dc_link->hpd_gpio, &state); +	dal_gpio_unlock_pin(dc_link->hpd_gpio); + +	return state; +} + +void dc_link_enable_hpd(const struct dc_link *link) +{ +	struct link_encoder *encoder = link->link_enc; + +	if (encoder != NULL && encoder->funcs->enable_hpd != NULL) +		encoder->funcs->enable_hpd(encoder); +} + +void dc_link_disable_hpd(const struct dc_link *link) +{ +	struct link_encoder *encoder = link->link_enc; + +	if (encoder != NULL && encoder->funcs->enable_hpd != NULL) +		encoder->funcs->disable_hpd(encoder); +} + +void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) +{ +	struct gpio *hpd; + +	if (enable) { +		link->is_hpd_filter_disabled = false; +		program_hpd_filter(link); +	} else { +		link->is_hpd_filter_disabled = true; +		/* Obtain HPD handle */ +		hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + +		if (!hpd) +			return; + +		/* Setup HPD filtering */ +		if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { +			struct gpio_hpd_config config; + +			config.delay_on_connect = 0; +			config.delay_on_disconnect = 0; + +			dal_irq_setup_hpd_filter(hpd, &config); + +			dal_gpio_close(hpd); +		} else { +			ASSERT_CRITICAL(false); +		} +		/* Release HPD handle */ +		dal_gpio_destroy_irq(&hpd); +	} +} + +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, +			  struct graphics_object_id link_id, +			  struct gpio_service *gpio_service) +{ +	enum bp_result bp_result; +	struct graphics_object_hpd_info hpd_info; +	struct gpio_pin_info pin_info; + +	if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) +		return NULL; + +	bp_result = dcb->funcs->get_gpio_pin_info(dcb, +		hpd_info.hpd_int_gpio_uid, &pin_info); + +	if (bp_result != BP_RESULT_OK) { +		ASSERT(bp_result == BP_RESULT_NORECORD); +		return NULL; +	} + +	return dal_gpio_service_create_irq(gpio_service, +					   pin_info.offset, +					   pin_info.mask); +} + +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high) +{ +	struct gpio *hpd_pin = link_get_hpd_gpio( +			link->ctx->dc_bios, link->link_id, +			link->ctx->gpio_service); +	if (!hpd_pin) +		return false; + +	dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); +	dal_gpio_get_value(hpd_pin, is_hpd_high); +	dal_gpio_close(hpd_pin); +	dal_gpio_destroy_irq(&hpd_pin); +	return true; +} + +enum hpd_source_id get_hpd_line(struct dc_link *link) +{ +	struct gpio *hpd; +	enum hpd_source_id hpd_id; + +		hpd_id = HPD_SOURCEID_UNKNOWN; + +	hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, +			   link->ctx->gpio_service); + +	if (hpd) { +		switch (dal_irq_get_source(hpd)) { +		case DC_IRQ_SOURCE_HPD1: +			hpd_id = HPD_SOURCEID1; +		break; +		case DC_IRQ_SOURCE_HPD2: +			hpd_id = HPD_SOURCEID2; +		break; +		case DC_IRQ_SOURCE_HPD3: +			hpd_id = HPD_SOURCEID3; +		break; +		case DC_IRQ_SOURCE_HPD4: +			hpd_id = HPD_SOURCEID4; +		break; +		case DC_IRQ_SOURCE_HPD5: +			hpd_id = HPD_SOURCEID5; +		break; +		case DC_IRQ_SOURCE_HPD6: +			hpd_id = HPD_SOURCEID6; +		break; +		default: +			BREAK_TO_DEBUGGER(); +		break; +		} + +		dal_gpio_destroy_irq(&hpd); +	} + +	return hpd_id; +} + +bool program_hpd_filter(const struct dc_link *link) +{ +	bool result = false; +	struct gpio *hpd; +	int 
delay_on_connect_in_ms = 0; +	int delay_on_disconnect_in_ms = 0; + +	if (link->is_hpd_filter_disabled) +		return false; +	/* Verify feature is supported */ +	switch (link->connector_signal) { +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +	case SIGNAL_TYPE_HDMI_TYPE_A: +		/* Program hpd filter */ +		delay_on_connect_in_ms = 500; +		delay_on_disconnect_in_ms = 100; +		break; +	case SIGNAL_TYPE_DISPLAY_PORT: +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +		/* Program hpd filter to allow DP signal to settle */ +		/* 500:	not able to detect MST <-> SST switch as HPD is low for +		 * only 100ms on DELL U2413 +		 * 0: some passive dongle still show aux mode instead of i2c +		 * 20-50: not enough to hide bouncing HPD with passive dongle. +		 * also see intermittent i2c read issues. +		 */ +		delay_on_connect_in_ms = 80; +		delay_on_disconnect_in_ms = 0; +		break; +	case SIGNAL_TYPE_LVDS: +	case SIGNAL_TYPE_EDP: +	default: +		/* Don't program hpd filter */ +		return false; +	} + +	/* Obtain HPD handle */ +	hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, +			   link->ctx->gpio_service); + +	if (!hpd) +		return result; + +	/* Setup HPD filtering */ +	if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { +		struct gpio_hpd_config config; + +		config.delay_on_connect = delay_on_connect_in_ms; +		config.delay_on_disconnect = delay_on_disconnect_in_ms; + +		dal_irq_setup_hpd_filter(hpd, &config); + +		dal_gpio_close(hpd); + +		result = true; +	} else { +		ASSERT_CRITICAL(false); +	} + +	/* Release HPD handle */ +	dal_gpio_destroy_irq(&hpd); + +	return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h new file mode 100644 index 000000000000..3d122def0c88 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_HPD_H__ +#define __DC_LINK_HPD_H__ +#include "link.h" + +enum hpd_source_id get_hpd_line(struct dc_link *link); +/* + *  Function: program_hpd_filter + * + *  @brief + *     Programs HPD filter on associated HPD line to default values. + * + *  @return + *     true on success, false otherwise + */ +bool program_hpd_filter(const struct dc_link *link); +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. 
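program_hpd_filter() above picks the connect/disconnect debounce delays purely from the connector signal type and programs nothing at all for eDP and LVDS. A minimal standalone mirror of that selection; the demo_* enum and helper are local to the example, not driver symbols:

#include <assert.h>
#include <stdbool.h>

/* Local stand-ins for the connector signal types; not driver enums. */
enum demo_signal { DEMO_SIGNAL_HDMI, DEMO_SIGNAL_DP, DEMO_SIGNAL_EDP };

/* Returns false when no filter should be programmed, as for eDP/LVDS above. */
static bool demo_hpd_filter_delays(enum demo_signal sig, int *on_ms, int *off_ms)
{
	switch (sig) {
	case DEMO_SIGNAL_HDMI:	/* DVI/HDMI: debounce both connect and disconnect */
		*on_ms = 500;
		*off_ms = 100;
		return true;
	case DEMO_SIGNAL_DP:	/* DP/DP-MST: 80 ms lets HPD and AUX settle */
		*on_ms = 80;
		*off_ms = 0;
		return true;
	default:
		return false;
	}
}

int main(void)
{
	int on = 0, off = 0;

	assert(demo_hpd_filter_delays(DEMO_SIGNAL_DP, &on, &off) && on == 80 && off == 0);
	assert(!demo_hpd_filter_delays(DEMO_SIGNAL_EDP, &on, &off));
	return 0;
}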
+ */ +bool dpia_query_hpd_status(struct dc_link *link); +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high); +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index eb5b7eb292ef..a391b939d709 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -126,10 +126,22 @@ enum dmub_notification_type {  	DMUB_NOTIFICATION_HPD,  	DMUB_NOTIFICATION_HPD_IRQ,  	DMUB_NOTIFICATION_SET_CONFIG_REPLY, +	DMUB_NOTIFICATION_DPIA_NOTIFICATION,  	DMUB_NOTIFICATION_MAX  };  /** + * DPIA NOTIFICATION Response Type + */ +enum dpia_notify_bw_alloc_status { + +	DPIA_BW_REQ_FAILED = 0, +	DPIA_BW_REQ_SUCCESS, +	DPIA_EST_BW_CHANGED, +	DPIA_BW_ALLOC_CAPS_CHANGED +}; + +/**   * struct dmub_region - dmub hw memory region   * @base: base address for region, must be 256 byte aligned   * @top: top address for region @@ -453,6 +465,7 @@ struct dmub_srv {   * @pending_notification: Indicates there are other pending notifications   * @aux_reply: aux reply   * @hpd_status: hpd status + * @bw_alloc_reply: BW Allocation reply from CM/DPIA   */  struct dmub_notification {  	enum dmub_notification_type type; @@ -463,6 +476,10 @@ struct dmub_notification {  		struct aux_reply_data aux_reply;  		enum dp_hpd_status hpd_status;  		enum set_config_status sc_status; +		/** +		 * DPIA notification command. +		 */ +		struct dmub_rb_cmd_dpia_notification dpia_notification;  	};  }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7a8f61517424..007d6bdc3e39 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -162,6 +162,7 @@ extern "C" {  #define dmub_udelay(microseconds) udelay(microseconds)  #endif +#pragma pack(push, 1)  /**   * union dmub_addr - DMUB physical/virtual 64-bit address.   */ @@ -172,6 +173,7 @@ union dmub_addr {  	} u; /*<< Low/high bit access */  	uint64_t quad_part; /*<< 64 bit address */  }; +#pragma pack(pop)  /**   * Dirty rect definition. @@ -225,6 +227,12 @@ union dmub_psr_debug_flags {  		 * Use TPS3 signal when restore main link.  		 */  		uint32_t force_wakeup_by_tps3 : 1; + +		/** +		 * Back to back flip, therefore cannot power down PHY +		 */ +		uint32_t back_to_back_flip : 1; +  	} bitfields;  	/** @@ -401,8 +409,9 @@ union dmub_fw_boot_options {  		uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/  		uint32_t usb4_cm_version: 1; /**< 1 CM support */  		uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ +		uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ -		uint32_t reserved : 16; /**< reserved */ +		uint32_t reserved : 15; /**< reserved */  	} bits; /**< boot bits */  	uint32_t all; /**< 32-bit access to bits */  }; @@ -450,6 +459,10 @@ enum dmub_cmd_vbios_type {  	 * Query DP alt status on a transmitter.  	 */  	DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT  = 26, +	/** +	 * Controls domain power gating +	 */ +	DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,  };  //============================================================================== @@ -731,6 +744,11 @@ enum dmub_cmd_type {  	 */  	/** +	 * Command type used for all SECURE_DISPLAY commands. 
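The boot-option change above adds usb4_dpia_bw_alloc_supported by taking one bit out of the reserved field (16 down to 15), so the bitfield union still maps onto the single 32-bit word shared with firmware. A standalone illustration of that invariant, using a reduced mock of the union rather than the real dmub_fw_boot_options:

#include <stdint.h>

/* Reduced mock of the boot-options pattern: every new flag bit is paid for
 * out of the reserved field so the word exchanged with firmware stays 32 bits. */
union demo_boot_options {
	struct {
		uint32_t existing_flags : 16;
		uint32_t dpia_hpd_int_enable_supported : 1;
		uint32_t usb4_dpia_bw_alloc_supported : 1;	/* new bit */
		uint32_t reserved : 14;				/* one bit smaller */
	} bits;
	uint32_t all;
};

_Static_assert(sizeof(union demo_boot_options) == sizeof(uint32_t),
	       "boot options must stay a single 32-bit word");

int main(void)
{
	union demo_boot_options opts = { .all = 0 };

	opts.bits.usb4_dpia_bw_alloc_supported = 1;
	return opts.all ? 0 : 1;	/* the new flag is visible through .all */
}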
+	 */ +	DMUB_CMD__SECURE_DISPLAY = 85, + +	/**  	 * Command type used to set DPIA HPD interrupt state  	 */  	DMUB_CMD__DPIA_HPD_INT_ENABLE = 86, @@ -758,6 +776,10 @@ enum dmub_out_cmd_type {  	 * Command type used for SET_CONFIG Reply notification  	 */  	DMUB_OUT_CMD__SET_CONFIG_REPLY = 3, +	/** +	 * Command type used for USB4 DPIA notification +	 */ +	DMUB_OUT_CMD__DPIA_NOTIFICATION = 5,  };  /* DMUB_CMD__DPIA command sub-types. */ @@ -767,6 +789,11 @@ enum dmub_cmd_dpia_type {  	DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,  }; +/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */ +enum dmub_cmd_dpia_notification_type { +	DPIA_NOTIFY__BW_ALLOCATION = 0, +}; +  #pragma pack(push, 1)  /** @@ -1017,13 +1044,14 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {  			uint16_t vtotal;  			uint16_t htotal;  			uint8_t vblank_pipe_index; -			uint8_t padding[2]; +			uint8_t padding[1];  			struct {  				uint8_t drr_in_use;  				uint8_t drr_window_size_ms;	// Indicates largest VMIN/VMAX adjustment per frame  				uint16_t min_vtotal_supported;	// Min VTOTAL that supports switching in VBLANK  				uint16_t max_vtotal_supported;	// Max VTOTAL that can support SubVP static scheduling  				uint8_t use_ramping;		// Use ramping or not +				uint8_t drr_vblank_start_margin;  			} drr_info;				// DRR considered as part of SubVP + VBLANK case  		} vblank_data;  	} pipe_config; @@ -1192,6 +1220,23 @@ struct dmub_rb_cmd_dig1_transmitter_control {  };  /** + * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control + */ +struct dmub_rb_cmd_domain_control_data { +	uint8_t inst : 6; /**< DOMAIN instance to control */ +	uint8_t power_gate : 1; /**< 1=power gate, 0=power up */ +	uint8_t reserved[3]; /**< Reserved for future use */ +}; + +/** + * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating + */ +struct dmub_rb_cmd_domain_control { +	struct dmub_cmd_header header; /**< header */ +	struct dmub_rb_cmd_domain_control_data data; /**< payload */ +}; + +/**   * DPIA tunnel command parameters.   
*/  struct dmub_cmd_dig_dpia_control_data { @@ -1545,6 +1590,79 @@ struct dmub_rb_cmd_dp_set_config_reply {  };  /** + * Definition of a DPIA notification header + */ +struct dpia_notification_header { +	uint8_t instance; /**< DPIA Instance */ +	uint8_t reserved[3]; +	enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */ +}; + +/** + * Definition of the common data struct of DPIA notification + */ +struct dpia_notification_common { +	uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header) +								- sizeof(struct dpia_notification_header)]; +}; + +/** + * Definition of a DPIA notification data + */ +struct dpia_bw_allocation_notify_data { +	union { +		struct { +			uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */ +			uint16_t bw_request_failed: 1; /**< BW_Request_Failed */ +			uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */ +			uint16_t est_bw_changed: 1; /**< Estimated_BW changed */ +			uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capabiity_Changed */ +			uint16_t reserved: 11; /**< Reserved */ +		} bits; + +		uint16_t flags; +	}; + +	uint8_t cm_id; /**< CM ID */ +	uint8_t group_id; /**< Group ID */ +	uint8_t granularity; /**< BW Allocation Granularity */ +	uint8_t estimated_bw; /**< Estimated_BW */ +	uint8_t allocated_bw; /**< Allocated_BW */ +	uint8_t reserved; +}; + +/** + * union dpia_notify_data_type - DPIA Notification in Outbox command + */ +union dpia_notification_data { +	/** +	 * DPIA Notification for common data struct +	 */ +	struct dpia_notification_common common_data; + +	/** +	 * DPIA Notification for DP BW Allocation support +	 */ +	struct dpia_bw_allocation_notify_data dpia_bw_alloc; +}; + +/** + * Definition of a DPIA notification payload + */ +struct dpia_notification_payload { +	struct dpia_notification_header header; +	union dpia_notification_data data; /**< DPIA notification payload data */ +}; + +/** + * Definition of a DMUB_OUT_CMD__DPIA_NOTIFICATION command. + */ +struct dmub_rb_cmd_dpia_notification { +	struct dmub_cmd_header header; /**< DPIA notification header */ +	struct dpia_notification_payload payload; /**< DPIA notification payload */ +}; + +/**   * Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command.   */  struct dmub_cmd_hpd_state_query_data { @@ -1866,9 +1984,21 @@ struct dmub_cmd_psr_copy_settings_data {  	 */  	uint8_t use_phy_fsm;  	/** +	 * frame delay for frame re-lock +	 */ +	uint8_t relock_delay_frame_cnt; +	/**  	 * Explicit padding to 2 byte boundary.  	 */ -	uint8_t pad3[2]; +	uint8_t pad3; +	/** +	 * DSC Slice height. +	 */ +	uint16_t dsc_slice_height; +	/** +	 * Explicit padding to 4 byte boundary. 
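The copy-settings change above converts one of the two explicit pad3 bytes into relock_delay_frame_cnt, leaving the existing bytes in place, and then appends dsc_slice_height plus a trailing pad so the added payload stays an even four bytes. A standalone sketch of why carving a field out of an explicit pad is layout-neutral in a packed firmware struct; the structs here are illustrative, not the real DMUB definitions:

#include <stdint.h>

#pragma pack(push, 1)
/* Illustrative before/after of the copy-settings tail; not the real structs. */
struct demo_before {
	uint8_t use_phy_fsm;
	uint8_t pad3[2];
};

struct demo_after {
	uint8_t use_phy_fsm;
	uint8_t relock_delay_frame_cnt;	/* carved out of the old pad3[2] */
	uint8_t pad3;
};
#pragma pack(pop)

/* Re-purposing a pad byte keeps the packed layout byte-for-byte compatible. */
_Static_assert(sizeof(struct demo_before) == sizeof(struct demo_after),
	       "pad byte reuse must not change the packed size");

int main(void) { return 0; }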
+	 */ +	uint16_t pad;  };  /** @@ -3012,7 +3142,8 @@ struct dmub_rb_cmd_panel_cntl {   */  struct dmub_cmd_lvtma_control_data {  	uint8_t uc_pwr_action; /**< LVTMA_ACTION */ -	uint8_t reserved_0[3]; /**< For future use */ +	uint8_t bypass_panel_control_wait; +	uint8_t reserved_0[2]; /**< For future use */  	uint8_t panel_inst; /**< LVTMA control instance */  	uint8_t reserved_1[3]; /**< For future use */  }; @@ -3144,6 +3275,33 @@ struct dmub_rb_cmd_get_usbc_cable_id {  };  /** + * Command type of a DMUB_CMD__SECURE_DISPLAY command + */ +enum dmub_cmd_secure_display_type { +	DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0,		/* test command to only check if inbox message works */ +	DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE, +	DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY +}; + +/** + * Definition of a DMUB_CMD__SECURE_DISPLAY command + */ +struct dmub_rb_cmd_secure_display { +	struct dmub_cmd_header header; +	/** +	 * Data passed from driver to dmub firmware. +	 */ +	struct dmub_cmd_roi_info { +		uint16_t x_start; +		uint16_t x_end; +		uint16_t y_start; +		uint16_t y_end; +		uint8_t otg_id; +		uint8_t phy_id; +	} roi_info; +}; + +/**   * union dmub_rb_cmd - DMUB inbox command.   */  union dmub_rb_cmd { @@ -3188,6 +3346,10 @@ union dmub_rb_cmd {  	 */  	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;  	/** +	 * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command. +	 */ +	struct dmub_rb_cmd_domain_control domain_control; +	/**  	 * Definition of a DMUB_CMD__PSR_SET_VERSION command.  	 */  	struct dmub_rb_cmd_psr_set_version psr_set_version; @@ -3348,6 +3510,11 @@ union dmub_rb_cmd {  	 */  	struct dmub_rb_cmd_query_hpd_state query_hpd;  	/** +	 * Definition of a DMUB_CMD__SECURE_DISPLAY command. +	 */ +	struct dmub_rb_cmd_secure_display secure_display; + +	/**  	 * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.  	 */  	struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable; @@ -3373,6 +3540,10 @@ union dmub_rb_out_cmd {  	 * SET_CONFIG reply command.  	 */  	struct dmub_rb_cmd_dp_set_config_reply set_config_reply; +	/** +	 * DPIA notification command. 
+	 */ +	struct dmub_rb_cmd_dpia_notification dpia_notification;  };  #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 4a122925c3ae..92c18bfb98b3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,  	if (dmub->hw_funcs.reset)  		dmub->hw_funcs.reset(dmub); +	/* reset the cache of the last wptr as well now that hw is reset */ +	dmub->inbox1_last_wptr = 0; +  	cw0.offset.quad_part = inst_fb->gpu_addr;  	cw0.region.base = DMUB_CW0_BASE;  	cw0.region.top = cw0.region.base + inst_fb->size - 1; @@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)  	if (dmub->hw_funcs.reset)  		dmub->hw_funcs.reset(dmub); +	/* mailboxes have been reset in hw, so reset the sw state as well */ +	dmub->inbox1_last_wptr = 0; +	dmub->inbox1_rb.wrpt = 0; +	dmub->inbox1_rb.rptr = 0; +	dmub->outbox0_rb.wrpt = 0; +	dmub->outbox0_rb.rptr = 0; +	dmub->outbox1_rb.wrpt = 0; +	dmub->outbox1_rb.rptr = 0; +  	dmub->hw_init = false;  	return DMUB_STATUS_OK; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c index 44502ec919a2..74189102eaec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -92,6 +92,27 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,  		notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;  		notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;  		break; +	case DMUB_OUT_CMD__DPIA_NOTIFICATION: +		notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; +		notify->link_index = cmd.dpia_notification.payload.header.instance; + +		if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { + +			notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw = +					cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw; +			notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw = +					cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw; + +			if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed) +				notify->result = DPIA_BW_REQ_FAILED; +			else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) +				notify->result = DPIA_BW_REQ_SUCCESS; +			else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed) +				notify->result = DPIA_EST_BW_CHANGED; +			else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) +				notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; +		} +		break;  	default:  		notify->type = DMUB_NOTIFICATION_NO_DATA;  		break; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index a7ba5bd8dc16..31a12ce79a8e 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -35,6 +35,7 @@  #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C  #define DP_BRANCH_DEVICE_ID_006037 0x006037  #define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8 +#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD  #define DP_BRANCH_HW_REV_10 0x10  #define DP_BRANCH_HW_REV_20 0x20 @@ -133,6 +134,11 @@ static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};  static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = 
"7580\x80u"; +/*Travis*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; +/*Nutmeg*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; +  /*MST Dock*/  static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index b2df07f9e91c..c062a44db078 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -88,7 +88,10 @@ enum dpcd_phy_test_patterns {  	PHY_TEST_PATTERN_PRBS23 = 0x30,  	PHY_TEST_PATTERN_PRBS31 = 0x38,  	PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40, -	PHY_TEST_PATTERN_SQUARE_PULSE = 0x48, +	PHY_TEST_PATTERN_SQUARE = 0x48, +	PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49, +	PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A, +	PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B,  };  enum dpcd_test_dyn_range { diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index d1e91d31d151..18b9173d5a96 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -165,7 +165,12 @@ enum dp_test_pattern {  	DP_TEST_PATTERN_PRBS23,  	DP_TEST_PATTERN_PRBS31,  	DP_TEST_PATTERN_264BIT_CUSTOM, -	DP_TEST_PATTERN_SQUARE_PULSE, +	DP_TEST_PATTERN_SQUARE_BEGIN, +	DP_TEST_PATTERN_SQUARE = DP_TEST_PATTERN_SQUARE_BEGIN, +	DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED, +	DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED, +	DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED, +	DP_TEST_PATTERN_SQUARE_END = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,  	/* Link Training Patterns */  	DP_TEST_PATTERN_TRAINING_PATTERN1, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 447a0ec9cbe2..67a062af3ab0 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -61,7 +61,7 @@ static const int32_t numerator01[] = { 31308,   180000, 0,  0,  0};  static const int32_t numerator02[] = { 12920,   4500,   0,  0,  0};  static const int32_t numerator03[] = { 55,      99,     0,  0,  0};  static const int32_t numerator04[] = { 55,      99,     0,  0,  0}; -static const int32_t numerator05[] = { 2400,    2200,   2200, 2400, 2600}; +static const int32_t numerator05[] = { 2400,    2222,   2200, 2400, 2600};  /* one-time setup of X points */  void setup_x_points_distribution(void) @@ -1715,8 +1715,8 @@ static bool map_regamma_hw_to_x_user(  	const struct pwl_float_data_ex *rgb_regamma,  	uint32_t hw_points_num,  	struct dc_transfer_func_distributed_points *tf_pts, -	bool mapUserRamp, -	bool doClamping) +	bool map_user_ramp, +	bool do_clamping)  {  	/* setup to spare calculated ideal regamma values */ @@ -1724,7 +1724,7 @@ static bool map_regamma_hw_to_x_user(  	struct hw_x_point *coords = coords_x;  	const struct pwl_float_data_ex *regamma = rgb_regamma; -	if (ramp && mapUserRamp) { +	if (ramp && map_user_ramp) {  		copy_rgb_regamma_to_coordinates_x(coords,  				hw_points_num,  				rgb_regamma); @@ -1744,7 +1744,7 @@ static bool map_regamma_hw_to_x_user(  		}  	} -	if (doClamping) { +	if (do_clamping) {  		/* this should be named differently, all it does is clamp to 0-1 */  		build_new_custom_resulted_curve(hw_points_num, tf_pts);  	} @@ -1875,7 +1875,7 @@ rgb_user_alloc_fail:  bool mod_color_calculate_degamma_params(struct dc_color_caps 
*dc_caps,  		struct dc_transfer_func *input_tf, -		const struct dc_gamma *ramp, bool mapUserRamp) +		const struct dc_gamma *ramp, bool map_user_ramp)  {  	struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;  	struct dividers dividers; @@ -1883,7 +1883,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,  	struct pwl_float_data_ex *curve = NULL;  	struct gamma_pixel *axis_x = NULL;  	struct pixel_gamma_point *coeff = NULL; -	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; +	enum dc_transfer_func_predefined tf;  	uint32_t i;  	bool ret = false; @@ -1891,12 +1891,12 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,  		return false;  	/* we can use hardcoded curve for plain SRGB TF -	 * If linear, it's bypass if on user ramp +	 * If linear, it's bypass if no user ramp  	 */  	if (input_tf->type == TF_TYPE_PREDEFINED) {  		if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||  				input_tf->tf == TRANSFER_FUNCTION_LINEAR) && -				!mapUserRamp) +				!map_user_ramp)  			return true;  		if (dc_caps != NULL && @@ -1919,7 +1919,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,  	input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; -	if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) { +	if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) {  		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,  				sizeof(*rgb_user),  				GFP_KERNEL); @@ -2007,7 +2007,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,  		map_regamma_hw_to_x_user(ramp, coeff, rgb_user,  				coordinates_x, axis_x, curve,  				MAX_HW_POINTS, tf_pts, -				mapUserRamp && ramp && ramp->type == GAMMA_RGB_256, +				map_user_ramp && ramp && ramp->type == GAMMA_RGB_256,  				true);  	} @@ -2112,9 +2112,11 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,  }  bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, -		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, -		const struct hdr_tm_params *fs_params, -		struct calculate_buffer *cal_buffer) +					const struct dc_gamma *ramp, +					bool map_user_ramp, +					bool can_rom_be_used, +					const struct hdr_tm_params *fs_params, +					struct calculate_buffer *cal_buffer)  {  	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;  	struct dividers dividers; @@ -2123,27 +2125,27 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,  	struct pwl_float_data_ex *rgb_regamma = NULL;  	struct gamma_pixel *axis_x = NULL;  	struct pixel_gamma_point *coeff = NULL; -	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; -	bool doClamping = true; +	enum dc_transfer_func_predefined tf; +	bool do_clamping = true;  	bool ret = false;  	if (output_tf->type == TF_TYPE_BYPASS)  		return false;  	/* we can use hardcoded curve for plain SRGB TF */ -	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && +	if (output_tf->type == TF_TYPE_PREDEFINED && can_rom_be_used == true &&  			output_tf->tf == TRANSFER_FUNCTION_SRGB) {  		if (ramp == NULL)  			return true;  		if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || -				(!mapUserRamp && ramp->type == GAMMA_RGB_256)) +		    (!map_user_ramp && ramp->type == GAMMA_RGB_256))  			return true;  	}  	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;  	if (ramp && ramp->type != GAMMA_CS_TFM_1D && -			(mapUserRamp || ramp->type != GAMMA_RGB_256)) { +	    (map_user_ramp || ramp->type != GAMMA_RGB_256)) {  		rgb_user = 
kvcalloc(ramp->num_entries + _EXTRA_POINTS,  			    sizeof(*rgb_user),  			    GFP_KERNEL); @@ -2164,7 +2166,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,  				ramp->num_entries,  				dividers); -		if (ramp->type == GAMMA_RGB_256 && mapUserRamp) +		if (ramp->type == GAMMA_RGB_256 && map_user_ramp)  			scale_gamma(rgb_user, ramp, dividers);  		else if (ramp->type == GAMMA_RGB_FLOAT_1024)  			scale_gamma_dx(rgb_user, ramp, dividers); @@ -2191,15 +2193,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,  			cal_buffer);  	if (ret) { -		doClamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 && -			fs_params != NULL && fs_params->skip_tm == 0); +		do_clamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 && +				fs_params != NULL && fs_params->skip_tm == 0);  		map_regamma_hw_to_x_user(ramp, coeff, rgb_user, -				coordinates_x, axis_x, rgb_regamma, -				MAX_HW_POINTS, tf_pts, -				(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && -				(ramp && ramp->type != GAMMA_CS_TFM_1D), -				doClamping); +					 coordinates_x, axis_x, rgb_regamma, +					 MAX_HW_POINTS, tf_pts, +					 (map_user_ramp || (ramp && ramp->type != GAMMA_RGB_256)) && +					 (ramp && ramp->type != GAMMA_CS_TFM_1D), +					 do_clamping);  		if (ramp && ramp->type == GAMMA_CS_TFM_1D)  			apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); @@ -2215,89 +2217,3 @@ axis_x_alloc_fail:  rgb_user_alloc_fail:  	return ret;  } - -bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, -				struct dc_transfer_func_distributed_points *points) -{ -	uint32_t i; -	bool ret = false; -	struct pwl_float_data_ex *rgb_degamma = NULL; - -	if (trans == TRANSFER_FUNCTION_UNITY || -		trans == TRANSFER_FUNCTION_LINEAR) { - -		for (i = 0; i <= MAX_HW_POINTS ; i++) { -			points->red[i]    = coordinates_x[i].x; -			points->green[i]  = coordinates_x[i].x; -			points->blue[i]   = coordinates_x[i].x; -		} -		ret = true; -	} else if (trans == TRANSFER_FUNCTION_PQ) { -		rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, -				       sizeof(*rgb_degamma), -				       GFP_KERNEL); -		if (!rgb_degamma) -			goto rgb_degamma_alloc_fail; - - -		build_de_pq(rgb_degamma, -				MAX_HW_POINTS, -				coordinates_x); -		for (i = 0; i <= MAX_HW_POINTS ; i++) { -			points->red[i]    = rgb_degamma[i].r; -			points->green[i]  = rgb_degamma[i].g; -			points->blue[i]   = rgb_degamma[i].b; -		} -		ret = true; - -		kvfree(rgb_degamma); -	} else if (trans == TRANSFER_FUNCTION_SRGB || -		trans == TRANSFER_FUNCTION_BT709 || -		trans == TRANSFER_FUNCTION_GAMMA22 || -		trans == TRANSFER_FUNCTION_GAMMA24 || -		trans == TRANSFER_FUNCTION_GAMMA26) { -		rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, -				       sizeof(*rgb_degamma), -				       GFP_KERNEL); -		if (!rgb_degamma) -			goto rgb_degamma_alloc_fail; - -		build_degamma(rgb_degamma, -				MAX_HW_POINTS, -				coordinates_x, -				trans); -		for (i = 0; i <= MAX_HW_POINTS ; i++) { -			points->red[i]    = rgb_degamma[i].r; -			points->green[i]  = rgb_degamma[i].g; -			points->blue[i]   = rgb_degamma[i].b; -		} -		ret = true; - -		kvfree(rgb_degamma); -	} else if (trans == TRANSFER_FUNCTION_HLG) { -		rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, -				       sizeof(*rgb_degamma), -				       GFP_KERNEL); -		if (!rgb_degamma) -			goto rgb_degamma_alloc_fail; - -		build_hlg_degamma(rgb_degamma, -				MAX_HW_POINTS, -				coordinates_x, -				80, 1000); -		for (i = 0; i <= MAX_HW_POINTS ; i++) { -			points->red[i]    = rgb_degamma[i].r; -			
points->green[i]  = rgb_degamma[i].g; -			points->blue[i]   = rgb_degamma[i].b; -		} -		ret = true; -		kvfree(rgb_degamma); -	} -	points->end_exponent = 0; -	points->x_point_at_y1_red = 1; -	points->x_point_at_y1_green = 1; -	points->x_point_at_y1_blue = 1; - -rgb_degamma_alloc_fail: -	return ret; -} diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 2893abf48208..ee5c466613de 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -115,9 +115,6 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,  		struct dc_transfer_func *output_tf,  		const struct dc_gamma *ramp, bool mapUserRamp); -bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, -				struct dc_transfer_func_distributed_points *points); -  bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,  		const struct regamma_lut *regamma,  		struct calculate_buffer *cal_buffer, diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 0f39ab9dc5b4..2be45b314922 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -616,7 +616,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,  }  static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, -		struct dc_info_packet *infopacket) +		struct dc_info_packet *infopacket, +		bool freesync_on_desktop)  {  	unsigned int min_refresh;  	unsigned int max_refresh; @@ -649,9 +650,15 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,  		infopacket->sb[6] |= 0x02;  	/* PB6 = [Bit 2 = FreeSync Active] */ -	if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || +	if (freesync_on_desktop) { +		if (vrr->state != VRR_STATE_DISABLED && +			vrr->state != VRR_STATE_UNSUPPORTED) +			infopacket->sb[6] |= 0x04; +	} else { +		if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||  			vrr->state == VRR_STATE_ACTIVE_FIXED) -		infopacket->sb[6] |= 0x04; +			infopacket->sb[6] |= 0x04; +	}  	min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000;  	max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000; @@ -688,10 +695,10 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,  	if (app_tf != TRANSFER_FUNC_UNKNOWN) {  		infopacket->valid = true; -		infopacket->sb[6] |= 0x08;  // PB6 = [Bit 3 = Native Color Active] - -		if (app_tf == TRANSFER_FUNC_GAMMA_22) { -			infopacket->sb[9] |= 0x04;  // PB6 = [Bit 2 = Gamma 2.2 EOTF Active] +		if (app_tf != TRANSFER_FUNC_PQ2084) { +			infopacket->sb[6] |= 0x08;  // PB6 = [Bit 3 = Native Color Active] +			if (app_tf == TRANSFER_FUNC_GAMMA_22) +				infopacket->sb[9] |= 0x04;  // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]  		}  	}  } @@ -898,52 +905,20 @@ static void build_vrr_infopacket_v2(enum signal_type signal,  	infopacket->valid = true;  } -#ifndef TRIM_FSFT -static void build_vrr_infopacket_fast_transport_data( -	bool ftActive, -	unsigned int ftOutputRate, -	struct dc_info_packet *infopacket) -{ -	/* PB9 : bit7 - fast transport Active*/ -	unsigned char activeBit = (ftActive) ? 
1 << 7 : 0; - -	infopacket->sb[1] &= ~activeBit;  //clear bit -	infopacket->sb[1] |=  activeBit;  //set bit - -	/* PB13 : Target Output Pixel Rate [kHz] - bits 7:0  */ -	infopacket->sb[13] = ftOutputRate & 0xFF; - -	/* PB14 : Target Output Pixel Rate [kHz] - bits 15:8  */ -	infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF; - -	/* PB15 : Target Output Pixel Rate [kHz] - bits 23:16  */ -	infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF; - -} -#endif  static void build_vrr_infopacket_v3(enum signal_type signal,  		const struct mod_vrr_params *vrr, -#ifndef TRIM_FSFT -		bool ftActive, unsigned int ftOutputRate, -#endif  		enum color_transfer_func app_tf, -		struct dc_info_packet *infopacket) +		struct dc_info_packet *infopacket, +		bool freesync_on_desktop)  {  	unsigned int payload_size = 0;  	build_vrr_infopacket_header_v3(signal, infopacket, &payload_size); -	build_vrr_infopacket_data_v3(vrr, infopacket); +	build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop);  	build_vrr_infopacket_fs2_data(app_tf, infopacket); -#ifndef TRIM_FSFT -	build_vrr_infopacket_fast_transport_data( -			ftActive, -			ftOutputRate, -			infopacket); -#endif -  	build_vrr_infopacket_checksum(&payload_size, infopacket);  	infopacket->valid = true; @@ -980,31 +955,26 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,  	 * Check if Freesync is supported. Return if false. If true,  	 * set the corresponding bit in the info packet  	 */ +	bool freesync_on_desktop; +	bool fams_enable; + +	fams_enable = stream->ctx->dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; +	freesync_on_desktop = stream->freesync_on_desktop && fams_enable; +  	if (!vrr->send_info_frame)  		return;  	switch (packet_type) {  	case PACKET_TYPE_FS_V3: -#ifndef TRIM_FSFT -		// always populate with pixel rate. -		build_vrr_infopacket_v3( -				stream->signal, vrr, -				stream->timing.flags.FAST_TRANSPORT, -				(stream->timing.flags.FAST_TRANSPORT) ? 
-						stream->timing.fast_transport_output_rate_100hz : -						stream->timing.pix_clk_100hz, -				app_tf, infopacket); -#else -		build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket); -#endif +		build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);  		break;  	case PACKET_TYPE_FS_V2: -		build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop); +		build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);  		break;  	case PACKET_TYPE_VRR:  	case PACKET_TYPE_FS_V1:  	default: -		build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop); +		build_vrr_infopacket_v1(stream->signal, vrr, infopacket, freesync_on_desktop);  	}  	if (true == pack_sdp_v1_3 && diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index 1d8b746b02f2..66dc9a19aebe 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -35,9 +35,46 @@ struct mod_vrr_params;  void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  		struct dc_info_packet *info_packet, -		enum dc_color_space cs); +		enum dc_color_space cs, +		enum color_transfer_func tf);  void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,  		struct dc_info_packet *info_packet); +enum adaptive_sync_type { +	ADAPTIVE_SYNC_TYPE_NONE                  = 0, +	ADAPTIVE_SYNC_TYPE_DP                    = 1, +	FREESYNC_TYPE_PCON_IN_WHITELIST          = 2, +	FREESYNC_TYPE_PCON_NOT_IN_WHITELIST      = 3, +	ADAPTIVE_SYNC_TYPE_EDP                   = 4, +}; + +enum adaptive_sync_sdp_version { +	AS_SDP_VER_0 = 0x0, +	AS_SDP_VER_1 = 0x1, +	AS_SDP_VER_2 = 0x2, +}; + +#define AS_DP_SDP_LENGTH (9) + +struct frame_duration_op { +	bool          support; +	unsigned char frame_duration_hex; +}; + +struct AS_Df_params { +	bool   supportMode; +	struct frame_duration_op increase; +	struct frame_duration_op decrease; +}; + +void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, +		enum adaptive_sync_type asType, const struct AS_Df_params *param, +		struct dc_info_packet *info_packet); + +void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream, +		const struct AS_Df_params *param, struct dc_info_packet *info_packet); + +void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet); +  #endif diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 27ceba9d6d65..ec64f19e1786 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -132,7 +132,8 @@ enum ColorimetryYCCDP {  void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  		struct dc_info_packet *info_packet, -		enum dc_color_space cs) +		enum dc_color_space cs, +		enum color_transfer_func tf)  {  	unsigned int vsc_packet_revision = vsc_packet_undefined;  	unsigned int i; @@ -382,6 +383,9 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  				colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;  			else if (cs == COLOR_SPACE_2020_YCBCR)  				colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr; + +			if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22) +				colorimetryFormat = ColorimetryYCC_DP_ITU709;  			break;  		default: @@ -515,3 +519,58 
@@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,  		info_packet->valid = true;  } +void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, +		enum adaptive_sync_type asType, +		const struct AS_Df_params *param, +		struct dc_info_packet *info_packet) +{ +	info_packet->valid = false; + +	memset(info_packet, 0, sizeof(struct dc_info_packet)); + +	switch (asType) { +	case ADAPTIVE_SYNC_TYPE_DP: +		if (stream != NULL) +			mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet); +		break; +	case FREESYNC_TYPE_PCON_IN_WHITELIST: +		mod_build_adaptive_sync_infopacket_v1(info_packet); +		break; +	case ADAPTIVE_SYNC_TYPE_NONE: +	case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST: +	default: +		break; +	} +} + +void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet) +{ +	info_packet->valid = true; +	// HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length} +	info_packet->hb0 = 0x00; +	info_packet->hb1 = 0x22; +	info_packet->hb2 = AS_SDP_VER_1; +	info_packet->hb3 = 0x00; +} + +void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream, +		const struct AS_Df_params *param, +		struct dc_info_packet *info_packet) +{ +	info_packet->valid = true; +	// HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length} +	info_packet->hb0 = 0x00; +	info_packet->hb1 = 0x22; +	info_packet->hb2 = AS_SDP_VER_2; +	info_packet->hb3 = AS_DP_SDP_LENGTH; + +	//Payload +	info_packet->sb[0] = param->supportMode; //1: AVT; 0: FAVT +	info_packet->sb[1] = (stream->timing.v_total & 0x00FF); +	info_packet->sb[2] = (stream->timing.v_total & 0xFF00) >> 8; +	//info_packet->sb[3] = 0x00; Target RR, not use fot AVT +	info_packet->sb[4] = (param->increase.support << 6 | param->decrease.support << 7); +	info_packet->sb[5] = param->increase.frame_duration_hex; +	info_packet->sb[6] = param->decrease.frame_duration_hex; +} + diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 235259d6c5a1..e39b133d05af 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -102,9 +102,18 @@ static const struct abm_parameters abm_settings_config1[abm_defines_max_level] =  	{0x82,   0x4d,    0x20,       0x00,     0x00,        0xff,     0xb3, 0x70,     0x70,     0xcccc,  0xcccc},  }; +static const struct abm_parameters abm_settings_config2[abm_defines_max_level] = { +//  min_red  max_red  bright_pos  dark_pos  bright_gain  contrast  dev   min_knee  max_knee  blRed    blStart +	{0xf0,   0xbf,    0x20,       0x00,     0x88,        0x99,     0xb3, 0x40,     0xe0,    0x0000,  0xcccc}, +	{0xd8,   0x85,    0x20,       0x00,     0x70,        0x90,     0xa8, 0x40,     0xc8,    0x0700,  0xb333}, +	{0xb8,   0x58,    0x20,       0x00,     0x64,        0x88,     0x78, 0x70,     0xa0,    0x7000,  0x9999}, +	{0x82,   0x40,    0x20,       0x00,     0x00,        0xb8,     0xb3, 0x70,     0x70,    0xc333,  0xb333}, +}; +  static const struct abm_parameters * const abm_settings[] = {  	abm_settings_config0,  	abm_settings_config1, +	abm_settings_config2,  };  #define NUM_AMBI_LEVEL    5 @@ -907,3 +916,34 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s  {  	return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);  } + +bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, +			      struct dc_stream_state *stream, +			      struct psr_config 
*config) +{ +	uint16_t pic_height; +	uint16_t slice_height; + +	config->dsc_slice_height = 0; +	if ((link->connector_signal & SIGNAL_TYPE_EDP) && +	    (!dc->caps.edp_dsc_support || +	    link->panel_config.dsc.disable_dsc_edp || +	    !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || +	    !stream->timing.dsc_cfg.num_slices_v)) +		return true; + +	pic_height = stream->timing.v_addressable + +		stream->timing.v_border_top + stream->timing.v_border_bottom; +	slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v; +	config->dsc_slice_height = slice_height; + +	if (slice_height) { +		if (config->su_y_granularity && +		    (slice_height % config->su_y_granularity)) { +			ASSERT(0); +			return false; +		} +	} + +	return true; +} diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 316452e9dbc9..1d3079e56799 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,  		const struct dc_stream_state *stream);  bool mod_power_only_edp(const struct dc_state *context,  		const struct dc_stream_state *stream); +bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, +			      struct dc_stream_state *stream, +			      struct psr_config *config);  #endif /* MODULES_POWER_POWER_HELPERS_H_ */
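
The new struct dmub_rb_cmd_domain_control above pairs the common dmub_cmd_header with a one-byte inst/power_gate bitfield. A minimal sketch of how a caller might fill it; the DMUB_CMD__VBIOS parent type and DMUB_CMD__VBIOS_DOMAIN_CONTROL sub-type enumerators live elsewhere in dmub_cmd.h (outside the hunks shown), and the submission helper is a stand-in, not part of this patch:

/* Illustrative only: dmub_send_cmd() stands in for the real inbox
 * submission path (e.g. the dc_dmub_srv queue/execute helpers).
 */
static void example_domain_power_gate(struct dmub_srv *dmub, uint8_t inst, bool gate)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.domain_control.header.type = DMUB_CMD__VBIOS;
	cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
	cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);

	cmd.domain_control.data.inst = inst;		/* DOMAIN instance (6 bits) */
	cmd.domain_control.data.power_gate = gate;	/* 1 = power gate, 0 = power up */

	dmub_send_cmd(dmub, &cmd);			/* hypothetical helper */
}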
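
Similarly, struct dmub_rb_cmd_secure_display and enum dmub_cmd_secure_display_type describe a CRC region-of-interest message. A sketch of how a driver might notify DMCUB of a CRC window; only the command layout comes from the hunk above, the function name and submission helper are assumptions:

static void example_notify_crc_window(struct dmub_srv *dmub, uint8_t otg_id,
				      uint16_t x_start, uint16_t x_end,
				      uint16_t y_start, uint16_t y_end)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
	cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
	cmd.secure_display.header.payload_bytes = sizeof(cmd.secure_display.roi_info);

	cmd.secure_display.roi_info.otg_id = otg_id;
	cmd.secure_display.roi_info.x_start = x_start;
	cmd.secure_display.roi_info.x_end = x_end;
	cmd.secure_display.roi_info.y_start = y_start;
	cmd.secure_display.roi_info.y_end = y_end;

	dmub_send_cmd(dmub, &cmd);	/* hypothetical submission helper */
}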
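
On the receive side, the dmub_srv_stat_get_notification() change above folds the BW-allocation status bits into notify->result. A possible driver-side consumer, sketched with hypothetical per-case handling (the dispatch function itself is not part of this patch):

static void example_handle_dpia_notification(struct dmub_notification *notify)
{
	if (notify->type != DMUB_NOTIFICATION_DPIA_NOTIFICATION)
		return;

	switch (notify->result) {
	case DPIA_BW_REQ_SUCCESS:
	case DPIA_BW_REQ_FAILED:
		/* complete the pending BW request on link notify->link_index */
		break;
	case DPIA_EST_BW_CHANGED:
		/* pick up notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw */
		break;
	case DPIA_BW_ALLOC_CAPS_CHANGED:
		/* CM allocation capability changed; re-query support */
		break;
	default:
		break;
	}
}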
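
The PHY_TEST_PATTERN_SQUARE family now spans four DPCD encodings (0x48-0x4B), and link_service_types.h mirrors them with DP_TEST_PATTERN_SQUARE_BEGIN/_END markers so range checks need not enumerate every variant. For example, a hypothetical predicate:

/* Hypothetical helper: true for any of the four square-pattern variants. */
static bool example_is_square_pattern(enum dp_test_pattern pattern)
{
	return pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
	       pattern <= DP_TEST_PATTERN_SQUARE_END;
}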
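
psr_su_set_dsc_slice_height() records the DSC slice height in psr_config and fails only when that height is not a multiple of the SU Y granularity. A caller preparing PSR-SU might gate enablement on its return value roughly as below; the wrapper is illustrative, not the driver's actual call site:

static bool example_prepare_psr_su(struct dc *dc, struct dc_link *link,
				   struct dc_stream_state *stream,
				   struct psr_config *config)
{
	/* config->su_y_granularity is assumed to be filled in already */
	if (!psr_su_set_dsc_slice_height(dc, link, stream, config))
		return false;	/* incompatible DSC slice height: skip PSR-SU */

	return true;
}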