diff options
| author | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
| -rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 843 | 
1 file changed, 620 insertions, 223 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1b7f20a9d4ae..009ef917dad4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -28,7 +28,6 @@  #include "dm_services_types.h"  #include "dc.h" -#include "dc_link_dp.h"  #include "link_enc_cfg.h"  #include "dc/inc/core_types.h"  #include "dal_asic_id.h" @@ -39,6 +38,11 @@  #include "dc/dc_edid_parser.h"  #include "dc/dc_stat.h"  #include "amdgpu_dm_trace.h" +#include "dpcd_defs.h" +#include "link/protocols/link_dpcd.h" +#include "link_service_types.h" +#include "link/protocols/link_dp_capability.h" +#include "link/protocols/link_ddc.h"  #include "vid.h"  #include "amdgpu.h" @@ -66,7 +70,7 @@  #include "ivsrcid/ivsrcid_vislands30.h" -#include "i2caux_interface.h" +#include <linux/backlight.h>  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <linux/types.h> @@ -104,7 +108,6 @@  #include "modules/inc/mod_freesync.h"  #include "modules/power/power_helpers.h" -#include "modules/inc/mod_info_packet.h"  #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"  MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); @@ -210,7 +213,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  				    struct amdgpu_dm_connector *amdgpu_dm_connector, -				    uint32_t link_index, +				    u32 link_index,  				    struct amdgpu_encoder *amdgpu_encoder);  static int amdgpu_dm_encoder_init(struct drm_device *dev,  				  struct amdgpu_encoder *aencoder, @@ -262,7 +265,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)  static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,  				  u32 *vbl, u32 *position)  { -	uint32_t v_blank_start, v_blank_end, h_position, v_position; +	u32 v_blank_start, v_blank_end, h_position, v_position;  	if ((crtc < 0) || (crtc >= 
adev->mode_info.num_crtc))  		return -EINVAL; @@ -361,7 +364,7 @@ static void dm_pflip_high_irq(void *interrupt_params)  	struct amdgpu_device *adev = irq_params->adev;  	unsigned long flags;  	struct drm_pending_vblank_event *e; -	uint32_t vpos, hpos, v_blank_start, v_blank_end; +	u32 vpos, hpos, v_blank_start, v_blank_end;  	bool vrr_active;  	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -648,7 +651,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,  	struct drm_connector *connector;  	struct drm_connector_list_iter iter;  	struct dc_link *link; -	uint8_t link_index = 0; +	u8 link_index = 0;  	struct drm_device *dev;  	if (adev == NULL) @@ -749,7 +752,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)  	struct amdgpu_device *adev = irq_params->adev;  	struct amdgpu_display_manager *dm = &adev->dm;  	struct dmcub_trace_buf_entry entry = { 0 }; -	uint32_t count = 0; +	u32 count = 0;  	struct dmub_hpd_work *dmub_hpd_wrk;  	struct dc_link *plink = NULL; @@ -1015,7 +1018,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)  	struct dmub_srv_hw_params hw_params;  	enum dmub_status status;  	const unsigned char *fw_inst_const, *fw_bss_data; -	uint32_t i, fw_inst_const_size, fw_bss_data_size; +	u32 i, fw_inst_const_size, fw_bss_data_size;  	bool has_hw_support;  	if (!dmub_srv) @@ -1176,32 +1179,46 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)  static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)  { -	uint64_t pt_base; -	uint32_t logical_addr_low; -	uint32_t logical_addr_high; -	uint32_t agp_base, agp_bot, agp_top; +	u64 pt_base; +	u32 logical_addr_low; +	u32 logical_addr_high; +	u32 agp_base, agp_bot, agp_top;  	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;  	memset(pa_config, 0, sizeof(*pa_config)); -	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; -	pt_base = 
amdgpu_gmc_pd_addr(adev->gart.bo); - -	if (adev->apu_flags & AMD_APU_IS_RAVEN2) -		/* -		 * Raven2 has a HW issue that it is unable to use the vram which -		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the -		 * workaround that increase system aperture high address (add 1) -		 * to get rid of the VM fault and hardware hang. -		 */ -		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); -	else -		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; -  	agp_base = 0;  	agp_bot = adev->gmc.agp_start >> 24;  	agp_top = adev->gmc.agp_end >> 24; +	/* AGP aperture is disabled */ +	if (agp_bot == agp_top) { +		logical_addr_low = adev->gmc.fb_start >> 18; +		if (adev->apu_flags & AMD_APU_IS_RAVEN2) +			/* +			 * Raven2 has a HW issue that it is unable to use the vram which +			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the +			 * workaround that increase system aperture high address (add 1) +			 * to get rid of the VM fault and hardware hang. +			 */ +			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; +		else +			logical_addr_high = adev->gmc.fb_end >> 18; +	} else { +		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; +		if (adev->apu_flags & AMD_APU_IS_RAVEN2) +			/* +			 * Raven2 has a HW issue that it is unable to use the vram which +			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the +			 * workaround that increase system aperture high address (add 1) +			 * to get rid of the VM fault and hardware hang. 
+			 */ +			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); +		else +			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; +	} + +	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);  	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;  	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); @@ -1225,10 +1242,25 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_  	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;  	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; -	pa_config->is_hvm_enabled = 0; +	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;  } +static void force_connector_state( +	struct amdgpu_dm_connector *aconnector, +	enum drm_connector_force force_state) +{ +	struct drm_connector *connector = &aconnector->base; + +	mutex_lock(&connector->dev->mode_config.mutex); +	aconnector->base.force = force_state; +	mutex_unlock(&connector->dev->mode_config.mutex); + +	mutex_lock(&aconnector->hpd_lock); +	drm_kms_helper_connector_hotplug_event(connector); +	mutex_unlock(&aconnector->hpd_lock); +} +  static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  {  	struct hpd_rx_irq_offload_work *offload_work; @@ -1237,6 +1269,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  	struct amdgpu_device *adev;  	enum dc_connection_type new_connection_type = dc_connection_none;  	unsigned long flags; +	union test_response test_response; + +	memset(&test_response, 0, sizeof(test_response));  	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);  	aconnector = offload_work->offload_wq->aconnector; @@ -1250,7 +1285,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  	dc_link = aconnector->dc_link;  	mutex_lock(&aconnector->hpd_lock); -	if (!dc_link_detect_sink(dc_link, &new_connection_type)) +	if (!dc_link_detect_connection_type(dc_link, 
&new_connection_type))  		DRM_ERROR("KMS: Failed to detect connector\n");  	mutex_unlock(&aconnector->hpd_lock); @@ -1261,15 +1296,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  		goto skip;  	mutex_lock(&adev->dm.dc_lock); -	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) +	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {  		dc_link_dp_handle_automated_test(dc_link); + +		if (aconnector->timing_changed) { +			/* force connector disconnect and reconnect */ +			force_connector_state(aconnector, DRM_FORCE_OFF); +			msleep(100); +			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); +		} + +		test_response.bits.ACK = 1; + +		core_link_write_dpcd( +		dc_link, +		DP_TEST_RESPONSE, +		&test_response.raw, +		sizeof(test_response)); +	}  	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && -			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && +			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&  			dc_link_dp_allow_hpd_rx_irq(dc_link)) { -		dc_link_dp_handle_link_loss(dc_link); +		/* offload_work->data is from handle_hpd_rx_irq-> +		 * schedule_hpd_rx_offload_work.this is defer handle +		 * for hpd short pulse. upon here, link status may be +		 * changed, need get latest link status from dpcd +		 * registers. if link status is good, skip run link +		 * training again. 
+		 */ +		union hpd_irq_data irq_data; + +		memset(&irq_data, 0, sizeof(irq_data)); + +		/* before dc_link_dp_handle_link_loss, allow new link lost handle +		 * request be added to work queue if link lost at end of dc_link_ +		 * dp_handle_link_loss +		 */  		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);  		offload_work->offload_wq->is_handling_link_loss = false;  		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + +		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && +			dc_link_check_link_loss_status(dc_link, &irq_data)) +			dc_link_dp_handle_link_loss(dc_link);  	}  	mutex_unlock(&adev->dm.dc_lock); @@ -1513,6 +1582,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  		}  		break;  	} +	if (init_data.flags.gpu_vm_support && +	    (amdgpu_sg_display == 0)) +		init_data.flags.gpu_vm_support = false;  	if (init_data.flags.gpu_vm_support)  		adev->mode_info.gpu_vm_support = true; @@ -1534,6 +1606,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)  		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; +	/* Disable SubVP + DRR config by default */ +	init_data.flags.disable_subvp_drr = true; +	if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR) +		init_data.flags.disable_subvp_drr = false; +  	init_data.flags.seamless_boot_edp_requested = false;  	if (check_seamless_boot_capability(adev)) { @@ -1589,6 +1666,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */  	adev->dm.dc->debug.ignore_cable_id = true; +	/* TODO: There is a new drm mst change where the freedom of +	 * vc_next_start_slot update is revoked/moved into drm, instead of in +	 * driver. This forces us to make sure to get vc_next_start_slot updated +	 * in drm function each time without considering if mst_state is active +	 * or not. 
Otherwise, next time hotplug will give wrong start_slot +	 * number. We are implementing a temporary solution to even notify drm +	 * mst deallocation when link is no longer of MST type when uncommitting +	 * the stream so we will have more time to work on a proper solution. +	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we +	 * should notify drm to do a complete "reset" of its states and stop +	 * calling further drm mst functions when link is no longer of an MST +	 * type. This could happen when we unplug an MST hubs/displays. When +	 * uncommit stream comes later after unplug, we should just reset +	 * hardware states only. +	 */ +	adev->dm.dc->debug.temp_mst_deallocation_sequence = true; + +	if (adev->dm.dc->caps.dp_hdmi21_pcon_support) +		DRM_INFO("DP-HDMI FRL PCON supported\n"); +  	r = dm_dmub_hw_init(adev);  	if (r) {  		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1642,7 +1739,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	}  #endif  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); +	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); +	if (!adev->dm.secure_display_ctxs) { +		DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n"); +	}  #endif  	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {  		init_completion(&adev->dm.dmub_aux_transfer_done); @@ -1730,17 +1830,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  		adev->dm.vblank_control_workqueue = NULL;  	} -	for (i = 0; i < adev->dm.display_indexes_num; i++) { -		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); -	} -  	amdgpu_dm_destroy_drm_device(&adev->dm);  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -	if (adev->dm.crc_rd_wrk) { -		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); -		kfree(adev->dm.crc_rd_wrk); -		adev->dm.crc_rd_wrk = NULL; +	if (adev->dm.secure_display_ctxs) { +		for (i = 0; i < 
adev->mode_info.num_crtc; i++) { +			if (adev->dm.secure_display_ctxs[i].crtc) { +				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); +				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); +			} +		} +		kfree(adev->dm.secure_display_ctxs); +		adev->dm.secure_display_ctxs = NULL;  	}  #endif  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -1875,25 +1976,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)  		return 0;  	} -	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); -	if (r == -ENOENT) { +	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); +	if (r == -ENODEV) {  		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */  		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");  		adev->dm.fw_dmcu = NULL;  		return 0;  	}  	if (r) { -		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", -			fw_name_dmcu); -		return r; -	} - -	r = amdgpu_ucode_validate(adev->dm.fw_dmcu); -	if (r) {  		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",  			fw_name_dmcu); -		release_firmware(adev->dm.fw_dmcu); -		adev->dm.fw_dmcu = NULL; +		amdgpu_ucode_release(&adev->dm.fw_dmcu);  		return r;  	} @@ -1939,7 +2032,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	struct dmub_srv_fb_info *fb_info;  	struct dmub_srv *dmub_srv;  	const struct dmcub_firmware_header_v1_0 *hdr; -	const char *fw_name_dmub;  	enum dmub_asic dmub_asic;  	enum dmub_status status;  	int r; @@ -1947,73 +2039,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	switch (adev->ip_versions[DCE_HWIP][0]) {  	case IP_VERSION(2, 1, 0):  		dmub_asic = DMUB_ASIC_DCN21; -		fw_name_dmub = FIRMWARE_RENOIR_DMUB; -		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) -			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;  		break;  	case IP_VERSION(3, 0, 0): -		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { -			dmub_asic = DMUB_ASIC_DCN30; -			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; -		} else { -			
dmub_asic = DMUB_ASIC_DCN30; -			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; -		} +		dmub_asic = DMUB_ASIC_DCN30;  		break;  	case IP_VERSION(3, 0, 1):  		dmub_asic = DMUB_ASIC_DCN301; -		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;  		break;  	case IP_VERSION(3, 0, 2):  		dmub_asic = DMUB_ASIC_DCN302; -		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;  		break;  	case IP_VERSION(3, 0, 3):  		dmub_asic = DMUB_ASIC_DCN303; -		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;  		break;  	case IP_VERSION(3, 1, 2):  	case IP_VERSION(3, 1, 3):  		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; -		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;  		break;  	case IP_VERSION(3, 1, 4):  		dmub_asic = DMUB_ASIC_DCN314; -		fw_name_dmub = FIRMWARE_DCN_314_DMUB;  		break;  	case IP_VERSION(3, 1, 5):  		dmub_asic = DMUB_ASIC_DCN315; -		fw_name_dmub = FIRMWARE_DCN_315_DMUB;  		break;  	case IP_VERSION(3, 1, 6):  		dmub_asic = DMUB_ASIC_DCN316; -		fw_name_dmub = FIRMWARE_DCN316_DMUB;  		break;  	case IP_VERSION(3, 2, 0):  		dmub_asic = DMUB_ASIC_DCN32; -		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;  		break;  	case IP_VERSION(3, 2, 1):  		dmub_asic = DMUB_ASIC_DCN321; -		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;  		break;  	default:  		/* ASIC doesn't support DMUB. */  		return 0;  	} -	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); -	if (r) { -		DRM_ERROR("DMUB firmware loading failed: %d\n", r); -		return 0; -	} - -	r = amdgpu_ucode_validate(adev->dm.dmub_fw); -	if (r) { -		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); -		return 0; -	} -  	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;  	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); @@ -2080,7 +2142,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	 * TODO: Move this into GART.  	 
*/  	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, -				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, +				    AMDGPU_GEM_DOMAIN_VRAM | +				    AMDGPU_GEM_DOMAIN_GTT, +				    &adev->dm.dmub_bo,  				    &adev->dm.dmub_bo_gpu_addr,  				    &adev->dm.dmub_bo_cpu_addr);  	if (r) @@ -2135,11 +2199,8 @@ static int dm_sw_fini(void *handle)  		adev->dm.dmub_srv = NULL;  	} -	release_firmware(adev->dm.dmub_fw); -	adev->dm.dmub_fw = NULL; - -	release_firmware(adev->dm.fw_dmcu); -	adev->dm.fw_dmcu = NULL; +	amdgpu_ucode_release(&adev->dm.dmub_fw); +	amdgpu_ucode_release(&adev->dm.fw_dmcu);  	return 0;  } @@ -2165,6 +2226,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)  				DRM_ERROR("DM_MST: Failed to start MST\n");  				aconnector->dc_link->type =  					dc_connection_single; +				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, +								     aconnector->dc_link);  				break;  			}  		} @@ -2233,7 +2296,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)  	drm_for_each_connector_iter(connector, &iter) {  		aconnector = to_amdgpu_dm_connector(connector);  		if (aconnector->dc_link->type != dc_connection_mst_branch || -		    aconnector->mst_port) +		    aconnector->mst_root)  			continue;  		mgr = &aconnector->mst_mgr; @@ -2241,6 +2304,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)  		if (suspend) {  			drm_dp_mst_topology_mgr_suspend(mgr);  		} else { +			/* if extended timeout is supported in hardware, +			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer +			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 
+			 */ +			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); +			if (!dp_is_lttpr_present(aconnector->dc_link)) +				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); +  			ret = drm_dp_mst_topology_mgr_resume(mgr, true);  			if (ret < 0) {  				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, @@ -2486,7 +2557,7 @@ struct amdgpu_dm_connector *  amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,  					     struct drm_crtc *crtc)  { -	uint32_t i; +	u32 i;  	struct drm_connector_state *new_con_state;  	struct drm_connector *connector;  	struct drm_crtc *crtc_from_state; @@ -2734,16 +2805,18 @@ static int dm_resume(void *handle)  	drm_for_each_connector_iter(connector, &iter) {  		aconnector = to_amdgpu_dm_connector(connector); +		if (!aconnector->dc_link) +			continue; +  		/*  		 * this is the case when traversing through already created  		 * MST connectors, should be skipped  		 */ -		if (aconnector->dc_link && -		    aconnector->dc_link->type == dc_connection_mst_branch) +		if (aconnector->dc_link->type == dc_connection_mst_branch)  			continue;  		mutex_lock(&aconnector->hpd_lock); -		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) +		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3021,6 +3094,10 @@ void amdgpu_dm_update_connector_after_detect(  						    aconnector->edid);  		} +		aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); +		if (!aconnector->timing_requested) +			dm_error("%s: failed to create aconnector->requested_timing\n", __func__); +  		drm_connector_update_edid_property(connector, aconnector->edid);  		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);  		update_connector_ext_caps(aconnector); @@ -3032,6 
+3109,8 @@ void amdgpu_dm_update_connector_after_detect(  		dc_sink_release(aconnector->dc_sink);  		aconnector->dc_sink = NULL;  		aconnector->edid = NULL; +		kfree(aconnector->timing_requested); +		aconnector->timing_requested = NULL;  #ifdef CONFIG_DRM_AMD_DC_HDCP  		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */  		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) @@ -3076,7 +3155,9 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)  	if (aconnector->fake_enable)  		aconnector->fake_enable = false; -	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) +	aconnector->timing_changed = false; + +	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))  		DRM_ERROR("KMS: Failed to detect connector\n");  	if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3117,8 +3198,8 @@ static void handle_hpd_irq(void *param)  static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  { -	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; -	uint8_t dret; +	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; +	u8 dret;  	bool new_irq_handled = false;  	int dpcd_addr;  	int dpcd_bytes_to_read; @@ -3146,7 +3227,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  	while (dret == dpcd_bytes_to_read &&  		process_count < max_process_count) { -		uint8_t retry; +		u8 retry;  		dret = 0;  		process_count++; @@ -3165,7 +3246,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)  				dpcd_bytes_to_read - 1;  			for (retry = 0; retry < 3; retry++) { -				uint8_t wret; +				u8 wret;  				wret = drm_dp_dpcd_write(  					&aconnector->dm_dp_aux.aux, @@ -3225,7 +3306,7 @@ static void handle_hpd_rx_irq(void *param)  	union hpd_irq_data hpd_irq_data;  	bool link_loss = false;  	bool has_left_work = false; -	int idx = aconnector->base.index; +	
int idx = dc_link->link_index;  	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];  	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3279,7 +3360,7 @@ static void handle_hpd_rx_irq(void *param)  out:  	if (result && !is_mst_root_connector) {  		/* Downstream Port status changed. */ -		if (!dc_link_detect_sink(dc_link, &new_connection_type)) +		if (!dc_link_detect_connection_type(dc_link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -3367,7 +3448,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)  					(void *) aconnector);  			if (adev->dm.hpd_rx_offload_wq) -				adev->dm.hpd_rx_offload_wq[connector->index].aconnector = +				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =  					aconnector;  		}  	} @@ -4179,20 +4260,23 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);  static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  {  	struct amdgpu_display_manager *dm = &adev->dm; -	int32_t i; +	s32 i;  	struct amdgpu_dm_connector *aconnector = NULL;  	struct amdgpu_encoder *aencoder = NULL;  	struct amdgpu_mode_info *mode_info = &adev->mode_info; -	uint32_t link_cnt; -	int32_t primary_planes; +	u32 link_cnt; +	s32 primary_planes;  	enum dc_connection_type new_connection_type = dc_connection_none;  	const struct dc_plane_cap *plane;  	bool psr_feature_enabled = false; +	int max_overlay = dm->dc->caps.max_slave_planes;  	dm->display_indexes_num = dm->dc->caps.max_streams;  	/* Update the actual used number of crtc */  	adev->mode_info.num_crtc = adev->dm.display_indexes_num; +	amdgpu_dm_set_irq_funcs(adev); +  	link_cnt = dm->dc->caps.max_links;  	if (amdgpu_dm_mode_config_init(dm->adev)) {  		DRM_ERROR("DM: Failed to initialize mode config\n"); @@ -4242,14 +4326,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		if 
(!plane->pixel_format_support.argb8888)  			continue; +		if (max_overlay-- == 0) +			break; +  		if (initialize_plane(dm, NULL, primary_planes + i,  				     DRM_PLANE_TYPE_OVERLAY, plane)) {  			DRM_ERROR("KMS: Failed to initialize overlay plane\n");  			goto fail;  		} - -		/* Only create one overlay plane. */ -		break;  	}  	for (i = 0; i < dm->dc->caps.max_streams; i++) @@ -4328,7 +4412,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		link = dc_get_link_at_index(dm->dc, i); -		if (!dc_link_detect_sink(link, &new_connection_type)) +		if (!dc_link_detect_connection_type(link, &new_connection_type))  			DRM_ERROR("KMS: Failed to detect connector\n");  		if (aconnector->base.force && new_connection_type == dc_connection_none) { @@ -4504,9 +4588,75 @@ DEVICE_ATTR_WO(s3_debug);  #endif +static int dm_init_microcode(struct amdgpu_device *adev) +{ +	char *fw_name_dmub; +	int r; + +	switch (adev->ip_versions[DCE_HWIP][0]) { +	case IP_VERSION(2, 1, 0): +		fw_name_dmub = FIRMWARE_RENOIR_DMUB; +		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) +			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; +		break; +	case IP_VERSION(3, 0, 0): +		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) +			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; +		else +			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; +		break; +	case IP_VERSION(3, 0, 1): +		fw_name_dmub = FIRMWARE_VANGOGH_DMUB; +		break; +	case IP_VERSION(3, 0, 2): +		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; +		break; +	case IP_VERSION(3, 0, 3): +		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; +		break; +	case IP_VERSION(3, 1, 2): +	case IP_VERSION(3, 1, 3): +		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; +		break; +	case IP_VERSION(3, 1, 4): +		fw_name_dmub = FIRMWARE_DCN_314_DMUB; +		break; +	case IP_VERSION(3, 1, 5): +		fw_name_dmub = FIRMWARE_DCN_315_DMUB; +		break; +	case IP_VERSION(3, 1, 6): +		fw_name_dmub = FIRMWARE_DCN316_DMUB; +		break; +	case IP_VERSION(3, 2, 0): +		fw_name_dmub = 
FIRMWARE_DCN_V3_2_0_DMCUB; +		break; +	case IP_VERSION(3, 2, 1): +		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; +		break; +	default: +		/* ASIC doesn't support DMUB. */ +		return 0; +	} +	r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); +	if (r) +		DRM_ERROR("DMUB firmware loading failed: %d\n", r); +	return r; +} +  static int dm_early_init(void *handle)  {  	struct amdgpu_device *adev = (struct amdgpu_device *)handle; +	struct amdgpu_mode_info *mode_info = &adev->mode_info; +	struct atom_context *ctx = mode_info->atom_context; +	int index = GetIndexIntoMasterTable(DATA, Object_Header); +	u16 data_offset; + +	/* if there is no object header, skip DM */ +	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { +		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; +		dev_info(adev->dev, "No object header, skipping DM\n"); +		return -ENOENT; +	}  	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4619,8 +4769,6 @@ static int dm_early_init(void *handle)  		break;  	} -	amdgpu_dm_set_irq_funcs(adev); -  	if (adev->mode_info.funcs == NULL)  		adev->mode_info.funcs = &dm_display_funcs; @@ -4636,7 +4784,7 @@ static int dm_early_init(void *handle)  #endif  	adev->dc_enabled = true; -	return 0; +	return dm_init_microcode(adev);  }  static bool modereset_required(struct drm_crtc_state *crtc_state) @@ -4701,7 +4849,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,  static int  fill_dc_plane_info_and_addr(struct amdgpu_device *adev,  			    const struct drm_plane_state *plane_state, -			    const uint64_t tiling_flags, +			    const u64 tiling_flags,  			    struct dc_plane_info *plane_info,  			    struct dc_plane_address *address,  			    bool tmz_surface, @@ -4876,7 +5024,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,  static inline void fill_dc_dirty_rect(struct drm_plane *plane,  				      struct rect *dirty_rect, int32_t x, -				      int32_t y, int32_t width, int32_t 
height, +				      s32 y, s32 width, s32 height,  				      int *i, bool ffu)  {  	if (*i > DC_MAX_DIRTY_RECTS) @@ -4912,6 +5060,7 @@ out:   * @new_plane_state: New state of @plane   * @crtc_state: New state of CRTC connected to the @plane   * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * @dirty_regions_changed: dirty regions changed   *   * For PSR SU, DC informs the DMUB uController of dirty rectangle regions   * (referred to as "damage clips" in DRM nomenclature) that require updating on @@ -4928,15 +5077,17 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,  				struct drm_plane_state *old_plane_state,  				struct drm_plane_state *new_plane_state,  				struct drm_crtc_state *crtc_state, -				struct dc_flip_addrs *flip_addrs) +				struct dc_flip_addrs *flip_addrs, +				bool *dirty_regions_changed)  {  	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);  	struct rect *dirty_rects = flip_addrs->dirty_rects; -	uint32_t num_clips; +	u32 num_clips;  	struct drm_mode_rect *clips;  	bool bb_changed;  	bool fb_changed; -	uint32_t i = 0; +	u32 i = 0; +	*dirty_regions_changed = false;  	/*  	 * Cursor plane has it's own dirty rect update interface. 
See @@ -4981,6 +5132,8 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,  		new_plane_state->plane->base.id,  		bb_changed, fb_changed, num_clips); +	*dirty_regions_changed = bb_changed; +  	if (bb_changed) {  		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],  				   new_plane_state->crtc_x, @@ -5082,7 +5235,7 @@ static enum dc_color_depth  convert_color_depth_from_display_info(const struct drm_connector *connector,  				      bool is_y420, int requested_bpc)  { -	uint8_t bpc; +	u8 bpc;  	if (is_y420) {  		bpc = 8; @@ -5311,8 +5464,6 @@ static void fill_stream_properties_from_drm_display_mode(  	timing_out->aspect_ratio = get_aspect_ratio(mode_in); -	stream->output_color_space = get_output_color_space(timing_out); -  	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;  	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { @@ -5323,6 +5474,8 @@ static void fill_stream_properties_from_drm_display_mode(  			adjust_colour_depth_from_display_info(timing_out, info);  		}  	} + +	stream->output_color_space = get_output_color_space(timing_out);  }  static void fill_audio_info(struct audio_info *audio_info, @@ -5626,8 +5779,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,  				    uint32_t max_dsc_target_bpp_limit_override)  {  	const struct dc_link_settings *verified_link_cap = NULL; -	uint32_t link_bw_in_kbps; -	uint32_t edp_min_bpp_x16, edp_max_bpp_x16; +	u32 link_bw_in_kbps; +	u32 edp_min_bpp_x16, edp_max_bpp_x16;  	struct dc *dc = sink->ctx->dc;  	struct dc_dsc_bw_range bw_range = {0};  	struct dc_dsc_config dsc_cfg = {0}; @@ -5684,11 +5837,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,  					struct dsc_dec_dpcd_caps *dsc_caps)  {  	struct drm_connector *drm_connector = &aconnector->base; -	uint32_t link_bandwidth_kbps; +	u32 link_bandwidth_kbps;  	struct dc *dc = sink->ctx->dc; -	uint32_t max_supported_bw_in_kbps, 
timing_bw_in_kbps; -	uint32_t dsc_max_supported_bw_in_kbps; -	uint32_t max_dsc_target_bpp_limit_override = +	u32 max_supported_bw_in_kbps, timing_bw_in_kbps; +	u32 dsc_max_supported_bw_in_kbps; +	u32 max_dsc_target_bpp_limit_override =  		drm_connector->display_info.max_dsc_bpp;  	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, @@ -5867,6 +6020,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  			stream, &mode, &aconnector->base, con_state, old_stream,  			requested_bpc); +	if (aconnector->timing_changed) { +		DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", +				__func__, +				stream->timing.display_color_depth, +				aconnector->timing_requested->display_color_depth); +		stream->timing = *aconnector->timing_requested; +	} +  #if defined(CONFIG_DRM_AMD_DC_DCN)  	/* SST DSC determination policy */  	update_dsc_caps(aconnector, sink, stream, &dsc_caps); @@ -6059,15 +6220,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)  	if (aconnector->mst_mgr.dev)  		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ -	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)  	for (i = 0; i < dm->num_of_edps; i++) {  		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {  			backlight_device_unregister(dm->backlight_dev[i]);  			dm->backlight_dev[i] = NULL;  		}  	} -#endif  	if (aconnector->dc_em_sink)  		dc_sink_release(aconnector->dc_em_sink); @@ -6261,7 +6419,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,  	dc_plane_state->plane_size.surface_size.width  = stream->src.width;  	dc_plane_state->plane_size.chroma_size.height  = stream->src.height;  	dc_plane_state->plane_size.chroma_size.width   = stream->src.width; -	dc_plane_state->tiling_info.gfx9.swizzle =  DC_SW_UNKNOWN;  	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;  	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;  	
dc_plane_state->rotation = ROTATION_ANGLE_0; @@ -6559,11 +6716,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  	int clock, bpp = 0;  	bool is_y420 = false; -	if (!aconnector->port || !aconnector->dc_sink) +	if (!aconnector->mst_output_port || !aconnector->dc_sink)  		return 0; -	mst_port = aconnector->port; -	mst_mgr = &aconnector->mst_port->mst_mgr; +	mst_port = aconnector->mst_output_port; +	mst_mgr = &aconnector->mst_root->mst_mgr;  	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)  		return 0; @@ -6573,7 +6730,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  		return PTR_ERR(mst_state);  	if (!mst_state->pbn_div) -		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); +		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);  	if (!state->duplicated) {  		int max_bpc = conn_state->max_requested_bpc; @@ -6619,7 +6776,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  		aconnector = to_amdgpu_dm_connector(connector); -		if (!aconnector->port) +		if (!aconnector->mst_output_port)  			continue;  		if (!new_con_state || !new_con_state->crtc) @@ -6659,7 +6816,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  			dm_conn_state->pbn = pbn;  			dm_conn_state->vcpi_slots = slot_num; -			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, +			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,  							   dm_conn_state->pbn, false);  			if (ret < 0)  				return ret; @@ -6667,7 +6824,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,  			continue;  		} -		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); +		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);  		if (vcpi < 0)  			return vcpi; @@ -6910,7 +7067,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  	const 
struct drm_display_mode *m;  	struct drm_display_mode *new_mode;  	uint i; -	uint32_t new_modes_count = 0; +	u32 new_modes_count = 0;  	/* Standard FPS values  	 * @@ -6924,7 +7081,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  	 * 60 	        - Commonly used  	 * 48,72,96,120 - Multiples of 24  	 */ -	static const uint32_t common_rates[] = { +	static const u32 common_rates[] = {  		23976, 24000, 25000, 29970, 30000,  		48000, 50000, 60000, 72000, 96000, 120000  	}; @@ -6940,8 +7097,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  		return 0;  	for (i = 0; i < ARRAY_SIZE(common_rates); i++) { -		uint64_t target_vtotal, target_vtotal_diff; -		uint64_t num, den; +		u64 target_vtotal, target_vtotal_diff; +		u64 num, den;  		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])  			continue; @@ -7040,6 +7197,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	aconnector->base.dpms = DRM_MODE_DPMS_OFF;  	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */  	aconnector->audio_inst = -1; +	aconnector->pack_sdp_v1_3 = false; +	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; +	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));  	mutex_init(&aconnector->hpd_lock);  	/* @@ -7081,11 +7241,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  				adev->mode_info.underscan_vborder_property,  				0); -	if (!aconnector->mst_port) +	if (!aconnector->mst_root)  		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);  	/* This defaults to the max in the range, but we want 8bpc for non-edp. */ -	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 
16 : 8; +	aconnector->base.state->max_bpc = 16;  	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;  	if (connector_type == DRM_MODE_CONNECTOR_eDP && @@ -7099,7 +7259,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	    connector_type == DRM_MODE_CONNECTOR_eDP) {  		drm_connector_attach_hdr_output_metadata_property(&aconnector->base); -		if (!aconnector->mst_port) +		if (!aconnector->mst_root)  			drm_connector_attach_vrr_capable_property(&aconnector->base);  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -7183,7 +7343,7 @@ create_i2c(struct ddc_service *ddc_service,   */  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  				    struct amdgpu_dm_connector *aconnector, -				    uint32_t link_index, +				    u32 link_index,  				    struct amdgpu_encoder *aencoder)  {  	int res = 0; @@ -7368,27 +7528,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,  }  #ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, -					    const struct drm_connector_state *old_state, -					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, +					    struct drm_crtc_state *old_crtc_state, +					    struct drm_connector_state *new_conn_state, +					    struct drm_connector_state *old_conn_state, +					    const struct drm_connector *connector, +					    struct hdcp_workqueue *hdcp_w)  {  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);  	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); -	/* Handle: Type0/1 change */ -	if (old_state->hdcp_content_type != state->hdcp_content_type && -	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +	pr_debug("[HDCP_DM] connector->index: %x connect_status: 
%x dpms: %x\n", +		connector->index, connector->status, connector->dpms); +	pr_debug("[HDCP_DM] state protection old: %x new: %x\n", +		old_conn_state->content_protection, new_conn_state->content_protection); + +	if (old_crtc_state) +		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +		old_crtc_state->enable, +		old_crtc_state->active, +		old_crtc_state->mode_changed, +		old_crtc_state->active_changed, +		old_crtc_state->connectors_changed); + +	if (new_crtc_state) +		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +		new_crtc_state->enable, +		new_crtc_state->active, +		new_crtc_state->mode_changed, +		new_crtc_state->active_changed, +		new_crtc_state->connectors_changed); + +	/* hdcp content type change */ +	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && +	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);  		return true;  	} -	/* CP is being re enabled, ignore this -	 * -	 * Handles:	ENABLED -> DESIRED -	 */ -	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && -	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; +	/* CP is being re enabled, ignore this */ +	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && +	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { +		if (new_crtc_state && new_crtc_state->mode_changed) { +			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); +			return true; +		} +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; +		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);  		return 
false;  	} @@ -7396,9 +7584,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	UNDESIRED -> ENABLED  	 */ -	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && -	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) -		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && +	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) +		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;  	/* Stream removed and re-enabled  	 * @@ -7408,10 +7596,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	DESIRED -> DESIRED (Special case)  	 */ -	if (!(old_state->crtc && old_state->crtc->enabled) && -		state->crtc && state->crtc->enabled && +	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && +		new_conn_state->crtc && new_conn_state->crtc->enabled &&  		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {  		dm_con_state->update_hdcp = false; +		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", +			__func__);  		return true;  	} @@ -7423,35 +7613,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	 *  	 * Handles:	DESIRED -> DESIRED (Special case)  	 */ -	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && -	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { +	if (dm_con_state->update_hdcp && +	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && +	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {  		dm_con_state->update_hdcp = false; +		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", +			__func__);  		return true;  	} -	/* -	 * Handles:	UNDESIRED 
-> UNDESIRED -	 *		DESIRED -> DESIRED -	 *		ENABLED -> ENABLED -	 */ -	if (old_state->content_protection == state->content_protection) +	if (old_conn_state->content_protection == new_conn_state->content_protection) { +		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { +			if (new_crtc_state && new_crtc_state->mode_changed) { +				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", +					__func__); +				return true; +			} +			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", +				__func__); +			return false; +		} + +		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);  		return false; +	} -	/* -	 * Handles:	UNDESIRED -> DESIRED -	 *		DESIRED -> UNDESIRED -	 *		ENABLED -> UNDESIRED -	 */ -	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) +	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { +		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", +			__func__);  		return true; +	} -	/* -	 * Handles:	DESIRED -> ENABLED -	 */ +	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);  	return false;  } -  #endif +  static void remove_stream(struct amdgpu_device *adev,  			  struct amdgpu_crtc *acrtc,  			  struct dc_stream_state *stream) @@ -7493,6 +7690,8 @@ static void update_freesync_state_on_stream(  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);  	unsigned long flags;  	bool pack_sdp_v1_3 = false; +	struct amdgpu_dm_connector *aconn; +	enum vrr_packet_type packet_type = PACKET_TYPE_VRR;  	if (!new_stream)  		return; @@ -7528,11 +7727,27 @@ static void update_freesync_state_on_stream(  		}  	} +	aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; + +	if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { +		pack_sdp_v1_3 = aconn->pack_sdp_v1_3; + +		if (aconn->vsdb_info.amd_vsdb_version == 1) +			packet_type = 
PACKET_TYPE_FS_V1; +		else if (aconn->vsdb_info.amd_vsdb_version == 2) +			packet_type = PACKET_TYPE_FS_V2; +		else if (aconn->vsdb_info.amd_vsdb_version == 3) +			packet_type = PACKET_TYPE_FS_V3; + +		mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, +					&new_stream->adaptive_sync_infopacket); +	} +  	mod_freesync_build_vrr_infopacket(  		dm->freesync_module,  		new_stream,  		&vrr_params, -		PACKET_TYPE_VRR, +		packet_type,  		TRANSFER_FUNC_UNKNOWN,  		&vrr_infopacket,  		pack_sdp_v1_3); @@ -7546,6 +7761,7 @@ static void update_freesync_state_on_stream(  	new_crtc_state->vrr_infopacket = vrr_infopacket;  	new_stream->vrr_infopacket = vrr_infopacket; +	new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);  	if (new_crtc_state->freesync_vrr_info_changed)  		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", @@ -7667,8 +7883,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  				    struct drm_crtc *pcrtc,  				    bool wait_for_vblank)  { -	uint32_t i; -	uint64_t timestamp_ns; +	u32 i; +	u64 timestamp_ns = ktime_get_ns();  	struct drm_plane *plane;  	struct drm_plane_state *old_plane_state, *new_plane_state;  	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); @@ -7679,10 +7895,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));  	int planes_count = 0, vpos, hpos;  	unsigned long flags; -	uint32_t target_vblank, last_flip_vblank; +	u32 target_vblank, last_flip_vblank;  	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);  	bool cursor_update = false;  	bool pflip_present = false; +	bool dirty_rects_changed = false;  	struct {  		struct dc_surface_update surface_updates[MAX_SURFACES];  		struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -7770,10 +7987,32 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  		bundle->surface_updates[planes_count].plane_info =  			
&bundle->plane_infos[planes_count]; -		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) +		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {  			fill_dc_dirty_rects(plane, old_plane_state,  					    new_plane_state, new_crtc_state, -					    &bundle->flip_addrs[planes_count]); +					    &bundle->flip_addrs[planes_count], +					    &dirty_rects_changed); + +			/* +			 * If the dirty regions changed, PSR-SU need to be disabled temporarily +			 * and enabled it again after dirty regions are stable to avoid video glitch. +			 * PSR-SU will be enabled in vblank_control_worker() if user pause the video +			 * during the PSR-SU was disabled. +			 */ +			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && +			    acrtc_attach->dm_irq_params.allow_psr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif +			    dirty_rects_changed) { +				mutex_lock(&dm->dc_lock); +				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = +				timestamp_ns; +				if (acrtc_state->stream->link->psr_settings.psr_allow_active) +					amdgpu_dm_psr_disable(acrtc_state->stream); +				mutex_unlock(&dm->dc_lock); +			} +		}  		/*  		 * Only allow immediate flips for fast updates that don't @@ -7992,7 +8231,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY  			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&  #endif -			    !acrtc_state->stream->link->psr_settings.psr_allow_active) +			    !acrtc_state->stream->link->psr_settings.psr_allow_active && +			    (timestamp_ns - +			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > +			    500000000)  				amdgpu_dm_psr_enable(acrtc_state->stream);  		} else {  			acrtc_attach->dm_irq_params.allow_psr_entry = false; @@ -8117,7 +8359,7 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)  	struct amdgpu_display_manager *dm = &adev->dm;  	struct dm_atomic_state *dm_state;  	struct dc_state *dc_state = NULL, *dc_state_temp = NULL; -	uint32_t i, j; +	u32 i, j;  	struct drm_crtc *crtc;  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;  	unsigned long flags; @@ -8291,10 +8533,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);  		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); + +		if (!connector) +			continue; + +		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", +			connector->index, connector->status, connector->dpms); +		pr_debug("[HDCP_DM] state protection old: %x new: %x\n", +			old_con_state->content_protection, new_con_state->content_protection); + +		if (aconnector->dc_sink) { +			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && +				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { +				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", +				aconnector->dc_sink->edid_caps.display_name); +			} +		} + +		new_crtc_state = NULL; +		old_crtc_state = NULL; + +		if (acrtc) { +			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); +			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); +		} + +		if (old_crtc_state) +			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +			old_crtc_state->enable, +			old_crtc_state->active, +			old_crtc_state->mode_changed, +			old_crtc_state->active_changed, +			old_crtc_state->connectors_changed); + +		if (new_crtc_state) +			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", +			new_crtc_state->enable, +			new_crtc_state->active, +			new_crtc_state->mode_changed, +			new_crtc_state->active_changed, +			new_crtc_state->connectors_changed); +	} + +	for_each_oldnew_connector_in_state(state, connector, 
old_con_state, new_con_state, i) { +		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); +		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); +		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +  		new_crtc_state = NULL; +		old_crtc_state = NULL; -		if (acrtc) +		if (acrtc) {  			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); +			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); +		}  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -8306,11 +8599,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  			continue;  		} -		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) +		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, +											old_con_state, connector, adev->dm.hdcp_workqueue)) { +			/* when display is unplugged from mst hub, connctor will +			 * be destroyed within dm_dp_mst_connector_destroy. connector +			 * hdcp perperties, like type, undesired, desired, enabled, +			 * will be lost. So, save hdcp properties into hdcp_work within +			 * amdgpu_dm_atomic_commit_tail. 
if the same display is +			 * plugged back with same display index, its hdcp properties +			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes +			 */ + +			bool enable_encryption = false; + +			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) +				enable_encryption = true; + +			if (aconnector->dc_link && aconnector->dc_sink && +				aconnector->dc_link->type == dc_connection_mst_branch) { +				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; +				struct hdcp_workqueue *hdcp_w = +					&hdcp_work[aconnector->dc_link->link_index]; + +				hdcp_w->hdcp_content_type[connector->index] = +					new_con_state->hdcp_content_type; +				hdcp_w->content_protection[connector->index] = +					new_con_state->content_protection; +			} + +			if (new_crtc_state && new_crtc_state->mode_changed && +				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) +				enable_encryption = true; + +			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); +  			hdcp_update_display(  				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, -				new_con_state->hdcp_content_type, -				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); +				new_con_state->hdcp_content_type, enable_encryption); +		}  	}  #endif @@ -8408,9 +8734,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  #ifdef CONFIG_DEBUG_FS  		enum amdgpu_dm_pipe_crc_source cur_crc_src; -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -		struct crc_rd_work *crc_rd_wrk; -#endif  #endif  		/* Count number of newly disabled CRTCs for dropping PM refs later. 
*/  		if (old_crtc_state->active && !new_crtc_state->active) @@ -8423,9 +8746,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		update_stream_irq_parameters(dm, dm_new_crtc_state);  #ifdef CONFIG_DEBUG_FS -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -		crc_rd_wrk = dm->crc_rd_wrk; -#endif  		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);  		cur_crc_src = acrtc->dm_irq_params.crc_src;  		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); @@ -8454,10 +8774,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  				if (amdgpu_dm_crc_window_is_activated(crtc)) {  					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);  					acrtc->dm_irq_params.window_param.update_win = true; + +					/** +					 * It takes 2 frames for HW to stably generate CRC when +					 * resuming from suspend, so we set skip_frame_cnt 2. +					 */  					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; -					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); -					crc_rd_wrk->crtc = crtc; -					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);  					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);  				}  #endif @@ -8748,7 +9070,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,  }  static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { -	uint64_t num, den, res; +	u64 num, den, res;  	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;  	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; @@ -8887,6 +9209,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		if (!dm_old_crtc_state->stream)  			goto skip_modeset; +		/* Unset freesync video if it was active before */ +		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { +			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; +			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; +		} + +		/* Now check if we 
should set freesync video mode */  		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&  		    is_timing_unchanged_for_freesync(new_crtc_state,  						     old_crtc_state)) { @@ -9197,7 +9526,8 @@ static int dm_update_plane_state(struct dc *dc,  				 struct drm_plane_state *old_plane_state,  				 struct drm_plane_state *new_plane_state,  				 bool enable, -				 bool *lock_and_validation_needed) +				 bool *lock_and_validation_needed, +				 bool *is_top_most_overlay)  {  	struct dm_atomic_state *dm_state = NULL; @@ -9305,6 +9635,14 @@ static int dm_update_plane_state(struct dc *dc,  		if (!dc_new_plane_state)  			return -ENOMEM; +		/* Block top most plane from being a video plane */ +		if (plane->type == DRM_PLANE_TYPE_OVERLAY) { +			if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) +				return -EINVAL; +			else +				*is_top_most_overlay = false; +		} +  		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",  				 plane->base.id, new_plane_crtc->base.id); @@ -9448,7 +9786,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm  			continue;  		aconnector = to_amdgpu_dm_connector(connector); -		if (!aconnector->port || !aconnector->mst_port) +		if (!aconnector->mst_output_port || !aconnector->mst_root)  			aconnector = NULL;  		else  			break; @@ -9457,7 +9795,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm  	if (!aconnector)  		return 0; -	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);  }  #endif @@ -9501,8 +9839,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	enum dc_status status;  	int ret, i;  	bool lock_and_validation_needed = false; +	bool is_top_most_overlay = true;  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;  #if defined(CONFIG_DRM_AMD_DC_DCN) +	struct drm_dp_mst_topology_mgr *mgr; +	struct 
drm_dp_mst_topology_state *mst_state;  	struct dsc_mst_fairness_vars vars[MAX_PIPES];  #endif @@ -9530,8 +9871,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  			goto fail;  		} -		if (dm_old_con_state->abm_level != -		    dm_new_con_state->abm_level) +		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || +		    dm_old_con_state->scaling != dm_new_con_state->scaling)  			new_crtc_state->connectors_changed = true;  	} @@ -9625,7 +9966,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in  	 * atomic state, so call drm helper to normalize zpos.  	 */ -	drm_atomic_normalize_zpos(dev, state); +	ret = drm_atomic_normalize_zpos(dev, state); +	if (ret) { +		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); +		goto fail; +	}  	/* Remove exiting planes if they are modified */  	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { @@ -9633,7 +9978,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  					    old_plane_state,  					    new_plane_state,  					    false, -					    &lock_and_validation_needed); +					    &lock_and_validation_needed, +					    &is_top_most_overlay);  		if (ret) {  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");  			goto fail; @@ -9672,7 +10018,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  					    old_plane_state,  					    new_plane_state,  					    true, -					    &lock_and_validation_needed); +					    &lock_and_validation_needed, +					    &is_top_most_overlay);  		if (ret) {  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");  			goto fail; @@ -9751,6 +10098,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  		lock_and_validation_needed = true;  	} +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* set the slot info for each mst_state based on the link encoding format */ +	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { +		struct 
amdgpu_dm_connector *aconnector; +		struct drm_connector *connector; +		struct drm_connector_list_iter iter; +		u8 link_coding_cap; + +		drm_connector_list_iter_begin(dev, &iter); +		drm_for_each_connector_iter(connector, &iter) { +			if (connector->index == mst_state->mgr->conn_base_id) { +				aconnector = to_amdgpu_dm_connector(connector); +				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); +				drm_dp_mst_update_slots(mst_state, link_coding_cap); + +				break; +			} +		} +		drm_connector_list_iter_end(&iter); +	} +#endif +  	/**  	 * Streams and planes are reset when there are changes that affect  	 * bandwidth. Anything that affects bandwidth needs to go through @@ -9885,7 +10254,7 @@ fail:  static bool is_dp_capable_without_timing_msa(struct dc *dc,  					     struct amdgpu_dm_connector *amdgpu_dm_connector)  { -	uint8_t dpcd_data; +	u8 dpcd_data;  	bool capable = false;  	if (amdgpu_dm_connector->dc_link && @@ -9904,7 +10273,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,  static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,  		unsigned int offset,  		unsigned int total_length, -		uint8_t *data, +		u8 *data,  		unsigned int length,  		struct amdgpu_hdmi_vsdb_info *vsdb)  { @@ -9959,7 +10328,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, -		uint8_t *edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	int i; @@ -10000,7 +10369,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, -		uint8_t *edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	int i; @@ -10016,21 +10385,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,  }  static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, -		uint8_t 
*edid_ext, int len, +		u8 *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); +	bool ret; +	mutex_lock(&adev->dm.dc_lock);  	if (adev->dm.dmub_srv) -		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); +		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);  	else -		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); +		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); +	mutex_unlock(&adev->dm.dc_lock); +	return ret;  }  static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,  		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)  { -	uint8_t *edid_ext = NULL; +	u8 *edid_ext = NULL;  	int i;  	bool valid_vsdb_found = false; @@ -10085,6 +10458,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,  	struct amdgpu_device *adev = drm_to_adev(dev);  	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};  	bool freesync_capable = false; +	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;  	if (!connector->state) {  		DRM_ERROR("%s - Connector has no state", __func__); @@ -10177,6 +10551,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,  		}  	} +	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + +	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { +		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); +		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { + +			amdgpu_dm_connector->pack_sdp_v1_3 = true; +			amdgpu_dm_connector->as_type = as_type; +			amdgpu_dm_connector->vsdb_info = vsdb_info; + +			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; +			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; +			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) +				freesync_capable = true; + +			connector->display_info.monitor_range.min_vfreq = 
vsdb_info.min_refresh_rate_hz; +			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; +		} +	} +  update:  	if (dm_con_state)  		dm_con_state->freesync_capable = freesync_capable; @@ -10206,7 +10600,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)  }  void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, -		       uint32_t value, const char *func_name) +		       u32 value, const char *func_name)  {  #ifdef DM_CHECK_ADDR_0  	if (address == 0) { @@ -10221,7 +10615,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,  uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,  			  const char *func_name)  { -	uint32_t value; +	u32 value;  #ifdef DM_CHECK_ADDR_0  	if (address == 0) {  		DC_ERR("invalid register read; address = 0\n"); @@ -10300,6 +10694,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(  	ret = p_notify->aux_reply.length;  	*operation_result = p_notify->result;  out: +	reinit_completion(&adev->dm.dmub_aux_transfer_done);  	mutex_unlock(&adev->dm.dpia_aux_lock);  	return ret;  } @@ -10327,6 +10722,8 @@ int amdgpu_dm_process_dmub_set_config_sync(  		*operation_result = SET_CONFIG_UNKNOWN_ERROR;  	} +	if (!is_cmd_complete) +		reinit_completion(&adev->dm.dmub_aux_transfer_done);  	mutex_unlock(&adev->dm.dpia_aux_lock);  	return ret;  }  |