diff options
Diffstat (limited to 'drivers/gpu/drm/amd/display')
265 files changed, 14442 insertions, 3838 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 313183b80032..87858bc57e64 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT  menu "Display Engine Configuration"  	depends on DRM && DRM_AMDGPU @@ -6,43 +6,16 @@ config DRM_AMD_DC  	bool "AMD DC - Enable new display engine"  	default y  	select SND_HDA_COMPONENT if SND_HDA_CORE -	select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) +	select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)  	help  	  Choose this option if you want to use the new display engine  	  support for AMDGPU. This adds required support for Vega and  	  Raven ASICs. -config DRM_AMD_DC_DCN1_0 +config DRM_AMD_DC_DCN  	def_bool n  	help -	  RV family support for display engine - -config DRM_AMD_DC_DCN2_0 -	bool "DCN 2.0 family" -	default y -	depends on DRM_AMD_DC && X86 -	depends on DRM_AMD_DC_DCN1_0 -	help -	  Choose this option if you want to have -	  Navi support for display engine - -config DRM_AMD_DC_DCN2_1 -	bool "DCN 2.1 family" -	depends on DRM_AMD_DC && X86 -	depends on DRM_AMD_DC_DCN2_0 -	help -	  Choose this option if you want to have -	  Renoir support for display engine - -config DRM_AMD_DC_DSC_SUPPORT -	bool "DSC support" -	default y -	depends on DRM_AMD_DC && X86 -	depends on DRM_AMD_DC_DCN1_0 -	depends on DRM_AMD_DC_DCN2_0 -	help -	  Choose this option if you want to have -	  Dynamic Stream Compression support +	  Raven, Navi and Renoir family support for display engine  config DRM_AMD_DC_HDCP  	bool "Enable HDCP support in DC" diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 36b3d6a5d04d..2633de77de5e 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,6 +34,8 @@ subdir-ccflags-y += 
-I$(FULL_AMD_DISPLAY_PATH)/modules/freesync  subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color  subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet  subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc +  ifdef CONFIG_DRM_AMD_DC_HDCP  subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp  endif @@ -41,7 +43,7 @@ endif  #TODO: remove when Timing Sync feature is complete  subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 -DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power +DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power dmub/src  ifdef CONFIG_DRM_AMD_DC_HDCP  DAL_LIBS += modules/hdcp diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7aac9568d3be..9402374d2466 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -30,6 +30,10 @@  #include "dc.h"  #include "dc/inc/core_types.h"  #include "dal_asic_id.h" +#include "dmub/inc/dmub_srv.h" +#include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" +#include "dc/dc_dmub_srv.h"  #include "vid.h"  #include "amdgpu.h" @@ -39,6 +43,7 @@  #include "amdgpu_dm.h"  #ifdef CONFIG_DRM_AMD_DC_HDCP  #include "amdgpu_dm_hdcp.h" +#include <drm/drm_hdcp.h>  #endif  #include "amdgpu_pm.h" @@ -72,7 +77,7 @@  #include <drm/drm_audio_component.h>  #include <drm/drm_hdcp.h> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"  #include "dcn/dcn_1_0_offset.h" @@ -87,9 +92,18 @@  #include "modules/power/power_helpers.h"  #include "modules/inc/mod_info_packet.h" +#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); +  #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"  MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); +/* Number of bytes in PSP 
header for firmware. */ +#define PSP_HEADER_BYTES 0x100 + +/* Number of bytes in PSP footer for firmware. */ +#define PSP_FOOTER_BYTES 0x100 +  /**   * DOC: overview   * @@ -478,6 +492,70 @@ static void dm_crtc_high_irq(void *interrupt_params)  	}  } +#if defined(CONFIG_DRM_AMD_DC_DCN) +/** + * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs + * @interrupt params - interrupt parameters + * + * Notify DRM's vblank event handler at VSTARTUP + * + * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which: + * * We are close enough to VUPDATE - the point of no return for hw + * * We are in the fixed portion of variable front porch when vrr is enabled + * * We are before VUPDATE, where double-buffered vrr registers are swapped + * + * It is therefore the correct place to signal vblank, send user flip events, + * and update VRR. + */ +static void dm_dcn_crtc_high_irq(void *interrupt_params) +{ +	struct common_irq_params *irq_params = interrupt_params; +	struct amdgpu_device *adev = irq_params->adev; +	struct amdgpu_crtc *acrtc; +	struct dm_crtc_state *acrtc_state; +	unsigned long flags; + +	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); + +	if (!acrtc) +		return; + +	acrtc_state = to_dm_crtc_state(acrtc->base.state); + +	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, +				amdgpu_dm_vrr_active(acrtc_state)); + +	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); +	drm_crtc_handle_vblank(&acrtc->base); + +	spin_lock_irqsave(&adev->ddev->event_lock, flags); + +	if (acrtc_state->vrr_params.supported && +	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { +		mod_freesync_handle_v_update( +		adev->dm.freesync_module, +		acrtc_state->stream, +		&acrtc_state->vrr_params); + +		dc_stream_adjust_vmin_vmax( +			adev->dm.dc, +			acrtc_state->stream, +			&acrtc_state->vrr_params.adjust); +	} + +	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) { +		if (acrtc->event) { +			
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); +			acrtc->event = NULL; +			drm_crtc_vblank_put(&acrtc->base); +		} +		acrtc->pflip_status = AMDGPU_FLIP_NONE; +	} + +	spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} +#endif +  static int dm_set_clockgating_state(void *handle,  		  enum amd_clockgating_state state)  { @@ -667,12 +745,126 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)  	}  } +static int dm_dmub_hw_init(struct amdgpu_device *adev) +{ +	const struct dmcub_firmware_header_v1_0 *hdr; +	struct dmub_srv *dmub_srv = adev->dm.dmub_srv; +	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; +	const struct firmware *dmub_fw = adev->dm.dmub_fw; +	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; +	struct abm *abm = adev->dm.dc->res_pool->abm; +	struct dmub_srv_hw_params hw_params; +	enum dmub_status status; +	const unsigned char *fw_inst_const, *fw_bss_data; +	uint32_t i, fw_inst_const_size, fw_bss_data_size; +	bool has_hw_support; + +	if (!dmub_srv) +		/* DMUB isn't supported on the ASIC. */ +		return 0; + +	if (!fb_info) { +		DRM_ERROR("No framebuffer info for DMUB service.\n"); +		return -EINVAL; +	} + +	if (!dmub_fw) { +		/* Firmware required for DMUB support. */ +		DRM_ERROR("No firmware provided for DMUB.\n"); +		return -EINVAL; +	} + +	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); +	if (status != DMUB_STATUS_OK) { +		DRM_ERROR("Error checking HW support for DMUB: %d\n", status); +		return -EINVAL; +	} + +	if (!has_hw_support) { +		DRM_INFO("DMUB unsupported on ASIC\n"); +		return 0; +	} + +	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; + +	fw_inst_const = dmub_fw->data + +			le32_to_cpu(hdr->header.ucode_array_offset_bytes) + +			PSP_HEADER_BYTES; + +	fw_bss_data = dmub_fw->data + +		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) + +		      le32_to_cpu(hdr->inst_const_bytes); + +	/* Copy firmware and bios info into FB memory. 
*/ +	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - +			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES; + +	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + +	memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, +	       fw_inst_const_size); +	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, +	       fw_bss_data_size); +	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, +	       adev->bios_size); + +	/* Reset regions that need to be reset. */ +	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, +	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); + +	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, +	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); + +	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, +	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); + +	/* Initialize hardware. */ +	memset(&hw_params, 0, sizeof(hw_params)); +	hw_params.fb_base = adev->gmc.fb_start; +	hw_params.fb_offset = adev->gmc.aper_base; + +	if (dmcu) +		hw_params.psp_version = dmcu->psp_version; + +	for (i = 0; i < fb_info->num_fb; ++i) +		hw_params.fb[i] = &fb_info->fb[i]; + +	status = dmub_srv_hw_init(dmub_srv, &hw_params); +	if (status != DMUB_STATUS_OK) { +		DRM_ERROR("Error initializing DMUB HW: %d\n", status); +		return -EINVAL; +	} + +	/* Wait for firmware load to finish. */ +	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); +	if (status != DMUB_STATUS_OK) +		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + +	/* Init DMCU and ABM if available. 
*/ +	if (dmcu && abm) { +		dmcu->funcs->dmcu_init(dmcu); +		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); +	} + +	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); +	if (!adev->dm.dc->ctx->dmub_srv) { +		DRM_ERROR("Couldn't allocate DC DMUB server!\n"); +		return -ENOMEM; +	} + +	DRM_INFO("DMUB hardware initialized: version=0x%08X\n", +		 adev->dm.dmcub_fw_version); + +	return 0; +} +  static int amdgpu_dm_init(struct amdgpu_device *adev)  {  	struct dc_init_data init_data;  #ifdef CONFIG_DRM_AMD_DC_HDCP  	struct dc_callback_init init_params;  #endif +	int r;  	adev->dm.ddev = adev->ddev;  	adev->dm.adev = adev; @@ -714,13 +906,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; -	/* -	 * TODO debug why this doesn't work on Raven -	 */ -	if (adev->flags & AMD_IS_APU && -	    adev->asic_type >= CHIP_CARRIZO && -	    adev->asic_type < CHIP_RAVEN) +	switch (adev->asic_type) { +	case CHIP_CARRIZO: +	case CHIP_STONEY: +	case CHIP_RAVEN: +	case CHIP_RENOIR:  		init_data.flags.gpu_vm_support = true; +		break; +	default: +		break; +	}  	if (amdgpu_dc_feature_mask & DC_FBC_MASK)  		init_data.flags.fbc_support = true; @@ -733,9 +928,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	init_data.flags.power_down_display_on_boot = true; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	init_data.soc_bounding_box = adev->dm.soc_bounding_box; -#endif  	/* Display Core create. 
*/  	adev->dm.dc = dc_create(&init_data); @@ -749,6 +942,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	dc_hardware_init(adev->dm.dc); +	r = dm_dmub_hw_init(adev); +	if (r) { +		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); +		goto error; +	} +  	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);  	if (!adev->dm.freesync_module) {  		DRM_ERROR( @@ -821,6 +1020,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  	if (adev->dm.dc)  		dc_deinit_callbacks(adev->dm.dc);  #endif +	if (adev->dm.dc->ctx->dmub_srv) { +		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); +		adev->dm.dc->ctx->dmub_srv = NULL; +	} + +	if (adev->dm.dmub_bo) +		amdgpu_bo_free_kernel(&adev->dm.dmub_bo, +				      &adev->dm.dmub_bo_gpu_addr, +				      &adev->dm.dmub_bo_cpu_addr);  	/* DC Destroy TODO: Replace destroy DAL */  	if (adev->dm.dc) @@ -932,9 +1140,160 @@ static int load_dmcu_fw(struct amdgpu_device *adev)  	return 0;  } +static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) +{ +	struct amdgpu_device *adev = ctx; + +	return dm_read_reg(adev->dm.dc->ctx, address); +} + +static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, +				     uint32_t value) +{ +	struct amdgpu_device *adev = ctx; + +	return dm_write_reg(adev->dm.dc->ctx, address, value); +} + +static int dm_dmub_sw_init(struct amdgpu_device *adev) +{ +	struct dmub_srv_create_params create_params; +	struct dmub_srv_region_params region_params; +	struct dmub_srv_region_info region_info; +	struct dmub_srv_fb_params fb_params; +	struct dmub_srv_fb_info *fb_info; +	struct dmub_srv *dmub_srv; +	const struct dmcub_firmware_header_v1_0 *hdr; +	const char *fw_name_dmub; +	enum dmub_asic dmub_asic; +	enum dmub_status status; +	int r; + +	switch (adev->asic_type) { +	case CHIP_RENOIR: +		dmub_asic = DMUB_ASIC_DCN21; +		fw_name_dmub = FIRMWARE_RENOIR_DMUB; +		break; + +	default: +		/* ASIC doesn't support DMUB. 
*/ +		return 0; +	} + +	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); +	if (r) { +		DRM_ERROR("DMUB firmware loading failed: %d\n", r); +		return 0; +	} + +	r = amdgpu_ucode_validate(adev->dm.dmub_fw); +	if (r) { +		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); +		return 0; +	} + +	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { +		DRM_WARN("Only PSP firmware loading is supported for DMUB\n"); +		return 0; +	} + +	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; +	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = +		AMDGPU_UCODE_ID_DMCUB; +	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; +	adev->firmware.fw_size += +		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); + +	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); + +	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", +		 adev->dm.dmcub_fw_version); + +	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); +	dmub_srv = adev->dm.dmub_srv; + +	if (!dmub_srv) { +		DRM_ERROR("Failed to allocate DMUB service!\n"); +		return -ENOMEM; +	} + +	memset(&create_params, 0, sizeof(create_params)); +	create_params.user_ctx = adev; +	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; +	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; +	create_params.asic = dmub_asic; + +	/* Create the DMUB service. */ +	status = dmub_srv_create(dmub_srv, &create_params); +	if (status != DMUB_STATUS_OK) { +		DRM_ERROR("Error creating DMUB service: %d\n", status); +		return -EINVAL; +	} + +	/* Calculate the size of all the regions for the DMUB service. 
*/ +	memset(®ion_params, 0, sizeof(region_params)); + +	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - +					PSP_HEADER_BYTES - PSP_FOOTER_BYTES; +	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); +	region_params.vbios_size = adev->bios_size; +	region_params.fw_bss_data = +		adev->dm.dmub_fw->data + +		le32_to_cpu(hdr->header.ucode_array_offset_bytes) + +		le32_to_cpu(hdr->inst_const_bytes); + +	status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, +					   ®ion_info); + +	if (status != DMUB_STATUS_OK) { +		DRM_ERROR("Error calculating DMUB region info: %d\n", status); +		return -EINVAL; +	} + +	/* +	 * Allocate a framebuffer based on the total size of all the regions. +	 * TODO: Move this into GART. +	 */ +	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, +				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, +				    &adev->dm.dmub_bo_gpu_addr, +				    &adev->dm.dmub_bo_cpu_addr); +	if (r) +		return r; + +	/* Rebase the regions on the framebuffer address. 
*/ +	memset(&fb_params, 0, sizeof(fb_params)); +	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; +	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; +	fb_params.region_info = ®ion_info; + +	adev->dm.dmub_fb_info = +		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); +	fb_info = adev->dm.dmub_fb_info; + +	if (!fb_info) { +		DRM_ERROR( +			"Failed to allocate framebuffer info for DMUB service!\n"); +		return -ENOMEM; +	} + +	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); +	if (status != DMUB_STATUS_OK) { +		DRM_ERROR("Error calculating DMUB FB info: %d\n", status); +		return -EINVAL; +	} + +	return 0; +} +  static int dm_sw_init(void *handle)  {  	struct amdgpu_device *adev = (struct amdgpu_device *)handle; +	int r; + +	r = dm_dmub_sw_init(adev); +	if (r) +		return r;  	return load_dmcu_fw(adev);  } @@ -943,6 +1302,19 @@ static int dm_sw_fini(void *handle)  {  	struct amdgpu_device *adev = (struct amdgpu_device *)handle; +	kfree(adev->dm.dmub_fb_info); +	adev->dm.dmub_fb_info = NULL; + +	if (adev->dm.dmub_srv) { +		dmub_srv_destroy(adev->dm.dmub_srv); +		adev->dm.dmub_srv = NULL; +	} + +	if (adev->dm.dmub_fw) { +		release_firmware(adev->dm.dmub_fw); +		adev->dm.dmub_fw = NULL; +	} +  	if(adev->dm.fw_dmcu) {  		release_firmware(adev->dm.fw_dmcu);  		adev->dm.fw_dmcu = NULL; @@ -1235,7 +1607,7 @@ static int dm_resume(void *handle)  	struct dm_plane_state *dm_new_plane_state;  	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);  	enum dc_connection_type new_connection_type = dc_connection_none; -	int i; +	int i, r;  	/* Recreate dc_state - DC invalidates it when setting power state to S3. */  	dc_release_state(dm_state->context); @@ -1243,6 +1615,11 @@ static int dm_resume(void *handle)  	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */  	dc_resource_state_construct(dm->dc, dm_state->context); +	/* Before powering on DC we need to re-initialize DMUB. 
*/ +	r = dm_dmub_hw_init(adev); +	if (r) +		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); +  	/* power on hardware */  	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -1868,7 +2245,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)  	return 0;  } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  /* Register IRQ sources and initialize IRQ callbacks */  static int dcn10_register_irq_handlers(struct amdgpu_device *adev)  { @@ -1914,35 +2291,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)  		c_irq_params->irq_src = int_params.irq_source;  		amdgpu_dm_irq_register_interrupt(adev, &int_params, -				dm_crtc_high_irq, c_irq_params); -	} - -	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to -	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx -	 * to trigger at end of each vblank, regardless of state of the lock, -	 * matching DCE behaviour. -	 */ -	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; -	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; -	     i++) { -		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); - -		if (r) { -			DRM_ERROR("Failed to add vupdate irq id!\n"); -			return r; -		} - -		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; -		int_params.irq_source = -			dc_interrupt_to_irq_source(dc, i, 0); - -		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; - -		c_irq_params->adev = adev; -		c_irq_params->irq_src = int_params.irq_source; - -		amdgpu_dm_irq_register_interrupt(adev, &int_params, -				dm_vupdate_high_irq, c_irq_params); +				dm_dcn_crtc_high_irq, c_irq_params);  	}  	/* Use GRPH_PFLIP interrupt */ @@ -2457,16 +2806,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  			goto fail;  		}  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if 
defined(CONFIG_DRM_AMD_DC_DCN)  	case CHIP_RAVEN: -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case CHIP_NAVI12:  	case CHIP_NAVI10:  	case CHIP_NAVI14: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case CHIP_RENOIR: -#endif  		if (dcn10_register_irq_handlers(dm->adev)) {  			DRM_ERROR("DM: Failed to initialize IRQ\n");  			goto fail; @@ -2612,14 +2957,13 @@ static int dm_early_init(void *handle)  		adev->mode_info.num_hpd = 6;  		adev->mode_info.num_dig = 6;  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case CHIP_RAVEN:  		adev->mode_info.num_crtc = 4;  		adev->mode_info.num_hpd = 4;  		adev->mode_info.num_dig = 4;  		break;  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case CHIP_NAVI10:  	case CHIP_NAVI12:  		adev->mode_info.num_crtc = 6; @@ -2631,14 +2975,11 @@ static int dm_early_init(void *handle)  		adev->mode_info.num_hpd = 5;  		adev->mode_info.num_dig = 5;  		break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case CHIP_RENOIR:  		adev->mode_info.num_crtc = 4;  		adev->mode_info.num_hpd = 4;  		adev->mode_info.num_dig = 4;  		break; -#endif  	default:  		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);  		return -EINVAL; @@ -2931,14 +3272,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,  	if (adev->asic_type == CHIP_VEGA10 ||  	    adev->asic_type == CHIP_VEGA12 ||  	    adev->asic_type == CHIP_VEGA20 || -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	    adev->asic_type == CHIP_NAVI10 ||  	    adev->asic_type == CHIP_NAVI14 ||  	    adev->asic_type == CHIP_NAVI12 || -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	    adev->asic_type == CHIP_RENOIR || -#endif  	    adev->asic_type == CHIP_RAVEN) {  		/* Fill GFX9 params */  		tiling_info->gfx9.num_pipes = @@ -3256,12 +3593,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,  static enum dc_color_depth  convert_color_depth_from_display_info(const struct drm_connector *connector, -				      const struct 
drm_connector_state *state) +				      const struct drm_connector_state *state, +				      bool is_y420)  { -	uint8_t bpc = (uint8_t)connector->display_info.bpc; +	uint8_t bpc; -	/* Assume 8 bpc by default if no bpc is specified. */ -	bpc = bpc ? bpc : 8; +	if (is_y420) { +		bpc = 8; + +		/* Cap display bpc based on HDMI 2.0 HF-VSDB */ +		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) +			bpc = 16; +		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) +			bpc = 12; +		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) +			bpc = 10; +	} else { +		bpc = (uint8_t)connector->display_info.bpc; +		/* Assume 8 bpc by default if no bpc is specified. */ +		bpc = bpc ? bpc : 8; +	}  	if (!state)  		state = connector->state; @@ -3356,27 +3707,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)  	return color_space;  } -static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) -{ -	if (timing_out->display_color_depth <= COLOR_DEPTH_888) -		return; - -	timing_out->display_color_depth--; -} - -static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, -						const struct drm_display_info *info) +static bool adjust_colour_depth_from_display_info( +	struct dc_crtc_timing *timing_out, +	const struct drm_display_info *info)  { +	enum dc_color_depth depth = timing_out->display_color_depth;  	int normalized_clk; -	if (timing_out->display_color_depth <= COLOR_DEPTH_888) -		return;  	do {  		normalized_clk = timing_out->pix_clk_100hz / 10;  		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */  		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)  			normalized_clk /= 2;  		/* Adjusting pix clock following on HDMI spec based on colour depth */ -		switch (timing_out->display_color_depth) { +		switch (depth) { +		case COLOR_DEPTH_888: +			break;  		case COLOR_DEPTH_101010:  			normalized_clk = (normalized_clk * 30) / 24;  			break; @@ 
-3387,14 +3732,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_  			normalized_clk = (normalized_clk * 48) / 24;  			break;  		default: -			return; +			/* The above depths are the only ones valid for HDMI. */ +			return false;  		} -		if (normalized_clk <= info->max_tmds_clock) -			return; -		reduce_mode_colour_depth(timing_out); - -	} while (timing_out->display_color_depth > COLOR_DEPTH_888); - +		if (normalized_clk <= info->max_tmds_clock) { +			timing_out->display_color_depth = depth; +			return true; +		} +	} while (--depth > COLOR_DEPTH_666); +	return false;  }  static void fill_stream_properties_from_drm_display_mode( @@ -3432,7 +3778,8 @@ static void fill_stream_properties_from_drm_display_mode(  	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;  	timing_out->display_color_depth = convert_color_depth_from_display_info( -		connector, connector_state); +		connector, connector_state, +		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));  	timing_out->scan_type = SCANNING_TYPE_NODATA;  	timing_out->hdmi_vic = 0; @@ -3474,8 +3821,14 @@ static void fill_stream_properties_from_drm_display_mode(  	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;  	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; -	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) -		adjust_colour_depth_from_display_info(timing_out, info); +	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { +		if (!adjust_colour_depth_from_display_info(timing_out, info) && +		    drm_mode_is_420_also(info, mode_in) && +		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { +			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; +			adjust_colour_depth_from_display_info(timing_out, info); +		} +	}  }  static void fill_audio_info(struct audio_info *audio_info, @@ -3644,10 +3997,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  	bool scale = dm_state ? 
(dm_state->scaling != RMX_OFF) : false;  	int mode_refresh;  	int preferred_refresh = 0; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +#if defined(CONFIG_DRM_AMD_DC_DCN)  	struct dsc_dec_dpcd_caps dsc_caps; -	uint32_t link_bandwidth_kbps;  #endif +	uint32_t link_bandwidth_kbps;  	struct dc_sink *sink = NULL;  	if (aconnector == NULL) { @@ -3722,16 +4075,19 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		fill_stream_properties_from_drm_display_mode(stream,  			&mode, &aconnector->base, con_state, old_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	stream->timing.flags.DSC = 0;  	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { -		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, +#if defined(CONFIG_DRM_AMD_DC_DCN) +		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, +				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,  				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,  				      &dsc_caps); +#endif  		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,  							     dc_link_get_link_cap(aconnector->dc_link)); +#if defined(CONFIG_DRM_AMD_DC_DCN)  		if (dsc_caps.is_dsc_supported)  			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],  						  &dsc_caps, @@ -3740,8 +4096,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  						  &stream->timing,  						  &stream->timing.dsc_cfg))  				stream->timing.flags.DSC = 1; -	}  #endif +	}  	update_stream_scaling_settings(&mode, dm_state, stream); @@ -3761,7 +4117,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  			struct dmcu *dmcu = core_dc->res_pool->dmcu;  			stream->psr_version = dmcu->dmcu_version.psr_version; -			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); +			mod_build_vsc_infopacket(stream, +					&stream->vsc_infopacket, +					&stream->use_vsc_sdp_for_colorimetry);  		}  	}  finish: @@ -3852,6 +4210,10 @@ static inline 
int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)  	struct amdgpu_device *adev = crtc->dev->dev_private;  	int rc; +	/* Do not set vupdate for DCN hardware */ +	if (adev->family > AMDGPU_FAMILY_AI) +		return 0; +  	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;  	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; @@ -4095,7 +4457,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)  		state->underscan_hborder = 0;  		state->underscan_vborder = 0;  		state->base.max_requested_bpc = 8; - +		state->vcpi_slots = 0; +		state->pbn = 0;  		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)  			state->abm_level = amdgpu_dm_abm_level; @@ -4123,7 +4486,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)  	new_state->underscan_enable = state->underscan_enable;  	new_state->underscan_hborder = state->underscan_hborder;  	new_state->underscan_vborder = state->underscan_vborder; - +	new_state->vcpi_slots = state->vcpi_slots; +	new_state->pbn = state->pbn;  	return &new_state->base;  } @@ -4520,10 +4884,69 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)  } +static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) +{ +	switch (display_color_depth) { +		case COLOR_DEPTH_666: +			return 6; +		case COLOR_DEPTH_888: +			return 8; +		case COLOR_DEPTH_101010: +			return 10; +		case COLOR_DEPTH_121212: +			return 12; +		case COLOR_DEPTH_141414: +			return 14; +		case COLOR_DEPTH_161616: +			return 16; +		default: +			break; +		} +	return 0; +} +  static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  					  struct drm_crtc_state *crtc_state,  					  struct drm_connector_state *conn_state)  { +	struct drm_atomic_state *state = crtc_state->state; +	struct drm_connector *connector = conn_state->connector; +	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +	struct dm_connector_state *dm_new_connector_state = 
to_dm_connector_state(conn_state); +	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; +	struct drm_dp_mst_topology_mgr *mst_mgr; +	struct drm_dp_mst_port *mst_port; +	enum dc_color_depth color_depth; +	int clock, bpp = 0; +	bool is_y420 = false; + +	if (!aconnector->port || !aconnector->dc_sink) +		return 0; + +	mst_port = aconnector->port; +	mst_mgr = &aconnector->mst_port->mst_mgr; + +	if (!crtc_state->connectors_changed && !crtc_state->mode_changed) +		return 0; + +	if (!state->duplicated) { +		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && +				aconnector->force_yuv420_output; +		color_depth = convert_color_depth_from_display_info(connector, conn_state, +								    is_y420); +		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; +		clock = adjusted_mode->clock; +		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); +	} +	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, +									   mst_mgr, +									   mst_port, +									   dm_new_connector_state->pbn, +									   0); +	if (dm_new_connector_state->vcpi_slots < 0) { +		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); +		return dm_new_connector_state->vcpi_slots; +	}  	return 0;  } @@ -4532,6 +4955,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {  	.atomic_check = dm_encoder_helper_atomic_check  }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, +					    struct dc_state *dc_state) +{ +	struct dc_stream_state *stream = NULL; +	struct drm_connector *connector; +	struct drm_connector_state *new_con_state, *old_con_state; +	struct amdgpu_dm_connector *aconnector; +	struct dm_connector_state *dm_conn_state; +	int i, j, clock, bpp; +	int vcpi, pbn_div, pbn = 0; + +	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + +		aconnector = 
to_amdgpu_dm_connector(connector); + +		if (!aconnector->port) +			continue; + +		if (!new_con_state || !new_con_state->crtc) +			continue; + +		dm_conn_state = to_dm_connector_state(new_con_state); + +		for (j = 0; j < dc_state->stream_count; j++) { +			stream = dc_state->streams[j]; +			if (!stream) +				continue; + +			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) +				break; + +			stream = NULL; +		} + +		if (!stream) +			continue; + +		if (stream->timing.flags.DSC != 1) { +			drm_dp_mst_atomic_enable_dsc(state, +						     aconnector->port, +						     dm_conn_state->pbn, +						     0, +						     false); +			continue; +		} + +		pbn_div = dm_mst_get_pbn_divider(stream->link); +		bpp = stream->timing.dsc_cfg.bits_per_pixel; +		clock = stream->timing.pix_clk_100hz / 10; +		pbn = drm_dp_calc_pbn_mode(clock, bpp, true); +		vcpi = drm_dp_mst_atomic_enable_dsc(state, +						    aconnector->port, +						    pbn, pbn_div, +						    true); +		if (vcpi < 0) +			return vcpi; + +		dm_conn_state->pbn = pbn; +		dm_conn_state->vcpi_slots = vcpi; +	} +	return 0; +} +#endif +  static void dm_drm_plane_reset(struct drm_plane *plane)  {  	struct dm_plane_state *amdgpu_state = NULL; @@ -5194,9 +5682,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); -	/* This defaults to the max in the range, but we want 8bpc. */ -	aconnector->base.state->max_bpc = 8; -	aconnector->base.state->max_requested_bpc = 8; +	/* This defaults to the max in the range, but we want 8bpc for non-edp. */ +	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 
16 : 8; +	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;  	if (connector_type == DRM_MODE_CONNECTOR_eDP &&  	    dc_is_dmcu_initialized(adev->dm.dc)) { @@ -5215,7 +5703,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  			&aconnector->base);  #ifdef CONFIG_DRM_AMD_DC_HDCP  		if (adev->asic_type >= CHIP_RAVEN) -			drm_connector_attach_content_protection_property(&aconnector->base, false); +			drm_connector_attach_content_protection_property(&aconnector->base, true);  #endif  	}  } @@ -5324,11 +5812,12 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  	connector_type = to_drm_connector_type(link->connector_signal); -	res = drm_connector_init( +	res = drm_connector_init_with_ddc(  			dm->ddev,  			&aconnector->base,  			&amdgpu_dm_connector_funcs, -			connector_type); +			connector_type, +			&i2c->base);  	if (res) {  		DRM_ERROR("connector_init failed\n"); @@ -5466,6 +5955,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,  {  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +	if (old_state->hdcp_content_type != state->hdcp_content_type && +	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { +		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +		return true; +	} +  	/* CP is being re enabled, ignore this */  	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&  	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { @@ -5494,17 +5989,6 @@ static bool is_content_protection_different(struct drm_connector_state *state,  	return false;  } -static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, -				      struct hdcp_workqueue *hdcp_w) -{ -	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - -	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) -		
hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); -	else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) -		hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); - -}  #endif  static void remove_stream(struct amdgpu_device *adev,  			  struct amdgpu_crtc *acrtc, @@ -6474,7 +6958,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		}  		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) -			update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); +			hdcp_update_display( +				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, +				new_con_state->hdcp_content_type, +				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true +													 : false);  	}  #endif @@ -7264,7 +7752,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,  	int i, j, num_plane, ret = 0;  	struct drm_plane_state *old_plane_state, *new_plane_state;  	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; -	struct drm_crtc *new_plane_crtc, *old_plane_crtc; +	struct drm_crtc *new_plane_crtc;  	struct drm_plane *plane;  	struct drm_crtc *crtc; @@ -7310,7 +7798,6 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,  			uint64_t tiling_flags;  			new_plane_crtc = new_plane_state->crtc; -			old_plane_crtc = old_plane_state->crtc;  			new_dm_plane_state = to_dm_plane_state(new_plane_state);  			old_dm_plane_state = to_dm_plane_state(old_plane_state); @@ -7411,6 +7898,29 @@ cleanup:  	return ret;  } +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ +	struct drm_connector *connector; +	struct drm_connector_state *conn_state; +	struct amdgpu_dm_connector *aconnector = NULL; +	int i; +	for_each_new_connector_in_state(state, connector, conn_state, i) { +		if (conn_state->crtc != crtc) +			
continue; + +		aconnector = to_amdgpu_dm_connector(connector); +		if (!aconnector->port || !aconnector->mst_port) +			aconnector = NULL; +		else +			break; +	} + +	if (!aconnector) +		return 0; + +	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +} +  /**   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.   * @dev: The DRM device @@ -7463,6 +7973,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	if (ret)  		goto fail; +	if (adev->asic_type >= CHIP_NAVI10) { +		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { +			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { +				ret = add_affected_mst_dsc_crtcs(state, crtc); +				if (ret) +					goto fail; +			} +		} +	} +  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&  		    !new_crtc_state->color_mgmt_changed && @@ -7634,6 +8154,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  		if (ret)  			goto fail; +#if defined(CONFIG_DRM_AMD_DC_DCN) +		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) +			goto fail; + +		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); +		if (ret) +			goto fail; +#endif +  		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {  			ret = -EINVAL;  			goto fail; @@ -7662,6 +8191,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  				dc_retain_state(old_dm_state->context);  		}  	} +	/* Perform validation of MST topology in the state*/ +	ret = drm_dp_mst_atomic_check(state); +	if (ret) +		goto fail;  	/* Store the overall update type for use later in atomic check. 
*/  	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { @@ -7860,17 +8393,37 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)  bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)  {  	struct dc_link *link = stream->link; -	struct dc_static_screen_events triggers = {0}; +	unsigned int vsync_rate_hz = 0; +	struct dc_static_screen_params params = {0}; +	/* Calculate number of static frames before generating interrupt to +	 * enter PSR. +	 */ +	unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; +	// Init fail safe of 2 frames static +	unsigned int num_frames_static = 2;  	DRM_DEBUG_DRIVER("Enabling psr...\n"); -	triggers.cursor_update = true; -	triggers.overlay_update = true; -	triggers.surface_update = true; +	vsync_rate_hz = div64_u64(div64_u64(( +			stream->timing.pix_clk_100hz * 100), +			stream->timing.v_total), +			stream->timing.h_total); + +	/* Round up +	 * Calculate number of frames such that at least 30 ms of time has +	 * passed. +	 */ +	if (vsync_rate_hz != 0) +		num_frames_static = (30000 / frame_time_microsec) + 1; + +	params.triggers.cursor_update = true; +	params.triggers.overlay_update = true; +	params.triggers.surface_update = true; +	params.num_frames = num_frames_static; -	dc_stream_set_static_screen_events(link->ctx->dc, +	dc_stream_set_static_screen_params(link->ctx->dc,  					   &stream, 1, -					   &triggers); +					   ¶ms);  	return dc_link_set_psr_allow_active(link, true, false);  } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 77c5166e6b08..7ea9acb0358d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -57,6 +57,8 @@ struct amdgpu_device;  struct drm_device;  struct amdgpu_dm_irq_handler_data;  struct dc; +struct amdgpu_bo; +struct dmub_srv;  struct common_irq_params {  	struct amdgpu_device *adev; @@ -122,6 +124,57 @@ struct amdgpu_display_manager 
{  	struct dc *dc;  	/** +	 * @dmub_srv: +	 * +	 * DMUB service, used for controlling the DMUB on hardware +	 * that supports it. The pointer to the dmub_srv will be +	 * NULL on hardware that does not support it. +	 */ +	struct dmub_srv *dmub_srv; + +	/** +	 * @dmub_fb_info: +	 * +	 * Framebuffer regions for the DMUB. +	 */ +	struct dmub_srv_fb_info *dmub_fb_info; + +	/** +	 * @dmub_fw: +	 * +	 * DMUB firmware, required on hardware that has DMUB support. +	 */ +	const struct firmware *dmub_fw; + +	/** +	 * @dmub_bo: +	 * +	 * Buffer object for the DMUB. +	 */ +	struct amdgpu_bo *dmub_bo; + +	/** +	 * @dmub_bo_gpu_addr: +	 * +	 * GPU virtual address for the DMUB buffer object. +	 */ +	u64 dmub_bo_gpu_addr; + +	/** +	 * @dmub_bo_cpu_addr: +	 * +	 * CPU address for the DMUB buffer object. +	 */ +	void *dmub_bo_cpu_addr; + +	/** +	 * @dmcub_fw_version: +	 * +	 * DMCUB firmware version. +	 */ +	uint32_t dmcub_fw_version; + +	/**  	 * @cgs_device:  	 *  	 * The Common Graphics Services device. 
It provides an interface for @@ -241,7 +294,6 @@ struct amdgpu_display_manager {  	const struct firmware *fw_dmcu;  	uint32_t dmcu_fw_version; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	/**  	 * @soc_bounding_box:  	 * @@ -249,7 +301,6 @@ struct amdgpu_display_manager {  	 * available in FW  	 */  	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif  };  struct amdgpu_dm_connector { @@ -279,6 +330,7 @@ struct amdgpu_dm_connector {  	struct drm_dp_mst_port *port;  	struct amdgpu_dm_connector *mst_port;  	struct amdgpu_encoder *mst_encoder; +	struct drm_dp_aux *dsc_aux;  	/* TODO see if we can merge with ddc_bus or make a dm_connector */  	struct amdgpu_i2c_adapter *i2c; @@ -359,6 +411,8 @@ struct dm_connector_state {  	bool underscan_enable;  	bool freesync_capable;  	uint8_t abm_level; +	int vcpi_slots; +	uint64_t pbn;  };  #define to_dm_connector_state(x)\ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index bdb37e611015..f81d3439ee8c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us  	dc_link_set_test_pattern(  		link,  		test_pattern, +		DP_TEST_PATTERN_COLOR_SPACE_RGB,  		&link_training_settings,  		custom_pattern,  		10); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 77181ddf6c8e..ae329335dfcc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -85,42 +85,54 @@ static void process_output(struct hdcp_workqueue *hdcp_work)  		schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,  				      msecs_to_jiffies(output.watchdog_timer_delay)); +	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));  } 
-void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, +			 unsigned int link_index, +			 struct amdgpu_dm_connector *aconnector, +			 uint8_t content_type, +			 bool enable_encryption)  {  	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];  	struct mod_hdcp_display *display = &hdcp_work[link_index].display;  	struct mod_hdcp_link *link = &hdcp_work[link_index].link; +	struct mod_hdcp_display_query query;  	mutex_lock(&hdcp_w->mutex);  	hdcp_w->aconnector = aconnector; -	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); - -	schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - -	process_output(hdcp_w); +	query.display = NULL; +	mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); -	mutex_unlock(&hdcp_w->mutex); +	if (query.display != NULL) { +		memcpy(display, query.display, sizeof(struct mod_hdcp_display)); +		mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); -} +		hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; -void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,  unsigned int display_index) -{ -	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; +		if (enable_encryption) { +			display->adjust.disable = 0; +			if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) +				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; +			else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) +				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; -	mutex_lock(&hdcp_w->mutex); +			schedule_delayed_work(&hdcp_w->property_validate_dwork, +					      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); +		} else { +			display->adjust.disable = 1; +			hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +			cancel_delayed_work(&hdcp_w->property_validate_dwork); +		} -	
mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); +		display->state = MOD_HDCP_DISPLAY_ACTIVE; +	} -	cancel_delayed_work(&hdcp_w->property_validate_dwork); -	hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);  	process_output(hdcp_w); -  	mutex_unlock(&hdcp_w->mutex); -  }  void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -190,10 +202,16 @@ static void event_property_update(struct work_struct *work)  		}  	} -	if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) -		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); -	else +	if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { +		if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && +		    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) +			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); +		else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && +			 hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) +			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); +	} else {  		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); +	}  	mutex_unlock(&hdcp_work->mutex); @@ -207,6 +225,9 @@ static void event_property_validate(struct work_struct *work)  	struct mod_hdcp_display_query query;  	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; +	if (!aconnector) +		return; +  	mutex_lock(&hdcp_work->mutex);  	query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; @@ -217,8 +238,6 @@ static void event_property_validate(struct work_struct *work)  		schedule_work(&hdcp_work->property_update_work);  	} -	schedule_delayed_work(&hdcp_work->property_validate_dwork, 
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); -  	mutex_unlock(&hdcp_work->mutex);  } @@ -294,8 +313,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)  	link->dig_be = config->link_enc_inst;  	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;  	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; -	link->adjust.hdcp2.disable = 1; +	display->adjust.disable = 1; +	link->adjust.auth_delay = 2; +	hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);  }  struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index d3ba505d0696..6abde86bce4a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -54,9 +54,12 @@ struct hdcp_workqueue {  	uint8_t max_link;  }; -void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, -		      struct amdgpu_dm_connector *aconnector); -void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, +			 unsigned int link_index, +			 struct amdgpu_dm_connector *aconnector, +			 uint8_t content_type, +			 bool enable_encryption); +  void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);  void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);  void hdcp_destroy(struct hdcp_workqueue *work); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 11e5784aa62a..069b7a6f5597 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -37,6 +37,7 @@  #include "dc.h"  #include "amdgpu_dm.h"  
#include "amdgpu_dm_irq.h" +#include "amdgpu_dm_mst_types.h"  #include "dm_helpers.h" @@ -97,8 +98,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(  			(struct edid *) edid->raw_edid);  	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); -	if (sad_count < 0) -		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);  	if (sad_count <= 0)  		return result; @@ -182,19 +181,22 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(  		bool enable)  {  	struct amdgpu_dm_connector *aconnector; +	struct dm_connector_state *dm_conn_state;  	struct drm_dp_mst_topology_mgr *mst_mgr;  	struct drm_dp_mst_port *mst_port; -	int slots = 0;  	bool ret; -	int clock; -	int bpp = 0; -	int pbn = 0;  	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; +	/* Accessing the connector state is required for vcpi_slots allocation +	 * and directly relies on behaviour in commit check +	 * that blocks before commit guaranteeing that the state +	 * is not gonna be swapped while still in use in commit tail */  	if (!aconnector || !aconnector->mst_port)  		return false; +	dm_conn_state = to_dm_connector_state(aconnector->base.state); +  	mst_mgr = &aconnector->mst_port->mst_mgr;  	if (!mst_mgr->mst_state) @@ -203,42 +205,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(  	mst_port = aconnector->port;  	if (enable) { -		clock = stream->timing.pix_clk_100hz / 10; - -		switch (stream->timing.display_color_depth) { - -		case COLOR_DEPTH_666: -			bpp = 6; -			break; -		case COLOR_DEPTH_888: -			bpp = 8; -			break; -		case COLOR_DEPTH_101010: -			bpp = 10; -			break; -		case COLOR_DEPTH_121212: -			bpp = 12; -			break; -		case COLOR_DEPTH_141414: -			bpp = 14; -			break; -		case COLOR_DEPTH_161616: -			bpp = 16; -			break; -		default: -			ASSERT(bpp != 0); -			break; -		} - -		bpp = bpp * 3; - -		/* TODO need to know link rate */ - -		pbn = drm_dp_calc_pbn_mode(clock, bpp); - -		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn); -		ret = 
drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots); +		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, +					       dm_conn_state->pbn, +					       dm_conn_state->vcpi_slots);  		if (!ret)  			return false; @@ -540,7 +510,6 @@ bool dm_helpers_submit_i2c(  	return result;  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  bool dm_helpers_dp_write_dsc_enable(  		struct dc_context *ctx,  		const struct dc_stream_state *stream, @@ -548,10 +517,25 @@ bool dm_helpers_dp_write_dsc_enable(  )  {  	uint8_t enable_dsc = enable ? 1 : 0; +	struct amdgpu_dm_connector *aconnector; + +	if (!stream) +		return false; + +	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + +		if (!aconnector->dsc_aux) +			return false; + +		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0); +	} + +	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) +		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); -	return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1); +	return false;  } -#endif  bool dm_helpers_is_dp_sink_present(struct dc_link *link)  { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 64445c4cc4c2..cbcf504f73a5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,   */  static void dm_irq_work_func(struct work_struct *work)  { -	struct list_head *entry;  	struct irq_list_head *irq_list_head =  		container_of(work, struct irq_list_head, work);  	struct list_head *handler_list = &irq_list_head->head;  	struct amdgpu_dm_irq_handler_data *handler_data; -	list_for_each(entry, handler_list) { -		handler_data = list_entry(entry, -					  struct amdgpu_dm_irq_handler_data, -					  
list); - +	list_for_each_entry(handler_data, handler_list, list) {  		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",  				handler_data->irq_source); @@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,  					 enum dc_irq_source irq_source)  {  	struct amdgpu_dm_irq_handler_data *handler_data; -	struct list_head *entry;  	unsigned long irq_table_flags;  	DM_IRQ_TABLE_LOCK(adev, irq_table_flags); -	list_for_each( -		entry, -		&adev->dm.irq_handler_list_high_tab[irq_source]) { - -		handler_data = list_entry(entry, -					  struct amdgpu_dm_irq_handler_data, -					  list); - +	list_for_each_entry(handler_data, +			    &adev->dm.irq_handler_list_high_tab[irq_source], +			    list) {  		/* Call a subcomponent which registered for immediate  		 * interrupt notification */  		handler_data->handler(handler_data->handler_arg); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 2bf8534c18fb..96b391e4b3e7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -25,6 +25,7 @@  #include <linux/version.h>  #include <drm/drm_atomic_helper.h> +#include <drm/drm_dp_mst_helper.h>  #include "dm_services.h"  #include "amdgpu.h"  #include "amdgpu_dm.h" @@ -39,6 +40,12 @@  #if defined(CONFIG_DEBUG_FS)  #include "amdgpu_dm_debugfs.h"  #endif + + +#if defined(CONFIG_DRM_AMD_DC_DCN) +#include "dc/dcn20/dcn20_resource.h" +#endif +  /* #define TRACE_DPCD */  #ifdef TRACE_DPCD @@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {  	.early_unregister = amdgpu_dm_mst_connector_early_unregister,  }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) +{ +	struct dc_sink *dc_sink = aconnector->dc_sink; +	struct drm_dp_mst_port *port = aconnector->port; +	u8 dsc_caps[16] = { 0 }; 
+ +	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); + +	if (!aconnector->dsc_aux) +		return false; + +	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0) +		return false; + +	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, +				   dsc_caps, NULL, +				   &dc_sink->sink_dsc_caps.dsc_dec_caps)) +		return false; + +	return true; +} +#endif +  static int dm_dp_mst_get_modes(struct drm_connector *connector)  {  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)  		/* dc_link_add_remote_sink returns a new reference */  		aconnector->dc_sink = dc_sink; -		if (aconnector->dc_sink) +		if (aconnector->dc_sink) {  			amdgpu_dm_update_freesync_caps(  					connector, aconnector->edid); +#if defined(CONFIG_DRM_AMD_DC_DCN) +			if (!validate_dsc_caps_on_connector(aconnector)) +				memset(&aconnector->dc_sink->sink_dsc_caps, +				       0, sizeof(aconnector->dc_sink->sink_dsc_caps)); +#endif +		}  	}  	drm_connector_update_edid_property( @@ -254,11 +291,43 @@ dm_dp_mst_detect(struct drm_connector *connector,  				      aconnector->port);  } +static int dm_dp_mst_atomic_check(struct drm_connector *connector, +				struct drm_atomic_state *state) +{ +	struct drm_connector_state *new_conn_state = +			drm_atomic_get_new_connector_state(state, connector); +	struct drm_connector_state *old_conn_state = +			drm_atomic_get_old_connector_state(state, connector); +	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); +	struct drm_crtc_state *new_crtc_state; +	struct drm_dp_mst_topology_mgr *mst_mgr; +	struct drm_dp_mst_port *mst_port; + +	mst_port = aconnector->port; +	mst_mgr = &aconnector->mst_port->mst_mgr; + +	if (!old_conn_state->crtc) +		return 0; + +	if (new_conn_state->crtc) { +		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); +		if (!new_crtc_state || +		    
!drm_atomic_crtc_needs_modeset(new_crtc_state) || +		    new_crtc_state->enable) +			return 0; +		} + +	return drm_dp_atomic_release_vcpi_slots(state, +						mst_mgr, +						mst_port); +} +  static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {  	.get_modes = dm_dp_mst_get_modes,  	.mode_valid = amdgpu_dm_connector_mode_valid,  	.atomic_best_encoder = dm_mst_atomic_best_encoder,  	.detect_ctx = dm_dp_mst_detect, +	.atomic_check = dm_dp_mst_atomic_check,  };  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -434,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,  		aconnector->connector_id);  } +int dm_mst_get_pbn_divider(struct dc_link *link) +{ +	if (!link) +		return 0; + +	return dc_link_bandwidth_kbps(link, +			dc_link_get_link_cap(link)) / (8 * 1000 * 54); +} + +#if defined(CONFIG_DRM_AMD_DC_DCN) + +struct dsc_mst_fairness_params { +	struct dc_crtc_timing *timing; +	struct dc_sink *sink; +	struct dc_dsc_bw_range bw_range; +	bool compression_possible; +	struct drm_dp_mst_port *port; +}; + +struct dsc_mst_fairness_vars { +	int pbn; +	bool dsc_enabled; +	int bpp_x16; +}; + +static int kbps_to_peak_pbn(int kbps) +{ +	u64 peak_kbps = kbps; + +	peak_kbps *= 1006; +	peak_kbps = div_u64(peak_kbps, 1000); +	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); +} + +static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, +		struct dsc_mst_fairness_vars *vars, +		int count) +{ +	int i; + +	for (i = 0; i < count; i++) { +		memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); +		if (vars[i].dsc_enabled && dc_dsc_compute_config( +					params[i].sink->ctx->dc->res_pool->dscs[0], +					¶ms[i].sink->sink_dsc_caps.dsc_dec_caps, +					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, +					0, +					params[i].timing, +					¶ms[i].timing->dsc_cfg)) { +			params[i].timing->flags.DSC = 1; +			
params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; +		} else { +			params[i].timing->flags.DSC = 0; +		} +	} +} + +static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) +{ +	struct dc_dsc_config dsc_config; +	u64 kbps; + +	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); +	dc_dsc_compute_config( +			param.sink->ctx->dc->res_pool->dscs[0], +			¶m.sink->sink_dsc_caps.dsc_dec_caps, +			param.sink->ctx->dc->debug.dsc_min_slice_height_override, +			(int) kbps, param.timing, &dsc_config); + +	return dsc_config.bits_per_pixel; +} + +static void increase_dsc_bpp(struct drm_atomic_state *state, +			     struct dc_link *dc_link, +			     struct dsc_mst_fairness_params *params, +			     struct dsc_mst_fairness_vars *vars, +			     int count) +{ +	int i; +	bool bpp_increased[MAX_PIPES]; +	int initial_slack[MAX_PIPES]; +	int min_initial_slack; +	int next_index; +	int remaining_to_increase = 0; +	int pbn_per_timeslot; +	int link_timeslots_used; +	int fair_pbn_alloc; + +	for (i = 0; i < count; i++) { +		if (vars[i].dsc_enabled) { +			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; +			bpp_increased[i] = false; +			remaining_to_increase += 1; +		} else { +			initial_slack[i] = 0; +			bpp_increased[i] = true; +		} +	} + +	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link, +			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54); + +	while (remaining_to_increase) { +		next_index = -1; +		min_initial_slack = -1; +		for (i = 0; i < count; i++) { +			if (!bpp_increased[i]) { +				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { +					min_initial_slack = initial_slack[i]; +					next_index = i; +				} +			} +		} + +		if (next_index == -1) +			break; + +		link_timeslots_used = 0; + +		for (i = 0; i < count; i++) +			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + +		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; + +		if (initial_slack[next_index] > 
fair_pbn_alloc) { +			vars[next_index].pbn += fair_pbn_alloc; +			if (drm_dp_atomic_find_vcpi_slots(state, +							  params[next_index].port->mgr, +							  params[next_index].port, +							  vars[next_index].pbn,\ +							  dm_mst_get_pbn_divider(dc_link)) < 0) +				return; +			if (!drm_dp_mst_atomic_check(state)) { +				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); +			} else { +				vars[next_index].pbn -= fair_pbn_alloc; +				if (drm_dp_atomic_find_vcpi_slots(state, +								  params[next_index].port->mgr, +								  params[next_index].port, +								  vars[next_index].pbn, +								  dm_mst_get_pbn_divider(dc_link)) < 0) +					return; +			} +		} else { +			vars[next_index].pbn += initial_slack[next_index]; +			if (drm_dp_atomic_find_vcpi_slots(state, +							  params[next_index].port->mgr, +							  params[next_index].port, +							  vars[next_index].pbn, +							  dm_mst_get_pbn_divider(dc_link)) < 0) +				return; +			if (!drm_dp_mst_atomic_check(state)) { +				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; +			} else { +				vars[next_index].pbn -= initial_slack[next_index]; +				if (drm_dp_atomic_find_vcpi_slots(state, +								  params[next_index].port->mgr, +								  params[next_index].port, +								  vars[next_index].pbn, +								  dm_mst_get_pbn_divider(dc_link)) < 0) +					return; +			} +		} + +		bpp_increased[next_index] = true; +		remaining_to_increase--; +	} +} + +static void try_disable_dsc(struct drm_atomic_state *state, +			    struct dc_link *dc_link, +			    struct dsc_mst_fairness_params *params, +			    struct dsc_mst_fairness_vars *vars, +			    int count) +{ +	int i; +	bool tried[MAX_PIPES]; +	int kbps_increase[MAX_PIPES]; +	int max_kbps_increase; +	int next_index; +	int remaining_to_try = 0; + +	for (i = 0; i < count; i++) { +		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) { +			kbps_increase[i] = params[i].bw_range.stream_kbps - 
params[i].bw_range.max_kbps; +			tried[i] = false; +			remaining_to_try += 1; +		} else { +			kbps_increase[i] = 0; +			tried[i] = true; +		} +	} + +	while (remaining_to_try) { +		next_index = -1; +		max_kbps_increase = -1; +		for (i = 0; i < count; i++) { +			if (!tried[i]) { +				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { +					max_kbps_increase = kbps_increase[i]; +					next_index = i; +				} +			} +		} + +		if (next_index == -1) +			break; + +		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); +		if (drm_dp_atomic_find_vcpi_slots(state, +						  params[next_index].port->mgr, +						  params[next_index].port, +						  vars[next_index].pbn, +						  0) < 0) +			return; + +		if (!drm_dp_mst_atomic_check(state)) { +			vars[next_index].dsc_enabled = false; +			vars[next_index].bpp_x16 = 0; +		} else { +			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); +			if (drm_dp_atomic_find_vcpi_slots(state, +							  params[next_index].port->mgr, +							  params[next_index].port, +							  vars[next_index].pbn, +							  dm_mst_get_pbn_divider(dc_link)) < 0) +				return; +		} + +		tried[next_index] = true; +		remaining_to_try--; +	} +} + +static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, +					     struct dc_state *dc_state, +					     struct dc_link *dc_link) +{ +	int i; +	struct dc_stream_state *stream; +	struct dsc_mst_fairness_params params[MAX_PIPES]; +	struct dsc_mst_fairness_vars vars[MAX_PIPES]; +	struct amdgpu_dm_connector *aconnector; +	int count = 0; + +	memset(params, 0, sizeof(params)); + +	/* Set up params */ +	for (i = 0; i < dc_state->stream_count; i++) { +		struct dc_dsc_policy dsc_policy = {0}; + +		stream = dc_state->streams[i]; + +		if (stream->link != dc_link) +			continue; + +		stream->timing.flags.DSC = 0; + +		params[count].timing = &stream->timing; +		params[count].sink = stream->sink; +		aconnector = (struct amdgpu_dm_connector 
*)stream->dm_stream_context; +		params[count].port = aconnector->port; +		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported; +		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); +		if (!dc_dsc_compute_bandwidth_range( +				stream->sink->ctx->dc->res_pool->dscs[0], +				stream->sink->ctx->dc->debug.dsc_min_slice_height_override, +				dsc_policy.min_target_bpp, +				dsc_policy.max_target_bpp, +				&stream->sink->sink_dsc_caps.dsc_dec_caps, +				&stream->timing, ¶ms[count].bw_range)) +			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + +		count++; +	} +	/* Try no compression */ +	for (i = 0; i < count; i++) { +		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); +		vars[i].dsc_enabled = false; +		vars[i].bpp_x16 = 0; +		if (drm_dp_atomic_find_vcpi_slots(state, +						 params[i].port->mgr, +						 params[i].port, +						 vars[i].pbn, +						 0) < 0) +			return false; +	} +	if (!drm_dp_mst_atomic_check(state)) { +		set_dsc_configs_from_fairness_vars(params, vars, count); +		return true; +	} + +	/* Try max compression */ +	for (i = 0; i < count; i++) { +		if (params[i].compression_possible) { +			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); +			vars[i].dsc_enabled = true; +			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; +			if (drm_dp_atomic_find_vcpi_slots(state, +							  params[i].port->mgr, +							  params[i].port, +							  vars[i].pbn, +							  dm_mst_get_pbn_divider(dc_link)) < 0) +				return false; +		} else { +			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); +			vars[i].dsc_enabled = false; +			vars[i].bpp_x16 = 0; +			if (drm_dp_atomic_find_vcpi_slots(state, +							  params[i].port->mgr, +							  params[i].port, +							  vars[i].pbn, +							  0) < 0) +				return false; +		} +	} +	if (drm_dp_mst_atomic_check(state)) +		return false; + +	/* Optimize degree of compression */ +	increase_dsc_bpp(state, 
dc_link, params, vars, count); + +	try_disable_dsc(state, dc_link, params, vars, count); + +	set_dsc_configs_from_fairness_vars(params, vars, count); + +	return true; +} + +bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, +				       struct dc_state *dc_state) +{ +	int i, j; +	struct dc_stream_state *stream; +	bool computed_streams[MAX_PIPES]; +	struct amdgpu_dm_connector *aconnector; + +	for (i = 0; i < dc_state->stream_count; i++) +		computed_streams[i] = false; + +	for (i = 0; i < dc_state->stream_count; i++) { +		stream = dc_state->streams[i]; + +		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) +			continue; + +		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + +		if (!aconnector || !aconnector->dc_sink) +			continue; + +		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported) +			continue; + +		if (computed_streams[i]) +			continue; + +		mutex_lock(&aconnector->mst_mgr.lock); +		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { +			mutex_unlock(&aconnector->mst_mgr.lock); +			return false; +		} +		mutex_unlock(&aconnector->mst_mgr.lock); + +		for (j = 0; j < dc_state->stream_count; j++) { +			if (dc_state->streams[j]->link == stream->link) +				computed_streams[j] = true; +		} +	} + +	for (i = 0; i < dc_state->stream_count; i++) { +		stream = dc_state->streams[i]; + +		if (stream->timing.flags.DSC == 1) +			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream); +	} + +	return true; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 2da851b40042..d6813ce67bbd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -29,7 +29,14 @@  struct amdgpu_display_manager;  struct amdgpu_dm_connector; +int dm_mst_get_pbn_divider(struct dc_link *link); +  void 
amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,  				       struct amdgpu_dm_connector *aconnector); +#if defined(CONFIG_DRM_AMD_DC_DCN) +bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, +				       struct dc_state *dc_state); +#endif +  #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 778f186b3a05..a2e1a73f66b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -892,7 +892,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,  	return PP_SMU_RESULT_FAIL;  } -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  enum pp_smu_status pp_rn_get_dpm_clock_table(  		struct pp_smu *pp, struct dpm_clocks *clock_table)  { @@ -974,7 +973,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,  	return PP_SMU_RESULT_OK;  } -#endif  void dm_pp_get_funcs(  		struct dc_context *ctx, @@ -996,7 +994,6 @@ void dm_pp_get_funcs(  		funcs->rv_funcs.set_hard_min_fclk_by_freq =  				pp_rv_set_hard_min_fclk_by_freq;  		break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	case DCN_VERSION_2_0:  		funcs->ctx.ver = PP_SMU_VER_NV;  		funcs->nv_funcs.pp_smu.dm = ctx; @@ -1019,16 +1016,13 @@ void dm_pp_get_funcs(  		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;  		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;  		break; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  	case DCN_VERSION_2_1:  		funcs->ctx.ver = PP_SMU_VER_RN;  		funcs->rn_funcs.pp_smu.dm = ctx;  		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;  		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;  		break; -#endif  	default:  		DRM_ERROR("smu version is not supported !\n");  		break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index a160512a2f04..6e3dddc73246 100644 --- 
a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -25,19 +25,10 @@  DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN  DC_LIBS += dcn20 -endif - - -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  DC_LIBS += dsc -endif - -ifdef CONFIG_DRM_AMD_DC_DCN1_0  DC_LIBS += dcn10 dml -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1  DC_LIBS += dcn21  endif @@ -59,7 +50,7 @@ include $(AMD_DC)  DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \  dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN  DISPLAY_CORE += dc_vm_helper.o  endif @@ -70,5 +61,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)  AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)  AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) - - +DC_DMUB += dc_dmub_srv.o +AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) +AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index a50a76471107..7ad0cad0f4ef 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -25,7 +25,7 @@  # subcomponents.  BASICS = conversion.o fixpt31_32.o \ -	log_helpers.o vector.o +	log_helpers.o vector.o dc_common.o  AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c new file mode 100644 index 000000000000..b2fc4f8e6482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dc_common.h" +#include "basics/conversion.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space) +{ +	switch (output_color_space) { +	case COLOR_SPACE_SRGB: +	case COLOR_SPACE_SRGB_LIMITED: +	case COLOR_SPACE_2020_RGB_FULLRANGE: +	case COLOR_SPACE_2020_RGB_LIMITEDRANGE: +	case COLOR_SPACE_ADOBERGB: +		return true; +	case COLOR_SPACE_YCBCR601: +	case COLOR_SPACE_YCBCR709: +	case COLOR_SPACE_YCBCR601_LIMITED: +	case COLOR_SPACE_YCBCR709_LIMITED: +	case COLOR_SPACE_2020_YCBCR: +		return false; +	default: +		/* Add a case to switch */ +		BREAK_TO_DEBUGGER(); +		return false; +	} +} + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ +	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) +		return true; +	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) +		return true; +	return false; +} + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ +	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) +		return true; +	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) +		return true; +	return false; +} + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ +	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) +		return true; +	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) +		return true; +	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) +		return true; +	return false; +} + +void build_prescale_params(struct  dc_bias_and_scale *bias_and_scale, +		const struct dc_plane_state *plane_state) +{ +	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN +			&& plane_state->format != SURFACE_PIXEL_FORMAT_INVALID +			&& plane_state->input_csc_color_matrix.enable_adjustment +			&& plane_state->coeff_reduction_factor.value != 0) { +		bias_and_scale->scale_blue = fixed_point_to_int_frac( +			dc_fixpt_mul(plane_state->coeff_reduction_factor, +		
			dc_fixpt_from_fraction(256, 255)), +				2, +				13); +		bias_and_scale->scale_red = bias_and_scale->scale_blue; +		bias_and_scale->scale_green = bias_and_scale->scale_blue; +	} else { +		bias_and_scale->scale_blue = 0x2000; +		bias_and_scale->scale_red = 0x2000; +		bias_and_scale->scale_green = 0x2000; +	} +} + diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h new file mode 100644 index 000000000000..7c0cbf47e8ce --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h @@ -0,0 +1,42 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DC_COMMON_H__ +#define __DAL_DC_COMMON_H__ + +#include "core_types.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space); + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +void build_prescale_params(struct  dc_bias_and_scale *bias_and_scale, +		const struct dc_plane_state *plane_state); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 823843cd2613..008d4d11339d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create(  	return NULL;  } -static void destruct(struct bios_parser *bp) +static void bios_parser_destruct(struct bios_parser *bp)  {  	kfree(bp->base.bios_local_image);  	kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb)  		return;  	} -	destruct(bp); +	bios_parser_destruct(bp);  	kfree(bp);  	*dcb = NULL; @@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)  		break;  	default:  		break; -	}; +	}  	/* Unidentified device ID, return empty support mask. 
*/  	return 0; @@ -2739,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info(  	struct board_layout_info *board_layout_info)  {  	unsigned int i; -	struct bios_parser *bp;  	enum bp_result record_result;  	const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -2748,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info(  		0, 0  	}; -	bp = BP_FROM_DCB(dcb);  	if (board_layout_info == NULL) {  		DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");  		return BP_RESULT_BADINPUT; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 7873abea4112..2f1c9584ac32 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(  #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) -static void destruct(struct bios_parser *bp) +static void bios_parser2_destruct(struct bios_parser *bp)  {  	kfree(bp->base.bios_local_image);  	kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb)  		return;  	} -	destruct(bp); +	bios_parser2_destruct(bp);  	kfree(bp);  	*dcb = NULL; @@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,  	struct atom_display_object_path_v2 *object;  	struct atom_common_record_header *header;  	struct atom_i2c_record *record; +	struct atom_i2c_record dummy_record = {0};  	struct bios_parser *bp = BP_FROM_DCB(dcb);  	if (!info)  		return BP_RESULT_BADINPUT; +	if (id.type == OBJECT_TYPE_GENERIC) { +		dummy_record.i2c_id = id.id; + +		if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK) +			return BP_RESULT_OK; +		else +			return BP_RESULT_NORECORD; +	} +  	object = get_bios_object(bp, id);  	if (!object) @@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info(  	struct atom_gpio_pin_lut_v2_1 
*header;  	uint32_t count = 0;  	unsigned int table_index = 0; +	bool find_valid = false;  	if (!info)  		return BP_RESULT_BADINPUT; @@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info(  			- sizeof(struct atom_common_table_header))  				/ sizeof(struct atom_gpio_pin_assignment); -	table_index = record->i2c_id  & I2C_HW_LANE_MUX; - -	if (count < table_index) { -		bool find_valid = false; - -		for (table_index = 0; table_index < count; table_index++) { -			if (((record->i2c_id & I2C_HW_CAP) == ( -			header->gpio_pin[table_index].gpio_id & -							I2C_HW_CAP)) && -			((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  == -			(header->gpio_pin[table_index].gpio_id & -						I2C_HW_ENGINE_ID_MASK)) && -			((record->i2c_id & I2C_HW_LANE_MUX) == -			(header->gpio_pin[table_index].gpio_id & -							I2C_HW_LANE_MUX))) { -				/* still valid */ -				find_valid = true; -				break; -			} +	for (table_index = 0; table_index < count; table_index++) { +		if (((record->i2c_id & I2C_HW_CAP) == ( +		header->gpio_pin[table_index].gpio_id & +						I2C_HW_CAP)) && +		((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  == +		(header->gpio_pin[table_index].gpio_id & +					I2C_HW_ENGINE_ID_MASK)) && +		((record->i2c_id & I2C_HW_LANE_MUX) == +		(header->gpio_pin[table_index].gpio_id & +						I2C_HW_LANE_MUX))) { +			/* still valid */ +			find_valid = true; +			break;  		} -		/* If we don't find the entry that we are looking for then -		 *  we will return BP_Result_BadBiosTable. -		 */ -		if (find_valid == false) -			return BP_RESULT_BADBIOSTABLE;  	} +	/* If we don't find the entry that we are looking for then +	 *  we will return BP_Result_BadBiosTable. +	 */ +	if (find_valid == false) +		return BP_RESULT_BADBIOSTABLE; +  	/* get the GPIO_I2C_INFO */  	info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? 
true : false;  	info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX; @@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info(  		case 1:  			return get_ss_info_v4_1(bp, signal, index, ss_info);  		case 2: +		case 3:  			return get_ss_info_v4_2(bp, signal, index, ss_info);  		default:  			break; @@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)  		break;  	default:  		break; -	}; +	}  	/* Unidentified device ID, return empty support mask. */  	return 0; @@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1(  				bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;  	} +	info->oem_i2c_present = false; +  	return BP_RESULT_OK;  } @@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2(  					bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;  	} +	if (firmware_info->board_i2c_feature_id == 0x2) { +		info->oem_i2c_present = true; +		info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; +	} else { +		info->oem_i2c_present = false; +	} +  	return BP_RESULT_OK;  } @@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11(  	info->ma_channel_number = info_v11->umachannelnumber;  	info->lvds_ss_percentage =  	le16_to_cpu(info_v11->lvds_ss_percentage); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	info->dp_ss_control =  	le16_to_cpu(info_v11->reserved1); -#endif  	info->lvds_sspread_rate_in_10hz =  	le16_to_cpu(info_v11->lvds_ss_rate_10hz);  	info->hdmi_ss_percentage = @@ -1625,6 +1639,7 @@ static enum bp_result construct_integrated_info(  		/* Don't need to check major revision as they are all 1 */  		switch (revision.minor) {  		case 11: +		case 12:  			result = get_integrated_info_v11(bp, info);  			break;  		default: @@ -1825,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info(  	struct board_layout_info *board_layout_info)  {  	unsigned int i; -	struct bios_parser *bp;  	enum bp_result record_result;  	const unsigned int 
slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -1834,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info(  		0, 0  	}; -	bp = BP_FROM_DCB(dcb);  	if (board_layout_info == NULL) {  		DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");  		return BP_RESULT_BADINPUT; @@ -1914,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = {  	.get_board_layout_info = bios_get_board_layout_info,  }; -static bool bios_parser_construct( +static bool bios_parser2_construct(  	struct bios_parser *bp,  	struct bp_init_data *init,  	enum dce_version dce_version) @@ -2007,7 +2020,7 @@ struct dc_bios *firmware_parser_create(  	if (!bp)  		return NULL; -	if (bios_parser_construct(bp, init, dce_version)) +	if (bios_parser2_construct(bp, init, dce_version))  		return &bp->base;  	kfree(bp); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bb2e8105e6ab..2cb7a4288cb7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -37,6 +37,8 @@  #include "bios_parser_types_internal2.h"  #include "amdgpu.h" +#include "dc_dmub_srv.h" +#include "dc.h"  #define DC_LOGGER \  	bp->base.ctx->logger @@ -103,6 +105,21 @@ static void init_dig_encoder_control(struct bios_parser *bp)  	}  } +static void encoder_control_dmcub( +		struct dc_dmub_srv *dmcub, +		struct dig_encoder_stream_setup_parameters_v1_5 *dig) +{ +	struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 }; + +	encoder_control.header.type = DMUB_CMD__VBIOS; +	encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL; +	encoder_control.encoder_control.dig.stream_param = *dig; + +	dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header); +	dc_dmub_srv_cmd_execute(dmcub); +	dc_dmub_srv_wait_idle(dmcub); +} +  static enum bp_result encoder_control_digx_v1_5(  	struct bios_parser *bp,  	struct bp_encoder_control *cntl) @@ -155,6 +172,12 @@ static enum bp_result 
encoder_control_digx_v1_5(  			break;  		} +	if (bp->base.ctx->dc->ctx->dmub_srv && +	    bp->base.ctx->dc->debug.dmub_command_table) { +		encoder_control_dmcub(bp->base.ctx->dmub_srv, ¶ms); +		return BP_RESULT_OK; +	} +  	if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))  		result = BP_RESULT_OK; @@ -191,6 +214,22 @@ static void init_transmitter_control(struct bios_parser *bp)  	}  } +static void transmitter_control_dmcub( +		struct dc_dmub_srv *dmcub, +		struct dig_transmitter_control_parameters_v1_6 *dig) +{ +	struct dmub_rb_cmd_dig1_transmitter_control transmitter_control; + +	transmitter_control.header.type = DMUB_CMD__VBIOS; +	transmitter_control.header.sub_type = +		DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; +	transmitter_control.transmitter_control.dig = *dig; + +	dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header); +	dc_dmub_srv_cmd_execute(dmcub); +	dc_dmub_srv_wait_idle(dmcub); +} +  static enum bp_result transmitter_control_v1_6(  	struct bios_parser *bp,  	struct bp_transmitter_control *cntl) @@ -222,6 +261,11 @@ static enum bp_result transmitter_control_v1_6(  		__func__, ps.param.symclk_10khz);  	} +	if (bp->base.ctx->dc->ctx->dmub_srv && +	    bp->base.ctx->dc->debug.dmub_command_table) { +		transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param); +		return BP_RESULT_OK; +	}  /*color_depth not used any more, driver has deep color factor in the Phyclk*/  	if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) @@ -255,7 +299,20 @@ static void init_set_pixel_clock(struct bios_parser *bp)  	}  } +static void set_pixel_clock_dmcub( +		struct dc_dmub_srv *dmcub, +		struct set_pixel_clock_parameter_v1_7 *clk) +{ +	struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 }; + +	pixel_clock.header.type = DMUB_CMD__VBIOS; +	pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK; +	pixel_clock.pixel_clock.clk = *clk; +	dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header); +	dc_dmub_srv_cmd_execute(dmcub); +	dc_dmub_srv_wait_idle(dmcub); +}  static 
enum bp_result set_pixel_clock_v7(  	struct bios_parser *bp, @@ -331,6 +388,12 @@ static enum bp_result set_pixel_clock_v7(  		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)  			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; +		if (bp->base.ctx->dc->ctx->dmub_srv && +		    bp->base.ctx->dc->debug.dmub_command_table) { +			set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); +			return BP_RESULT_OK; +		} +  		if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))  			result = BP_RESULT_OK;  	} @@ -585,6 +648,21 @@ static void init_enable_disp_power_gating(  	}  } +static void enable_disp_power_gating_dmcub( +	struct dc_dmub_srv *dmcub, +	struct enable_disp_power_gating_parameters_v2_1 *pwr) +{ +	struct dmub_rb_cmd_enable_disp_power_gating power_gating; + +	power_gating.header.type = DMUB_CMD__VBIOS; +	power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; +	power_gating.power_gating.pwr = *pwr; + +	dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); +	dc_dmub_srv_cmd_execute(dmcub); +	dc_dmub_srv_wait_idle(dmcub); +} +  static enum bp_result enable_disp_power_gating_v2_1(  	struct bios_parser *bp,  	enum controller_id crtc_id, @@ -604,6 +682,13 @@ static enum bp_result enable_disp_power_gating_v2_1(  	ps.param.enable =  		bp->cmd_helper->disp_power_gating_action_to_atom(action); +	if (bp->base.ctx->dc->ctx->dmub_srv && +	    bp->base.ctx->dc->debug.dmub_command_table) { +		enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, +					       &ps.param); +		return BP_RESULT_OK; +	} +  	if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))  		result = BP_RESULT_OK; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index db153ddf0fee..7388c987c595 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -55,23 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2(  	case 
DCE_VERSION_11_22:  		*h = dal_cmd_tbl_helper_dce112_get_table2();  		return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case DCN_VERSION_1_0:  	case DCN_VERSION_1_01:  		*h = dal_cmd_tbl_helper_dce112_get_table2();  		return true;  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case DCN_VERSION_2_0:  		*h = dal_cmd_tbl_helper_dce112_get_table2();  		return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case DCN_VERSION_2_1:  		*h = dal_cmd_tbl_helper_dce112_get_table2();  		return true; -#endif  	case DCE_VERSION_12_0:  	case DCE_VERSION_12_1:  		*h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 26c6d735cdc7..4674aca8f206 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -1,5 +1,6 @@  #  # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC  #  # Permission is hereby granted, free of charge, to any person obtaining a  # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@  # It calculates Bandwidth and Watermarks values for HW programming  # +ifdef CONFIG_X86  calcs_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +calcs_ccflags := -mhard-float -maltivec +endif  ifdef CONFIG_CC_IS_GCC  ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1  endif  endif +ifdef CONFIG_X86  ifdef IS_OLD_GCC  # Stack alignment mismatch, proceed with caution.  
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4  else  calcs_ccflags += -msse2  endif +endif  CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)  CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) @@ -47,7 +56,7 @@ CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautologi  BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN  BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o  endif diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index a1d49256fab7..5d081c42e81b 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -154,14 +154,14 @@ static void calculate_bandwidth( -	if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; } -	else { -		d0_underlay_enable = 1; -	} -	if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; } -	else { -		d1_underlay_enable = 1; -	} +	if (data->d0_underlay_mode == bw_def_none) +		d0_underlay_enable = false; +	else +		d0_underlay_enable = true; +	if (data->d1_underlay_mode == bw_def_none) +		d1_underlay_enable = false; +	else +		d1_underlay_enable = true;  	data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;  	switch (data->underlay_surface_type) {  	case bw_def_420: @@ -286,8 +286,8 @@ static void calculate_bandwidth(  	data->cursor_width_pixels[2] = bw_int_to_fixed(0);  	data->cursor_width_pixels[3] = bw_int_to_fixed(0);  	/* graphics surface parameters from spreadsheet*/ -	fbc_enabled = 0; -	lpt_enabled = 0; +	fbc_enabled = false; +	lpt_enabled = false;  	for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {  		if (i < data->number_of_displays + 4) {  			if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { @@ -338,9 +338,9 @@ static void 
calculate_bandwidth(  			data->access_one_channel_only[i] = 0;  		}  		if (data->fbc_en[i] == 1) { -			fbc_enabled = 1; +			fbc_enabled = true;  			if (data->lpt_en[i] == 1) { -				lpt_enabled = 1; +				lpt_enabled = true;  			}  		}  		data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 9b2cb57bf2ba..a27d84ca15a5 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1,5 +1,6 @@  /*   * Copyright 2017 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), @@ -53,13 +54,9 @@   * remain as-is as it provides us with a guarantee from HW that it is correct.   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  /* Defaults from spreadsheet rev#247.   
* RV2 delta: dram_clock_change_latency, max_num_dpp   */ -#else -/* Defaults from spreadsheet rev#247 */ -#endif  const struct dcn_soc_bounding_box dcn10_soc_defaults = {  		/* latencies */  		.sr_exit_time = 17, /*us*/ @@ -626,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)  {  	bool updated = false; -	kernel_fpu_begin(); +	DC_FP_START();  	if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns  			&& dc->debug.sr_exit_time_ns) {  		updated = true; @@ -662,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)  		dc->dcn_soc->dram_clock_change_latency =  				dc->debug.dram_clock_change_latency_ns / 1000.0;  	} -	kernel_fpu_end(); +	DC_FP_END();  	return updated;  } @@ -708,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,  unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)  { -	/* for dali, the highest voltage level we want is 0 */ -	if (ASICREV_IS_DALI(hw_internal_rev)) +	/* for dali & pollock, the highest voltage level we want is 0 */ +	if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))  		return 0;  	/* we are ok with all levels */ @@ -742,7 +739,7 @@ bool dcn_validate_bandwidth(  		dcn_bw_sync_calcs_and_dml(dc);  	memset(v, 0, sizeof(*v)); -	kernel_fpu_begin(); +	DC_FP_START();  	v->sr_exit_time = dc->dcn_soc->sr_exit_time;  	v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; @@ -1275,7 +1272,7 @@ bool dcn_validate_bandwidth(  	bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;  	bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; -	kernel_fpu_end(); +	DC_FP_END();  	PERFORMANCE_TRACE_END();  	BW_VAL_TRACE_FINISH(); @@ -1443,7 +1440,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)  	res = dm_pp_get_clock_levels_by_type_with_voltage(  			ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); -	kernel_fpu_begin(); +	DC_FP_START();  	if (res)  		res = verify_clock_values(&fclks); @@ 
-1463,12 +1460,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)  	} else  		BREAK_TO_DEBUGGER(); -	kernel_fpu_end(); +	DC_FP_END();  	res = dm_pp_get_clock_levels_by_type_with_voltage(  			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); -	kernel_fpu_begin(); +	DC_FP_START();  	if (res)  		res = verify_clock_values(&dcfclks); @@ -1481,7 +1478,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)  	} else  		BREAK_TO_DEBUGGER(); -	kernel_fpu_end(); +	DC_FP_END();  }  void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) @@ -1496,11 +1493,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)  	if (!pp || !pp->set_wm_ranges)  		return; -	kernel_fpu_begin(); +	DC_FP_START();  	min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;  	min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;  	socclk_khz = dc->dcn_soc->socclk * 1000; -	kernel_fpu_end(); +	DC_FP_END();  	/* Now notify PPLib/SMU about which Watermarks sets they should select  	 * depending on DPM state they are in. 
And update BW MGR GFX Engine and @@ -1551,7 +1548,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)  void dcn_bw_sync_calcs_and_dml(struct dc *dc)  { -	kernel_fpu_begin(); +	DC_FP_START();  	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"  			"sr_enter_plus_exit_time: %f ns\n"  			"urgent_latency: %f ns\n" @@ -1740,5 +1737,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)  	dc->dml.ip.bug_forcing_LC_req_same_size_fixed =  		dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;  	dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency; -	kernel_fpu_end(); +	DC_FP_END();  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index b864869cc7e3..3cd283195091 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o  AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))  AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120) -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN  ###############################################################################  # DCN10  ############################################################################### @@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o  AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))  AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_0  ###############################################################################  # DCN20  ############################################################################### @@ -83,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o  AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))  AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1  
###############################################################################  # DCN21  ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 8828dd9c3783..a78e5c74c79c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -37,9 +37,7 @@  #include "dcn10/rv1_clk_mgr.h"  #include "dcn10/rv2_clk_mgr.h"  #include "dcn20/dcn20_clk_mgr.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #include "dcn21/rn_clk_mgr.h" -#endif  int clk_mgr_helper_get_active_display_cnt( @@ -134,14 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p  			dce120_clk_mgr_construct(ctx, clk_mgr);  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case FAMILY_RV: -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +		if (ASICREV_IS_DALI(asic_id.hw_internal_rev) || +				ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) { +			/* TEMP: this check has to come before ASICREV_IS_RENOIR */ +			/* which also incorrectly returns true for Dali/Pollock*/ +			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); +			break; +		}  		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {  			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);  			break;  		} -#endif	/* DCN2_1 */  		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {  			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);  			break; @@ -152,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p  			break;  		}  		break; -#endif	/* Family RV */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case FAMILY_NV:  		dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);  		break; -#endif /* Family NV */ +#endif	/* Family RV and NV*/  	default:  		ASSERT(0); /* Unknown Asic */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index a6c46e903ff9..d031bd3d3072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)  	struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);  	struct bp_set_dce_clock_parameters dce_clk_params;  	struct dc_bios *bp = clk_mgr_base->ctx->dc_bios; -	struct dc *core_dc = clk_mgr_base->ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc *dc = clk_mgr_base->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	int actual_clock = requested_clk_khz;  	/* Prepare to program display clock*/  	memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)  	bp->funcs->set_dce_clock(bp, &dce_clk_params); -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {  			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)  				dmcu->funcs->set_psr_wait_loop(dmcu, @@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)  {  	struct bp_set_dce_clock_parameters dce_clk_params;  	struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; -	struct dc *core_dc = clk_mgr->base.ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc *dc = clk_mgr->base.ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	int actual_clock = requested_clk_khz;  	/* Prepare to program display clock*/  	memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)  		clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +	if 
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {  			if (clk_mgr->dfs_bypass_disp_clk != actual_clock)  				dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 1897e91c8ccb..97b7f32294fd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned  int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)  {  	int actual_dispclk_set_mhz = -1; -	struct dc *core_dc = clk_mgr->base.ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc *dc = clk_mgr->base.ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	/*  Unit of SMU msg parameter is Mhz */  	actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param( @@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di  	/* Actual dispclk set is returned in the parameter register */  	actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000; -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {  			if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)  				dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 25d7b7c6681c..495f01e9f2ca 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -27,6 +27,7 @@  #include "clk_mgr_internal.h"  #include "dce100/dce_clk_mgr.h" +#include "dcn20_clk_mgr.h"  #include 
"reg_helper.h"  #include "core_types.h"  #include "dm_helpers.h" @@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider)  }  void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, -		struct dc_state *context) +		struct dc_state *context, bool safe_to_lower)  {  	int i;  	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;  	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { -		int dpp_inst, dppclk_khz; +		int dpp_inst, dppclk_khz, prev_dppclk_khz;  		/* Loop index will match dpp->inst if resource exists,  		 * and we want to avoid dependency on dpp object @@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,  		dpp_inst = i;  		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; -		clk_mgr->dccg->funcs->update_dpp_dto( -				clk_mgr->dccg, dpp_inst, dppclk_khz); +		prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + +		if (safe_to_lower || prev_dppclk_khz < dppclk_khz) { +			clk_mgr->dccg->funcs->update_dpp_dto( +							clk_mgr->dccg, dpp_inst, dppclk_khz); +		}  	}  } @@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  		dc->debug.force_clock_mode & 0x1) {  		//this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3.  		force_reset = true; + +		dcn2_read_clocks_from_hw_dentist(clk_mgr_base); +  		//force_clock_mode 0x1:  force reset the clock even it is the same clock as long as it is in Passive level.  	
}  	display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {  		if (dpp_clock_lowered) {  			// if clock is being lowered, increase DTO before lowering refclk -			dcn20_update_clocks_update_dpp_dto(clk_mgr, context); +			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);  			dcn20_update_clocks_update_dentist(clk_mgr);  		} else {  			// if clock is being raised, increase refclk before lowering DTO @@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  				dcn20_update_clocks_update_dentist(clk_mgr);  			// always update dtos unless clock is lowered and not safe to lower  			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) -				dcn20_update_clocks_update_dpp_dto(clk_mgr, context); +				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);  		}  	} @@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)  	}  } + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base) +{ +	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); +	uint32_t dispclk_wdivider; +	uint32_t dppclk_wdivider; +	int disp_divider; +	int dpp_divider; + +	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider); +	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider); + +	disp_divider = dentist_get_divider_from_did(dispclk_wdivider); +	dpp_divider = dentist_get_divider_from_did(dppclk_wdivider); + +	if (disp_divider && dpp_divider) { +		/* Calculate the current DFS clock, in kHz.*/ +		clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR +			* clk_mgr->base.dentist_vco_freq_khz) / disp_divider; + +		clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR +				* clk_mgr->base.dentist_vco_freq_khz) / dpp_divider; +	} + +} +  void 
dcn2_get_clock(struct clk_mgr *clk_mgr,  		struct dc_state *context,  			enum dc_clock_type clock_type, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index c9fd824f3c23..0b9c045b0c8e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,  			struct dc_state *context,  			bool safe_to_lower);  void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, -		struct dc_state *context); +		struct dc_state *context, bool safe_to_lower);  void dcn2_init_clocks(struct clk_mgr *clk_mgr); @@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,  			struct dc_clock_config *clock_cfg);  void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); + +  #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 790a2d211bd6..7ae4c06232dd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa(  		struct dc_state *context)  {  	int i, display_count; -	bool hdmi_present = false; +	bool tmds_present = false;  	display_count = 0;  	for (i = 0; i < context->stream_count; i++) {  		const struct dc_stream_state *stream = context->streams[i]; -		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) -			hdmi_present = true; +		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || +				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || +				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) +			tmds_present = true;  	}  	for (i = 0; i < dc->link_count; i++) { @@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa(  	}  	/* WA for 
hang on HDMI after display off back back on*/ -	if (display_count == 0 && hdmi_present) +	if (display_count == 0 && tmds_present)  		display_count = 1;  	return display_count; @@ -164,16 +166,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,  	}  	if (dpp_clock_lowered) { -		// if clock is being lowered, increase DTO before lowering refclk -		dcn20_update_clocks_update_dpp_dto(clk_mgr, context); +		// increase per DPP DTO before lowering global dppclk +		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);  		rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);  	} else { -		// if clock is being raised, increase refclk before lowering DTO +		// increase global DPPCLK before lowering per DPP DTO  		if (update_dppclk || update_dispclk)  			rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);  		// always update dtos unless clock is lowered and not safe to lower  		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) -			dcn20_update_clocks_update_dpp_dto(clk_mgr, context); +			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);  	}  	if (update_dispclk && @@ -409,7 +411,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra  			continue;  		ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; -		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; +		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;  		/* We will not select WM based on dcfclk, so leave it as unconstrained */  		ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;  		ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; @@ -471,12 +473,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)  } +static bool rn_are_clock_states_equal(struct dc_clocks *a, +		struct 
dc_clocks *b) +{ +	if (a->dispclk_khz != b->dispclk_khz) +		return false; +	else if (a->dppclk_khz != b->dppclk_khz) +		return false; +	else if (a->dcfclk_khz != b->dcfclk_khz) +		return false; +	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) +		return false; + +	return true; +} + +  static struct clk_mgr_funcs dcn21_funcs = {  	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,  	.update_clocks = rn_update_clocks,  	.init_clocks = rn_init_clocks,  	.enable_pme_wa = rn_enable_pme_wa, -	/* .dump_clk_registers = rn_dump_clk_registers, */ +	.are_clock_states_equal = rn_are_clock_states_equal,  	.notify_wm_ranges = rn_notify_wm_ranges  }; @@ -518,36 +536,83 @@ struct clk_bw_params rn_bw_params = {  		.num_entries = 4,  	}, -	.wm_table = { -		.entries = { -			{ -				.wm_inst = WM_A, -				.wm_type = WM_TYPE_PSTATE_CHG, -				.pstate_latency_us = 23.84, -				.valid = true, -			}, -			{ -				.wm_inst = WM_B, -				.wm_type = WM_TYPE_PSTATE_CHG, -				.pstate_latency_us = 23.84, -				.valid = true, -			}, -			{ -				.wm_inst = WM_C, -				.wm_type = WM_TYPE_PSTATE_CHG, -				.pstate_latency_us = 23.84, -				.valid = true, -			}, -			{ -				.wm_inst = WM_D, -				.wm_type = WM_TYPE_PSTATE_CHG, -				.pstate_latency_us = 23.84, -				.valid = true, -			}, +}; + +struct wm_table ddr4_wm_table = { +	.entries = { +		{ +			.wm_inst = WM_A, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.72, +			.sr_exit_time_us = 6.09, +			.sr_enter_plus_exit_time_us = 7.14, +			.valid = true, +		}, +		{ +			.wm_inst = WM_B, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.72, +			.sr_exit_time_us = 10.12, +			.sr_enter_plus_exit_time_us = 11.48, +			.valid = true, +		}, +		{ +			.wm_inst = WM_C, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.72, +			.sr_exit_time_us = 10.12, +			.sr_enter_plus_exit_time_us = 11.48, +			.valid = true, +		}, +		{ +			.wm_inst = WM_D, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.72, +			
.sr_exit_time_us = 10.12, +			.sr_enter_plus_exit_time_us = 11.48, +			.valid = true,  		},  	}  }; +struct wm_table lpddr4_wm_table = { +	.entries = { +		{ +			.wm_inst = WM_A, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.65333, +			.sr_exit_time_us = 5.32, +			.sr_enter_plus_exit_time_us = 6.38, +			.valid = true, +		}, +		{ +			.wm_inst = WM_B, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.65333, +			.sr_exit_time_us = 9.82, +			.sr_enter_plus_exit_time_us = 11.196, +			.valid = true, +		}, +		{ +			.wm_inst = WM_C, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.65333, +			.sr_exit_time_us = 9.89, +			.sr_enter_plus_exit_time_us = 11.24, +			.valid = true, +		}, +		{ +			.wm_inst = WM_D, +			.wm_type = WM_TYPE_PSTATE_CHG, +			.pstate_latency_us = 11.65333, +			.sr_exit_time_us = 9.748, +			.sr_enter_plus_exit_time_us = 11.102, +			.valid = true, +		}, +	} +}; + +  static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)  {  	int i; @@ -561,7 +626,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi  	return 0;  } -static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)  {  	int i, j = 0; @@ -593,8 +658,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params  		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);  	} -	bw_params->vram_type = asic_id->vram_type; -	bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; +	bw_params->vram_type = bios_info->memory_type; +	bw_params->num_channels = bios_info->ma_channel_number;  	for (i = 0; i < WM_SET_COUNT; i++) {  		bw_params->wm_table.entries[i].wm_inst = i; @@ 
-628,7 +693,6 @@ void rn_clk_mgr_construct(  {  	struct dc_debug_options *debug = &ctx->dc->debug;  	struct dpm_clocks clock_table = { 0 }; -	struct clk_state_registers_and_bypass s = { 0 };  	clk_mgr->base.ctx = ctx;  	clk_mgr->base.funcs = &dcn21_funcs; @@ -648,7 +712,6 @@ void rn_clk_mgr_construct(  	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {  		dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;  		clk_mgr->base.dentist_vco_freq_khz = 3600000; -		clk_mgr->base.dprefclk_khz = 600000;  	} else {  		struct clk_log_info log_info = {0}; @@ -659,25 +722,26 @@ void rn_clk_mgr_construct(  		if (clk_mgr->base.dentist_vco_freq_khz == 0)  			clk_mgr->base.dentist_vco_freq_khz = 3600000; -		rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); -		/* Convert dprefclk units from MHz to KHz */ -		/* Value already divided by 10, some resolution lost */ -		clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - -		/* in case we don't get a value from the register, use default */ -		if (clk_mgr->base.dprefclk_khz == 0) { -			ASSERT(clk_mgr->base.dprefclk_khz == 600000); -			clk_mgr->base.dprefclk_khz = 600000; +		if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { +			rn_bw_params.wm_table = lpddr4_wm_table; +		} else { +			rn_bw_params.wm_table = ddr4_wm_table;  		} +		/* Saved clocks configured at boot for debug purposes */ +		rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);  	} +	clk_mgr->base.dprefclk_khz = 600000;  	dce_clock_read_ss_info(clk_mgr); +  	clk_mgr->base.bw_params = &rn_bw_params;  	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {  		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); -		rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); +		if (ctx->dc_bios && ctx->dc_bios->integrated_info) { +			rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); +		}  	}  	if 
(!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index cb7c0e8b7e1b..6878aedf1d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -82,8 +82,8 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)  int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)  {  	int actual_dispclk_set_mhz = -1; -	struct dc *core_dc = clk_mgr->base.ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc *dc = clk_mgr->base.ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	/*  Unit of SMU msg parameter is Mhz */  	actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -91,7 +91,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis  			VBIOSSMC_MSG_SetDispclkFreq,  			requested_dispclk_khz / 1000); -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {  			if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)  				dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 32f31bf91915..6c797fac189d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -58,21 +58,21 @@  #include "hubp.h"  #include "dc_link_dp.h" +#include "dc_dmub_srv.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dsc.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #include "vm_helper.h" -#endif  #include "dce/dce_i2c.h" +#define CTX \ +	dc->ctx +  #define DC_LOGGER \  	dc->ctx->logger -const static char DC_BUILD_ID[] = "production-build"; +static const char DC_BUILD_ID[] = 
"production-build";  /**   * DOC: Overview @@ -287,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,  		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];  		if (pipe->stream == stream && pipe->stream_res.tg) { -			pipe->stream->adjust = *adjust;  			dc->hwss.set_drr(&pipe,  					1,  					adjust->v_total_min, @@ -511,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)  	return ret;  } -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc,  		struct dc_stream_state **streams,  		int num_streams, -		const struct dc_static_screen_events *events) +		const struct dc_static_screen_params *params)  {  	int i = 0;  	int j = 0; @@ -533,10 +532,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,  		}  	} -	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); +	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);  } -static void destruct(struct dc *dc) +static void dc_destruct(struct dc *dc)  {  	if (dc->current_state) {  		dc_release_state(dc->current_state); @@ -569,7 +568,7 @@ static void destruct(struct dc *dc)  	kfree(dc->bw_dceip);  	dc->bw_dceip = NULL; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	kfree(dc->dcn_soc);  	dc->dcn_soc = NULL; @@ -577,28 +576,58 @@ static void destruct(struct dc *dc)  	dc->dcn_ip = NULL;  #endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	kfree(dc->vm_helper);  	dc->vm_helper = NULL; -#endif  } -static bool construct(struct dc *dc, +static bool dc_construct_ctx(struct dc *dc, +		const struct dc_init_data *init_params) +{ +	struct dc_context *dc_ctx; +	enum dce_version dc_version = DCE_VERSION_UNKNOWN; + +	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); +	if (!dc_ctx) +		return false; + +	dc_ctx->cgs_device = init_params->cgs_device; +	dc_ctx->driver_context = init_params->driver; +	dc_ctx->dc = dc; +	dc_ctx->asic_id = init_params->asic_id; +	
dc_ctx->dc_sink_id_count = 0; +	dc_ctx->dc_stream_id_count = 0; +	dc_ctx->dce_environment = init_params->dce_environment; + +	/* Create logger */ + +	dc_version = resource_parse_asic_id(init_params->asic_id); +	dc_ctx->dce_version = dc_version; + +	dc_ctx->perf_trace = dc_perf_trace_create(); +	if (!dc_ctx->perf_trace) { +		ASSERT_CRITICAL(false); +		return false; +	} + +	dc->ctx = dc_ctx; + +	return true; +} + +static bool dc_construct(struct dc *dc,  		const struct dc_init_data *init_params)  {  	struct dc_context *dc_ctx;  	struct bw_calcs_dceip *dc_dceip;  	struct bw_calcs_vbios *dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	struct dcn_soc_bounding_box *dcn_soc;  	struct dcn_ip_params *dcn_ip;  #endif -	enum dce_version dc_version = DCE_VERSION_UNKNOWN;  	dc->config = init_params->flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	// Allocate memory for the vm_helper  	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);  	if (!dc->vm_helper) { @@ -606,7 +635,6 @@ static bool construct(struct dc *dc,  		goto fail;  	} -#endif  	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));  	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); @@ -624,7 +652,7 @@ static bool construct(struct dc *dc,  	}  	dc->bw_vbios = dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);  	if (!dcn_soc) {  		dm_error("%s: failed to create dcn_soc\n", __func__); @@ -640,31 +668,15 @@ static bool construct(struct dc *dc,  	}  	dc->dcn_ip = dcn_ip; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	dc->soc_bounding_box = init_params->soc_bounding_box;  #endif -#endif -	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); -	if (!dc_ctx) { +	if (!dc_construct_ctx(dc, init_params)) {  		dm_error("%s: failed to create ctx\n", __func__);  		goto fail;  	} -	dc_ctx->cgs_device = init_params->cgs_device; -	dc_ctx->driver_context = init_params->driver; -	dc_ctx->dc = dc; -	dc_ctx->asic_id = 
init_params->asic_id; -	dc_ctx->dc_sink_id_count = 0; -	dc_ctx->dc_stream_id_count = 0; -	dc->ctx = dc_ctx; - -	/* Create logger */ - -	dc_ctx->dce_environment = init_params->dce_environment; - -	dc_version = resource_parse_asic_id(init_params->asic_id); -	dc_ctx->dce_version = dc_version; +        dc_ctx = dc->ctx;  	/* Resource should construct all asic specific resources.  	 * This should be the only place where we need to parse the asic id @@ -679,7 +691,7 @@ static bool construct(struct dc *dc,  		bp_init_data.bios = init_params->asic_id.atombios_base_address;  		dc_ctx->dc_bios = dal_bios_parser_create( -				&bp_init_data, dc_version); +				&bp_init_data, dc_ctx->dce_version);  		if (!dc_ctx->dc_bios) {  			ASSERT_CRITICAL(false); @@ -687,17 +699,13 @@ static bool construct(struct dc *dc,  		}  		dc_ctx->created_bios = true; -		} - -	dc_ctx->perf_trace = dc_perf_trace_create(); -	if (!dc_ctx->perf_trace) { -		ASSERT_CRITICAL(false); -		goto fail;  	} + +  	/* Create GPIO service */  	dc_ctx->gpio_service = dal_gpio_service_create( -			dc_version, +			dc_ctx->dce_version,  			dc_ctx->dce_environment,  			dc_ctx); @@ -706,7 +714,7 @@ static bool construct(struct dc *dc,  		goto fail;  	} -	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version); +	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);  	if (!dc->res_pool)  		goto fail; @@ -714,10 +722,8 @@ static bool construct(struct dc *dc,  	if (!dc->clk_mgr)  		goto fail; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  	if (dc->res_pool->funcs->update_bw_bounding_box)  		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); -#endif  	/* Creation of current_state must occur after dc->dml  	 * is initialized in dc_create_resource_pool because @@ -739,12 +745,9 @@ static bool construct(struct dc *dc,  	return true;  fail: - -	destruct(dc);  	return false;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  static bool disable_all_writeback_pipes_for_stream(  		const struct dc 
*dc,  		struct dc_stream_state *stream, @@ -757,7 +760,6 @@ static bool disable_all_writeback_pipes_for_stream(  	return true;  } -#endif  static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  { @@ -783,16 +785,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  		}  		if (should_disable && old_stream) {  			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); -#endif  			if (dc->hwss.apply_ctx_for_surface)  				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);  		} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (dc->hwss.program_front_end_for_ctx)  			dc->hwss.program_front_end_for_ctx(dc, dangling_context); -#endif  	}  	current_ctx = dc->current_state; @@ -800,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)  	dc_release_state(current_ctx);  } +static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) +{ +	int i; +	int count = 0; +	struct pipe_ctx *pipe; +	PERF_TRACE(); +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &context->res_ctx.pipe_ctx[i]; + +		if (!pipe->plane_state) +			continue; + +		/* Timeout 100 ms */ +		while (count < 100000) { +			/* Must set to false to start with, due to OR in update function */ +			pipe->plane_state->status.is_flip_pending = false; +			dc->hwss.update_pending_status(pipe); +			if (!pipe->plane_state->status.is_flip_pending) +				break; +			udelay(1); +			count++; +		} +		ASSERT(!pipe->plane_state->status.is_flip_pending); +	} +	PERF_TRACE(); +} +  /*******************************************************************************   * Public functions   ******************************************************************************/ @@ -812,26 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params)  	if (NULL == dc)  		goto alloc_fail; -	if (false == 
construct(dc, init_params)) -		goto construct_fail; +	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { +		if (false == dc_construct_ctx(dc, init_params)) { +			dc_destruct(dc); +			goto construct_fail; +		} +	} else { +		if (false == dc_construct(dc, init_params)) { +			dc_destruct(dc); +			goto construct_fail; +		} + +		full_pipe_count = dc->res_pool->pipe_count; +		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) +			full_pipe_count--; +		dc->caps.max_streams = min( +				full_pipe_count, +				dc->res_pool->stream_enc_count); -	full_pipe_count = dc->res_pool->pipe_count; -	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) -		full_pipe_count--; -	dc->caps.max_streams = min( -			full_pipe_count, -			dc->res_pool->stream_enc_count); +		dc->optimize_seamless_boot_streams = 0; +		dc->caps.max_links = dc->link_count; +		dc->caps.max_audios = dc->res_pool->audio_count; +		dc->caps.linear_pitch_alignment = 64; -	dc->caps.max_links = dc->link_count; -	dc->caps.max_audios = dc->res_pool->audio_count; -	dc->caps.linear_pitch_alignment = 64; +		dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + +		if (dc->res_pool->dmcu != NULL) +			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; +	}  	/* Populate versioning information */  	dc->versions.dc_ver = DC_VER; -	if (dc->res_pool->dmcu != NULL) -		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; -  	dc->build_id = DC_BUILD_ID;  	DC_LOG_DC("Display Core initialized\n"); @@ -849,7 +886,8 @@ alloc_fail:  void dc_hardware_init(struct dc *dc)  { -	dc->hwss.init_hw(dc); +	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) +		dc->hwss.init_hw(dc);  }  void dc_init_callbacks(struct dc *dc, @@ -869,7 +907,7 @@ void dc_deinit_callbacks(struct dc *dc)  void dc_destroy(struct dc **dc)  { -	destruct(*dc); +	dc_destruct(*dc);  	kfree(*dc);  	*dc = NULL;  } @@ -1163,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  	for (i = 0; i < 
context->stream_count; i++) {  		if (context->streams[i]->apply_seamless_boot_optimization) -			dc->optimize_seamless_boot = true; +			dc->optimize_seamless_boot_streams++;  	} -	if (!dc->optimize_seamless_boot) +	if (dc->optimize_seamless_boot_streams == 0)  		dc->hwss.prepare_bandwidth(dc, context);  	/* re-program planes for existing stream, in case we need to @@ -1182,10 +1220,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  				context->stream_status[i].plane_count,  				context); /* use new pipe config in new context */  		} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -	if (dc->hwss.program_front_end_for_ctx) -		dc->hwss.program_front_end_for_ctx(dc, context); -#endif  	/* Program hardware */  	for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1204,10 +1238,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  	}  	/* Program all planes within new context*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	if (dc->hwss.program_front_end_for_ctx)  		dc->hwss.program_front_end_for_ctx(dc, context); -#endif  	for (i = 0; i < context->stream_count; i++) {  		const struct dc_link *link = context->streams[i]->link; @@ -1245,6 +1277,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  	dc_enable_stereo(dc, context, dc_streams, context->stream_count); +	if (dc->optimize_seamless_boot_streams == 0) { +		/* Must wait for no flips to be pending before doing optimize bw */ +		wait_for_no_pipes_pending(dc, context); +		/* pplib is notified if disp_num changed */ +		dc->hwss.optimize_bandwidth(dc, context); +	} +  	for (i = 0; i < context->stream_count; i++)  		context->streams[i]->mode_changed = false; @@ -1279,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)  	return (result == DC_OK);  } +bool dc_is_hw_initialized(struct dc *dc) +{ +	struct dc_bios *dcb = dc->ctx->dc_bios; +	return dcb->funcs->is_accelerated_mode(dcb); +} +  bool 
dc_post_update_surfaces_to_stream(struct dc *dc)  {  	int i;  	struct dc_state *context = dc->current_state; -	if (!dc->optimized_required || dc->optimize_seamless_boot) +	if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)  		return true;  	post_surface_trace(dc); @@ -1313,7 +1358,7 @@ struct dc_state *dc_create_state(struct dc *dc)  	 * initialize and obtain IP and SOC the base DML instance from DC is  	 * initially copied into every context  	 */ -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));  #endif @@ -1486,11 +1531,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa  		elevate_update_type(&update_type, UPDATE_TYPE_MED);  	} -	if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) { -		update_flags->bits.sdr_white_level = 1; -		elevate_update_type(&update_type, UPDATE_TYPE_MED); -	} -  	if (u->plane_info->dcc.enable != u->surface->dcc.enable  			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks  			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { @@ -1508,7 +1548,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa  	}  	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch -			|| u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch  			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {  		update_flags->bits.plane_size_change = 1;  		elevate_update_type(&update_type, UPDATE_TYPE_MED); @@ -1547,7 +1586,10 @@ static enum surface_update_type get_scaling_info_update_type(  	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width  			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height  			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width -			|| u->scaling_info->dst_rect.height != 
u->surface->dst_rect.height) { +			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height +			|| u->scaling_info->scaling_quality.integer_scaling != +				u->surface->scaling_quality.integer_scaling +			) {  		update_flags->bits.scaling_change = 1;  		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width @@ -1563,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type(  		update_flags->bits.scaling_change = 1;  		if (u->scaling_info->src_rect.width > u->surface->src_rect.width -				&& u->scaling_info->src_rect.height > u->surface->src_rect.height) +				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)  			/* Making src rect bigger requires a bandwidth change */  			update_flags->bits.clock_change = 1;  	} @@ -1577,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type(  		update_flags->bits.position_change = 1;  	if (update_flags->bits.clock_change -			|| update_flags->bits.bandwidth_change) +			|| update_flags->bits.bandwidth_change +			|| update_flags->bits.scaling_change)  		return UPDATE_TYPE_FULL; -	if (update_flags->bits.scaling_change -			|| update_flags->bits.position_change) +	if (update_flags->bits.position_change)  		return UPDATE_TYPE_MED;  	return UPDATE_TYPE_FAST; @@ -1635,6 +1677,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,  			update_flags->bits.gamma_change = 1;  	} +	if (u->hdr_mult.value) +		if (u->hdr_mult.value != u->surface->hdr_mult.value) { +			update_flags->bits.hdr_mult = 1; +			elevate_update_type(&overall_type, UPDATE_TYPE_MED); +		} +  	if (update_flags->bits.in_transfer_func_change) {  		type = UPDATE_TYPE_MED;  		elevate_update_type(&overall_type, type); @@ -1668,7 +1716,8 @@ static enum surface_update_type check_update_surfaces_for_stream(  		union stream_update_flags *su_flags = &stream_update->stream->update_flags;  		if ((stream_update->src.height != 0 && stream_update->src.width != 0) || -				(stream_update->dst.height 
!= 0 && stream_update->dst.width != 0)) +			(stream_update->dst.height != 0 && stream_update->dst.width != 0) || +			stream_update->integer_scaling_update)  			su_flags->bits.scaling = 1;  		if (stream_update->out_transfer_func) @@ -1683,15 +1732,16 @@ static enum surface_update_type check_update_surfaces_for_stream(  		if (stream_update->gamut_remap)  			su_flags->bits.gamut_remap = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (stream_update->wb_update)  			su_flags->bits.wb_update = 1; -#endif  		if (su_flags->raw != 0)  			overall_type = UPDATE_TYPE_FULL;  		if (stream_update->output_csc_transform || stream_update->output_color_space)  			su_flags->bits.out_csc = 1; + +		if (stream_update->dsc_config) +			overall_type = UPDATE_TYPE_FULL;  	}  	for (i = 0 ; i < surface_count; i++) { @@ -1817,8 +1867,6 @@ static void copy_surface_update_to_plane(  				srf_update->plane_info->global_alpha_value;  		surface->dcc =  				srf_update->plane_info->dcc; -		surface->sdr_white_level = -				srf_update->plane_info->sdr_white_level;  		surface->layer_index =  				srf_update->plane_info->layer_index;  	} @@ -1851,7 +1899,6 @@ static void copy_surface_update_to_plane(  			sizeof(struct dc_transfer_func_distributed_points));  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	if (srf_update->func_shaper &&  			(surface->in_shaper_func !=  			srf_update->func_shaper)) @@ -1864,13 +1911,16 @@ static void copy_surface_update_to_plane(  		memcpy(surface->lut3d_func, srf_update->lut3d_func,  		sizeof(*surface->lut3d_func)); +	if (srf_update->hdr_mult.value) +		surface->hdr_mult = +				srf_update->hdr_mult; +  	if (srf_update->blend_tf &&  			(surface->blend_tf !=  			srf_update->blend_tf))  		memcpy(surface->blend_tf, srf_update->blend_tf,  		sizeof(*surface->blend_tf)); -#endif  	if (srf_update->input_csc_color_matrix)  		surface->input_csc_color_matrix =  			*srf_update->input_csc_color_matrix; @@ -1883,8 +1933,10 @@ static void copy_surface_update_to_plane(  static void 
copy_stream_update_to_stream(struct dc *dc,  					 struct dc_state *context,  					 struct dc_stream_state *stream, -					 const struct dc_stream_update *update) +					 struct dc_stream_update *update)  { +	struct dc_context *dc_ctx = dc->ctx; +  	if (update == NULL || stream == NULL)  		return; @@ -1945,7 +1997,6 @@ static void copy_stream_update_to_stream(struct dc *dc,  	if (update->dither_option)  		stream->dither_option = *update->dither_option; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* update current stream with writeback info */  	if (update->wb_update) {  		int i; @@ -1956,23 +2007,32 @@ static void copy_stream_update_to_stream(struct dc *dc,  			stream->writeback_info[i] =  				update->wb_update->writeback_info[i];  	} -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  	if (update->dsc_config) {  		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;  		uint32_t old_dsc_enabled = stream->timing.flags.DSC;  		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&  				       update->dsc_config->num_slices_v != 0); -		stream->timing.dsc_cfg = *update->dsc_config; -		stream->timing.flags.DSC = enable_dsc; -		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, -							     true)) { -			stream->timing.dsc_cfg = old_dsc_cfg; -			stream->timing.flags.DSC = old_dsc_enabled; +		/* Use temporarry context for validating new DSC config */ +		struct dc_state *dsc_validate_context = dc_create_state(dc); + +		if (dsc_validate_context) { +			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); + +			stream->timing.dsc_cfg = *update->dsc_config; +			stream->timing.flags.DSC = enable_dsc; +			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { +				stream->timing.dsc_cfg = old_dsc_cfg; +				stream->timing.flags.DSC = old_dsc_enabled; +				update->dsc_config = NULL; +			} + +			dc_release_state(dsc_validate_context); +		} else { +			DC_ERROR("Failed to allocate new validate context for DSC change\n"); 
+			update->dsc_config = NULL;  		}  	} -#endif  }  static void commit_planes_do_stream_update(struct dc *dc, @@ -1992,11 +2052,11 @@ static void commit_planes_do_stream_update(struct dc *dc,  			if (stream_update->periodic_interrupt0 &&  					dc->hwss.setup_periodic_interrupt) -				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0); +				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);  			if (stream_update->periodic_interrupt1 &&  					dc->hwss.setup_periodic_interrupt) -				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1); +				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);  			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||  					stream_update->vrr_infopacket || @@ -2006,6 +2066,12 @@ static void commit_planes_do_stream_update(struct dc *dc,  				dc->hwss.update_info_frame(pipe_ctx);  			} +			if (stream_update->hdr_static_metadata && +					stream->use_dynamic_meta && +					dc->hwss.set_dmdata_attributes && +					pipe_ctx->stream->dmdata_address.quad_part != 0) +				dc->hwss.set_dmdata_attributes(pipe_ctx); +  			if (stream_update->gamut_remap)  				dc_stream_set_gamut_remap(dc, stream); @@ -2013,31 +2079,25 @@ static void commit_planes_do_stream_update(struct dc *dc,  				dc_stream_program_csc_matrix(dc, stream);  			if (stream_update->dither_option) { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif  				resource_build_bit_depth_reduction_params(pipe_ctx->stream,  									&pipe_ctx->stream->bit_depth_params);  				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,  						&stream->bit_depth_params,  						&stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  				while (odm_pipe) {  					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,  							&stream->bit_depth_params,  							&stream->clamping);  					odm_pipe = odm_pipe->next_odm_pipe;  				} -#endif  			} -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  	
		if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {  				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);  				dp_update_dsc_config(pipe_ctx);  				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);  			} -#endif  			/* Full fe update*/  			if (update_type == UPDATE_TYPE_FAST)  				continue; @@ -2053,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc,  					dc->hwss.optimize_bandwidth(dc, dc->current_state);  				} else { -					if (!dc->optimize_seamless_boot) +					if (dc->optimize_seamless_boot_streams == 0)  						dc->hwss.prepare_bandwidth(dc, dc->current_state);  					core_link_enable_stream(dc->current_state, pipe_ctx); @@ -2094,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc,  	int i, j;  	struct pipe_ctx *top_pipe_to_program = NULL; -	if (dc->optimize_seamless_boot && surface_count > 0) { +	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {  		/* Optimize seamless boot flag keeps clocks and watermarks high until  		 * first flip. After first flip, optimization is required to lower  		 * bandwidth. 
Important to note that it is expected UEFI will @@ -2103,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc,  		 */  		if (stream->apply_seamless_boot_optimization) {  			stream->apply_seamless_boot_optimization = false; -			dc->optimize_seamless_boot = false; -			dc->optimized_required = true; +			dc->optimize_seamless_boot_streams--; + +			if (dc->optimize_seamless_boot_streams == 0) +				dc->optimized_required = true;  		}  	} -	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) { +	if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {  		dc->hwss.prepare_bandwidth(dc, context);  		context_clock_trace(dc, context);  	} @@ -2124,15 +2186,12 @@ static void commit_planes_for_stream(struct dc *dc,  		 */  		if (dc->hwss.apply_ctx_for_surface)  			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (dc->hwss.program_front_end_for_ctx)  			dc->hwss.program_front_end_for_ctx(dc, context); -#endif  		return;  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {  		for (i = 0; i < surface_count; i++) {  			struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2154,7 +2213,6 @@ static void commit_planes_for_stream(struct dc *dc,  			}  		}  	} -#endif  	// Update Type FULL, Surface updates  	for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2175,7 +2233,6 @@ static void commit_planes_for_stream(struct dc *dc,  			if (update_type == UPDATE_TYPE_FAST)  				continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);  			if (dc->hwss.program_triplebuffer != NULL && @@ -2184,7 +2241,6 @@ static void commit_planes_for_stream(struct dc *dc,  				dc->hwss.program_triplebuffer(  					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);  			} -#endif  			stream_status =  				stream_get_status(context, pipe_ctx->stream); @@ -2193,10 +2249,24 @@ static void 
commit_planes_for_stream(struct dc *dc,  					dc, pipe_ctx->stream, stream_status->plane_count, context);  		}  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) +	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {  		dc->hwss.program_front_end_for_ctx(dc, context); +#ifdef CONFIG_DRM_AMD_DC_DCN +		if (dc->debug.validate_dml_output) { +			for (i = 0; i < dc->res_pool->pipe_count; i++) { +				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; +				if (cur_pipe.stream == NULL) +					continue; + +				cur_pipe.plane_res.hubp->funcs->validate_dml_output( +						cur_pipe.plane_res.hubp, dc->ctx, +						&context->res_ctx.pipe_ctx[i].rq_regs, +						&context->res_ctx.pipe_ctx[i].dlg_regs, +						&context->res_ctx.pipe_ctx[i].ttu_regs); +			} +		}  #endif +	}  	// Update Type FAST, Surface updates  	if (update_type == UPDATE_TYPE_FAST) { @@ -2206,7 +2276,6 @@ static void commit_planes_for_stream(struct dc *dc,  		 */  		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (dc->hwss.set_flip_control_gsl)  			for (i = 0; i < surface_count; i++) {  				struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2225,7 +2294,6 @@ static void commit_planes_for_stream(struct dc *dc,  							plane_state->flip_immediate);  				}  			} -#endif  		/* Perform requested Updates */  		for (i = 0; i < surface_count; i++) {  			struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2238,7 +2306,6 @@ static void commit_planes_for_stream(struct dc *dc,  				if (pipe_ctx->plane_state != plane_state)  					continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  				/*program triple buffer after lock based on flip type*/  				if (dc->hwss.program_triplebuffer != NULL &&  					!dc->debug.disable_tri_buf) { @@ -2246,7 +2313,6 @@ static void commit_planes_for_stream(struct dc *dc,  					dc->hwss.program_triplebuffer(  						dc, pipe_ctx, 
plane_state->triplebuffer_flips);  				} -#endif  				if (srf_updates[i].flip_addr)  					dc->hwss.update_plane_addr(dc, pipe_ctx);  			} @@ -2407,14 +2473,15 @@ void dc_set_power_state(  	case DC_ACPI_CM_POWER_STATE_D0:  		dc_resource_state_construct(dc, dc->current_state); +		if (dc->ctx->dmub_srv) +			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); +  		dc->hwss.init_hw(dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  		if (dc->hwss.init_sys_ctx != NULL &&  			dc->vm_pa_config.valid) {  			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);  		} -#endif  		break;  	default: @@ -2494,6 +2561,17 @@ bool dc_submit_i2c(  		cmd);  } +bool dc_submit_i2c_oem( +		struct dc *dc, +		struct i2c_command *cmd) +{ +	struct ddc_service *ddc = dc->res_pool->oem_device; +	return dce_i2c_submit_command( +		dc->res_pool, +		ddc->ddc_pin, +		cmd); +} +  static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)  {  	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index b9227d5de3a3..502ed3c7959d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -33,7 +33,6 @@  #include "core_status.h"  #include "core_types.h" -#include "hw_sequencer.h"  #include "resource.h" @@ -310,14 +309,13 @@ void context_timing_trace(  		struct resource_context *res_ctx)  {  	int i; -	struct dc  *core_dc = dc;  	int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};  	struct crtc_position position; -	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; +	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;  	DC_LOGGER_INIT(dc->ctx->logger); -	for (i = 0; i < core_dc->res_pool->pipe_count; i++) { +	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];  		/* get_position() returns CRTC vertical/horizontal counter  		 * hence not applicable for 
underlay pipe @@ -329,7 +327,7 @@ void context_timing_trace(  		h_pos[i] = position.horizontal_count;  		v_pos[i] = position.vertical_count;  	} -	for (i = 0; i < core_dc->res_pool->pipe_count; i++) { +	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];  		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) @@ -347,7 +345,7 @@ void context_clock_trace(  		struct dc *dc,  		struct dc_state *context)  { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	DC_LOGGER_INIT(dc->ctx->logger);  	CLOCK_TRACE("Current: dispclk_khz:%d  max_dppclk_khz:%d  dcfclk_khz:%d\n"  			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d  socclk_khz:%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 12ba6fdf89b7..260c0b62d37d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -45,6 +45,7 @@  #include "dpcd_defs.h"  #include "dmcu.h"  #include "hw/clk_mgr.h" +#include "../dce/dmub_psr.h"  #define DC_LOGGER_INIT(logger) @@ -74,7 +75,7 @@ enum {  /*******************************************************************************   * Private functions   ******************************************************************************/ -static void destruct(struct dc_link *link) +static void dc_link_destruct(struct dc_link *link)  {  	int i; @@ -372,7 +373,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)  	if (GPIO_RESULT_OK != dal_ddc_open(  		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { -		dal_gpio_destroy_ddc(&ddc); +		dal_ddc_close(ddc);  		return present;  	} @@ -817,8 +818,8 @@ static bool dc_link_detect_helper(struct dc_link *link,  		}  		case SIGNAL_TYPE_EDP: { -			read_current_link_settings_on_detect(link);  			detect_edp_sink_caps(link); +			read_current_link_settings_on_detect(link);  			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;  			
sink_caps.signal = SIGNAL_TYPE_EDP;  			break; @@ -1244,7 +1245,7 @@ static enum transmitter translate_encoder_to_transmitter(  	}  } -static bool construct( +static bool dc_link_construct(  	struct dc_link *link,  	const struct link_init_data *init_params)  { @@ -1446,7 +1447,7 @@ struct dc_link *link_create(const struct link_init_data *init_params)  	if (NULL == link)  		goto alloc_fail; -	if (false == construct(link, init_params)) +	if (false == dc_link_construct(link, init_params))  		goto construct_fail;  	return link; @@ -1460,7 +1461,7 @@ alloc_fail:  void link_destroy(struct dc_link **link)  { -	destruct(*link); +	dc_link_destruct(*link);  	kfree(*link);  	*link = NULL;  } @@ -1495,10 +1496,7 @@ static enum dc_status enable_link_dp(  	bool skip_video_pattern;  	struct dc_link *link = stream->link;  	struct dc_link_settings link_settings = {0}; -	enum dp_panel_mode panel_mode; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool fec_enable; -#endif  	int i;  	bool apply_seamless_boot_optimization = false; @@ -1514,15 +1512,6 @@ static enum dc_status enable_link_dp(  	decide_link_settings(stream, &link_settings);  	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { -		/* If link settings are different than current and link already enabled -		 * then need to disable before programming to new rate. 
-		 */ -		if (link->link_status.link_active && -			(link->cur_link_settings.lane_count != link_settings.lane_count || -			 link->cur_link_settings.link_rate != link_settings.link_rate)) { -			dp_disable_link_phy(link, pipe_ctx->stream->signal); -		} -  		/*in case it is not on*/  		link->dc->hwss.edp_power_control(link, true);  		link->dc->hwss.edp_wait_for_hpd_ready(link, true); @@ -1533,50 +1522,29 @@ static enum dc_status enable_link_dp(  	if (state->clk_mgr && !apply_seamless_boot_optimization)  		state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); -	dp_enable_link_phy( -		link, -		pipe_ctx->stream->signal, -		pipe_ctx->clock_source->id, -		&link_settings); - -	if (stream->sink_patches.dppowerup_delay > 0) { -		int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - -		msleep(delay_dp_power_up_in_ms); -	} - -	panel_mode = dp_get_panel_mode(link); -	dp_set_panel_mode(link, panel_mode); -  	skip_video_pattern = true;  	if (link_settings.link_rate == LINK_RATE_LOW)  			skip_video_pattern = false; -	if (link->aux_access_disabled) { -		dc_link_dp_perform_link_training_skip_aux(link, &link_settings); - -		link->cur_link_settings = link_settings; -		status = DC_OK; -	} else if (perform_link_training_with_retries( -			link, +	if (perform_link_training_with_retries(  			&link_settings,  			skip_video_pattern, -			LINK_TRAINING_ATTEMPTS)) { +			LINK_TRAINING_ATTEMPTS, +			pipe_ctx, +			pipe_ctx->stream->signal)) {  		link->cur_link_settings = link_settings;  		status = DC_OK;  	}  	else  		status = DC_FAIL_DP_LINK_TRAINING; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (link->preferred_training_settings.fec_enable != NULL)  		fec_enable = *link->preferred_training_settings.fec_enable;  	else  		fec_enable = true;  	dp_set_fec_enable(link, fec_enable); -#endif  	return status;  } @@ -2063,6 +2031,45 @@ static void write_i2c_redriver_setting(  		ASSERT(i2c_success);  } +static void disable_link(struct dc_link *link, enum signal_type signal) +{ +	
/* +	 * TODO: implement call for dp_set_hw_test_pattern +	 * it is needed for compliance testing +	 */ + +	/* Here we need to specify that encoder output settings +	 * need to be calculated as for the set mode, +	 * it will lead to querying dynamic link capabilities +	 * which should be done before enable output +	 */ + +	if (dc_is_dp_signal(signal)) { +		/* SST DP, eDP */ +		if (dc_is_dp_sst_signal(signal)) +			dp_disable_link_phy(link, signal); +		else +			dp_disable_link_phy_mst(link, signal); + +		if (dc_is_dp_sst_signal(signal) || +				link->mst_stream_alloc_table.stream_count == 0) { +			dp_set_fec_enable(link, false); +			dp_set_fec_ready(link, false); +		} +	} else { +		if (signal != SIGNAL_TYPE_VIRTUAL) +			link->link_enc->funcs->disable_output(link->link_enc, signal); +	} + +	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		/* MST disable link only when no stream use the link */ +		if (link->mst_stream_alloc_table.stream_count <= 0) +			link->link_status.link_active = false; +	} else { +		link->link_status.link_active = false; +	} +} +  static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)  {  	struct dc_stream_state *stream = pipe_ctx->stream; @@ -2147,6 +2154,19 @@ static enum dc_status enable_link(  		struct pipe_ctx *pipe_ctx)  {  	enum dc_status status = DC_ERROR_UNEXPECTED; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; + +	/* There's some scenarios where driver is unloaded with display +	 * still enabled. When driver is reloaded, it may cause a display +	 * to not light up if there is a mismatch between old and new +	 * link settings. Need to call disable first before enabling at +	 * new link settings. 
+	 */ +	if (link->link_status.link_active) { +		disable_link(link, pipe_ctx->stream->signal); +	} +  	switch (pipe_ctx->stream->signal) {  	case SIGNAL_TYPE_DISPLAY_PORT:  		status = enable_link_dp(state, pipe_ctx); @@ -2181,46 +2201,6 @@ static enum dc_status enable_link(  	return status;  } -static void disable_link(struct dc_link *link, enum signal_type signal) -{ -	/* -	 * TODO: implement call for dp_set_hw_test_pattern -	 * it is needed for compliance testing -	 */ - -	/* here we need to specify that encoder output settings -	 * need to be calculated as for the set mode, -	 * it will lead to querying dynamic link capabilities -	 * which should be done before enable output */ - -	if (dc_is_dp_signal(signal)) { -		/* SST DP, eDP */ -		if (dc_is_dp_sst_signal(signal)) -			dp_disable_link_phy(link, signal); -		else -			dp_disable_link_phy_mst(link, signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - -		if (dc_is_dp_sst_signal(signal) || -				link->mst_stream_alloc_table.stream_count == 0) { -			dp_set_fec_enable(link, false); -			dp_set_fec_ready(link, false); -		} -#endif -	} else { -		if (signal != SIGNAL_TYPE_VIRTUAL) -			link->link_enc->funcs->disable_output(link->link_enc, signal); -	} - -	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -		/* MST disable link only when no stream use the link */ -		if (link->mst_stream_alloc_table.stream_count <= 0) -			link->link_status.link_active = false; -	} else { -		link->link_status.link_active = false; -	} -} -  static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)  { @@ -2357,9 +2337,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link,  		uint32_t backlight_pwm_u16_16,  		uint32_t frame_ramp)  { -	struct dc  *core_dc = link->ctx->dc; -	struct abm *abm = core_dc->res_pool->abm; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc  *dc = link->ctx->dc; +	struct abm *abm = dc->res_pool->abm; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	unsigned int controller_id = 0;  	bool 
use_smooth_brightness = true;  	int i; @@ -2377,22 +2357,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link,  	if (dc_is_embedded_signal(link->connector_signal)) {  		for (i = 0; i < MAX_PIPES; i++) { -			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { -				if (core_dc->current_state->res_ctx. +			if (dc->current_state->res_ctx.pipe_ctx[i].stream) { +				if (dc->current_state->res_ctx.  						pipe_ctx[i].stream->link  						== link) {  					/* DMCU -1 for all controller id values,  					 * therefore +1 here  					 */  					controller_id = -						core_dc->current_state-> +						dc->current_state->  						res_ctx.pipe_ctx[i].stream_res.tg->inst +  						1;  					/* Disable brightness ramping when the display is blanked  					 * as it can hang the DMCU  					 */ -					if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) +					if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)  						frame_ramp = 0;  				}  			} @@ -2410,8 +2390,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,  bool dc_link_set_abm_disable(const struct dc_link *link)  { -	struct dc  *core_dc = link->ctx->dc; -	struct abm *abm = core_dc->res_pool->abm; +	struct dc  *dc = link->ctx->dc; +	struct abm *abm = dc->res_pool->abm;  	if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))  		return false; @@ -2423,12 +2403,13 @@ bool dc_link_set_abm_disable(const struct dc_link *link)  bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)  { -	struct dc  *core_dc = link->ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; - +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu; +	struct dmub_psr *psr = dc->res_pool->psr; - -	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) +	if ((psr != NULL) && link->psr_feature_enabled) +		psr->funcs->set_psr_enable(psr, allow_active); +	else if ((dmcu != NULL && 
dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)  		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);  	link->psr_allow_active = allow_active; @@ -2438,10 +2419,13 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool  bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)  { -	struct dc  *core_dc = link->ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu; +	struct dmub_psr *psr = dc->res_pool->psr; -	if (dmcu != NULL && link->psr_feature_enabled) +	if (psr != NULL && link->psr_feature_enabled) +		psr->funcs->get_psr_state(psr_state); +	else if (dmcu != NULL && link->psr_feature_enabled)  		dmcu->funcs->get_psr_state(dmcu, psr_state);  	return true; @@ -2486,8 +2470,9 @@ bool dc_link_setup_psr(struct dc_link *link,  		const struct dc_stream_state *stream, struct psr_config *psr_config,  		struct psr_context *psr_context)  { -	struct dc *core_dc; +	struct dc *dc;  	struct dmcu *dmcu; +	struct dmub_psr *psr;  	int i;  	/* updateSinkPsrDpcdConfig*/  	union dpcd_psr_configuration psr_configuration; @@ -2497,10 +2482,11 @@ bool dc_link_setup_psr(struct dc_link *link,  	if (!link)  		return false; -	core_dc = link->ctx->dc; -	dmcu = core_dc->res_pool->dmcu; +	dc = link->ctx->dc; +	dmcu = dc->res_pool->dmcu; +	psr = dc->res_pool->psr; -	if (!dmcu) +	if (!dmcu && !psr)  		return false; @@ -2537,13 +2523,13 @@ bool dc_link_setup_psr(struct dc_link *link,  	psr_context->engineId = link->link_enc->preferred_engine;  	for (i = 0; i < MAX_PIPES; i++) { -		if (core_dc->current_state->res_ctx.pipe_ctx[i].stream +		if (dc->current_state->res_ctx.pipe_ctx[i].stream  				== stream) {  			/* dmcu -1 for all controller id values,  			 * therefore +1 here  			 */  			psr_context->controllerId = -				core_dc->current_state->res_ctx. +				dc->current_state->res_ctx.  				
pipe_ctx[i].stream_res.tg->inst + 1;  			break;  		} @@ -2556,7 +2542,7 @@ bool dc_link_setup_psr(struct dc_link *link,  		transmitter_to_phy_id(link->link_enc->transmitter);  	psr_context->crtcTimingVerticalTotal = stream->timing.v_total; -	psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> +	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->  					timing.pix_clk_100hz * 100),  					stream->timing.v_total),  					stream->timing.h_total); @@ -2586,7 +2572,7 @@ bool dc_link_setup_psr(struct dc_link *link,  	psr_context->psr_level.u32all = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	/*skip power down the single pipe since it blocks the cstate*/  	if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))  		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; @@ -2609,7 +2595,10 @@ bool dc_link_setup_psr(struct dc_link *link,  	 */  	psr_context->frame_delay = 0; -	link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); +	if (psr) +		link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context); +	else +		link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);  	/* psr_enabled == 0 indicates setup_psr did not succeed, but this  	 * should not happen since firmware should be running at this point @@ -2644,28 +2633,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)  	return dc_fixpt_div_int(mbytes_per_sec, 54);  } -static int get_color_depth(enum dc_color_depth color_depth) -{ -	switch (color_depth) { -	case COLOR_DEPTH_666: return 6; -	case COLOR_DEPTH_888: return 8; -	case COLOR_DEPTH_101010: return 10; -	case COLOR_DEPTH_121212: return 12; -	case COLOR_DEPTH_141414: return 14; -	case COLOR_DEPTH_161616: return 16; -	default: return 0; -	} -} -  static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)  { -	uint32_t bpc;  	uint64_t kbps;  	struct fixed31_32 peak_kbps;  	uint32_t numerator;  	uint32_t denominator; -	bpc = 
get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);  	kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);  	/* @@ -2899,6 +2873,39 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  	return DC_OK;  } + +enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) +{ +	int i; +	struct pipe_ctx *pipe_ctx; + +	// Clear all of MST payload then reallocate +	for (i = 0; i < MAX_PIPES; i++) { +		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && +				pipe_ctx->stream->dpms_off == false && +				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +			deallocate_mst_payload(pipe_ctx); +		} +	} + +	for (i = 0; i < MAX_PIPES; i++) { +		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && +				pipe_ctx->stream->dpms_off == false && +				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +			/* enable/disable PHY will clear connection between BE and FE +			 * need to restore it. 
+			 */ +			link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, +									pipe_ctx->stream_res.stream_enc->id, true); +			dc_link_allocate_mst_payload(pipe_ctx); +		} +	} + +	return DC_OK; +} +  #if defined(CONFIG_DRM_AMD_DC_HDCP)  static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)  { @@ -2922,12 +2929,12 @@ void core_link_enable_stream(  		struct dc_state *state,  		struct pipe_ctx *pipe_ctx)  { -	struct dc *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct dc_stream_state *stream = pipe_ctx->stream;  	enum dc_status status;  	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&  			dc_is_virtual_signal(pipe_ctx->stream->signal))  		return; @@ -2946,6 +2953,7 @@ void core_link_enable_stream(  			pipe_ctx->stream_res.stream_enc,  			&stream->timing,  			stream->output_color_space, +			stream->use_vsc_sdp_for_colorimetry,  			stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);  	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) @@ -2969,14 +2977,14 @@ void core_link_enable_stream(  			pipe_ctx->stream_res.stream_enc,  			&stream->timing); -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		bool apply_edp_fast_boot_optimization =  			pipe_ctx->stream->apply_edp_fast_boot_optimization;  		pipe_ctx->stream->apply_edp_fast_boot_optimization = false;  		resource_build_info_frame(pipe_ctx); -		core_dc->hwss.update_info_frame(pipe_ctx); +		dc->hwss.update_info_frame(pipe_ctx);  		/* Do not touch link on seamless boot optimization. 
*/  		if (pipe_ctx->stream->apply_seamless_boot_optimization) { @@ -3019,7 +3027,7 @@ void core_link_enable_stream(  			}  		} -		core_dc->hwss.enable_audio_stream(pipe_ctx); +		dc->hwss.enable_audio_stream(pipe_ctx);  		/* turn off otg test pattern if enable */  		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) @@ -3027,28 +3035,24 @@ void core_link_enable_stream(  					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,  					COLOR_DEPTH_UNDEFINED); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		if (pipe_ctx->stream->timing.flags.DSC) {  			if (dc_is_dp_signal(pipe_ctx->stream->signal) ||  					dc_is_virtual_signal(pipe_ctx->stream->signal))  				dp_set_dsc_enable(pipe_ctx, true);  		} -#endif -		core_dc->hwss.enable_stream(pipe_ctx); +		dc->hwss.enable_stream(pipe_ctx); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		/* Set DPS PPS SDP (AKA "info frames") */  		if (pipe_ctx->stream->timing.flags.DSC) {  			if (dc_is_dp_signal(pipe_ctx->stream->signal) ||  					dc_is_virtual_signal(pipe_ctx->stream->signal))  				dp_set_dsc_pps_sdp(pipe_ctx, true);  		} -#endif  		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)  			dc_link_allocate_mst_payload(pipe_ctx); -		core_dc->hwss.unblank_stream(pipe_ctx, +		dc->hwss.unblank_stream(pipe_ctx,  			&pipe_ctx->stream->link->cur_link_settings);  		if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -3056,24 +3060,21 @@ void core_link_enable_stream(  #if defined(CONFIG_DRM_AMD_DC_HDCP)  		update_psp_stream_config(pipe_ctx, false);  #endif -	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -	else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) +	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))  		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||  				dc_is_virtual_signal(pipe_ctx->stream->signal))  			dp_set_dsc_enable(pipe_ctx, true);  	} -#endif  }  void core_link_disable_stream(struct pipe_ctx *pipe_ctx)  { -	struct dc  *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc  *dc = pipe_ctx->stream->ctx->dc;  	struct 
dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->sink->link; -	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&  			dc_is_virtual_signal(pipe_ctx->stream->signal))  		return; @@ -3081,7 +3082,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)  	update_psp_stream_config(pipe_ctx, true);  #endif -	core_dc->hwss.blank_stream(pipe_ctx); +	dc->hwss.blank_stream(pipe_ctx);  	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)  		deallocate_mst_payload(pipe_ctx); @@ -3110,25 +3111,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)  			write_i2c_redriver_setting(pipe_ctx, false);  		}  	} -	core_dc->hwss.disable_stream(pipe_ctx); +	dc->hwss.disable_stream(pipe_ctx);  	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (pipe_ctx->stream->timing.flags.DSC) {  		if (dc_is_dp_signal(pipe_ctx->stream->signal))  			dp_set_dsc_enable(pipe_ctx, false);  	} -#endif  }  void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)  { -	struct dc  *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc  *dc = pipe_ctx->stream->ctx->dc;  	if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))  		return; -	core_dc->hwss.set_avmute(pipe_ctx, enable); +	dc->hwss.set_avmute(pipe_ctx, enable);  }  /** @@ -3186,13 +3185,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(  	uint32_t bits_per_channel = 0;  	uint32_t kbps; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (timing->flags.DSC) {  		kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);  		kbps = kbps / 160 + ((kbps % 160) ? 
1 : 0);  		return kbps;  	} -#endif  	switch (timing->display_color_depth) {  	case COLOR_DEPTH_666: @@ -3345,6 +3342,7 @@ void dc_link_disable_hpd(const struct dc_link *link)  void dc_link_set_test_pattern(struct dc_link *link,  			      enum dp_test_pattern test_pattern, +			      enum dp_test_pattern_color_space test_pattern_color_space,  			      const struct link_training_settings *p_link_settings,  			      const unsigned char *p_custom_pattern,  			      unsigned int cust_pattern_size) @@ -3353,6 +3351,7 @@ void dc_link_set_test_pattern(struct dc_link *link,  		dc_link_dp_set_test_pattern(  			link,  			test_pattern, +			test_pattern_color_space,  			p_link_settings,  			p_custom_pattern,  			cust_pattern_size); @@ -3368,7 +3367,6 @@ uint32_t dc_link_bandwidth_kbps(  	link_bw_kbps *= 8;   /* 8 bits per byte*/  	link_bw_kbps *= link_setting->lane_count; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {  		/* Account for FEC overhead.  		 * We have to do it based on caps, @@ -3393,7 +3391,6 @@ uint32_t dc_link_bandwidth_kbps(  		link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,  					       link_bw_kbps, 32);  	} -#endif  	return link_bw_kbps; @@ -3407,3 +3404,10 @@ const struct dc_link_settings *dc_link_get_link_cap(  		return &link->preferred_link_setting;  	return &link->verified_link_cap;  } + +void dc_link_overwrite_extended_receiver_cap( +		struct dc_link *link) +{ +	dp_overwrite_extended_receiver_cap(link); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 7f904d55c1bc..a49c10d5df26 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add(  } -static void construct( +static void ddc_service_construct(  	struct ddc_service *ddc_service,  	struct ddc_service_init_data *init_data)  { @@ -206,7 +206,10 @@ static void 
construct(  		ddc_service->ddc_pin = NULL;  	} else {  		hw_info.ddc_channel = i2c_info.i2c_line; -		hw_info.hw_supported = i2c_info.i2c_hw_assist; +		if (ddc_service->link != NULL) +			hw_info.hw_supported = i2c_info.i2c_hw_assist; +		else +			hw_info.hw_supported = false;  		ddc_service->ddc_pin = dal_gpio_create_ddc(  			gpio_service, @@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create(  	if (!ddc_service)  		return NULL; -	construct(ddc_service, init_data); +	ddc_service_construct(ddc_service, init_data);  	return ddc_service;  } -static void destruct(struct ddc_service *ddc) +static void ddc_service_destruct(struct ddc_service *ddc)  {  	if (ddc->ddc_pin)  		dal_gpio_destroy_ddc(&ddc->ddc_pin); @@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)  		BREAK_TO_DEBUGGER();  		return;  	} -	destruct(*ddc); +	ddc_service_destruct(*ddc);  	kfree(*ddc);  	*ddc = NULL;  } @@ -586,8 +589,8 @@ bool dal_ddc_service_query_ddc_data(  bool dal_ddc_submit_aux_command(struct ddc_service *ddc,  		struct aux_payload *payload)  { -	uint8_t retrieved = 0; -	bool ret = 0; +	uint32_t retrieved = 0; +	bool ret = false;  	if (!ddc)  		return false; @@ -647,17 +650,16 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,  } -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,  		uint32_t timeout)  { -	enum dc_status status = DC_OK; +	uint32_t prev_timeout = 0;  	struct ddc *ddc_pin = ddc->ddc_pin; -	if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) -		return DC_ERROR_UNEXPECTED; -	if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) -		status = DC_ERROR_UNEXPECTED; -	return status; +	if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) +		prev_timeout = +				
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); +	return prev_timeout;  }  /*test only function*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0f59b68aa4c2..6ab298c65247 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4,12 +4,8 @@  #include "dc_link_dp.h"  #include "dm_helpers.h"  #include "opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "resource.h" -#endif  #include "inc/core_types.h"  #include "link_hwss.h" @@ -21,6 +17,9 @@  #define DC_LOGGER \  	link->ctx->logger + +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE   0x50 +  /* maximum pre emphasis level allowed for each voltage swing level*/  static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {  		PRE_EMPHASIS_LEVEL3, @@ -221,19 +220,31 @@ static enum dpcd_training_patterns  	return dpcd_tr_pattern;  } +static inline bool is_repeater(struct dc_link *link, uint32_t offset) +{ +	return (!link->is_lttpr_mode_transparent && offset != 0); +} +  static void dpcd_set_lt_pattern_and_lane_settings(  	struct dc_link *link,  	const struct link_training_settings *lt_settings, -	enum dc_dp_training_pattern pattern) +	enum dc_dp_training_pattern pattern, +	uint32_t offset)  {  	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; -	const uint32_t dpcd_base_lt_offset = -	DP_TRAINING_PATTERN_SET; + +	uint32_t dpcd_base_lt_offset; +  	uint8_t dpcd_lt_buffer[5] = {0};  	union dpcd_training_pattern dpcd_pattern = { {0} };  	uint32_t lane;  	uint32_t size_in_bytes;  	bool edp_workaround = false; /* TODO link_prop.INTERNAL */ +	dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + +	if (is_repeater(link, offset)) +		dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + +			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 
1));  	/*****************************************************************  	* DpcdAddress_TrainingPatternSet @@ -241,14 +252,21 @@ static void dpcd_set_lt_pattern_and_lane_settings(  	dpcd_pattern.v1_4.TRAINING_PATTERN_SET =  		dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); -	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] +	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]  		= dpcd_pattern.raw; -	DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", -		__func__, -		DP_TRAINING_PATTERN_SET, -		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - +	if (is_repeater(link, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", +			__func__, +			offset, +			dpcd_base_lt_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", +			__func__, +			dpcd_base_lt_offset, +			dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +	}  	/*****************************************************************  	* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set  	*****************************************************************/ @@ -268,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings(  		PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0);  	} -	/* concatinate everything into one buffer*/ +	/* concatenate everything into one buffer*/  	size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);  	 // 0x00103 - 0x00102  	memmove( -		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset], +		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],  		dpcd_lane,  		size_in_bytes); -	DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", -		__func__, -		DP_TRAINING_LANE0_SET, -		dpcd_lane[0].bits.VOLTAGE_SWING_SET, -		dpcd_lane[0].bits.PRE_EMPHASIS_SET, -		dpcd_lane[0].bits.MAX_SWING_REACHED, -		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); - +	if (is_repeater(link, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X VS set = %x PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			offset, +			dpcd_base_lt_offset, +			dpcd_lane[0].bits.VOLTAGE_SWING_SET, +			dpcd_lane[0].bits.PRE_EMPHASIS_SET, +			dpcd_lane[0].bits.MAX_SWING_REACHED, +			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			dpcd_base_lt_offset, +			dpcd_lane[0].bits.VOLTAGE_SWING_SET, +			dpcd_lane[0].bits.PRE_EMPHASIS_SET, +			dpcd_lane[0].bits.MAX_SWING_REACHED, +			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); +	}  	if (edp_workaround) {  		/* for eDP write in 2 parts because the 5-byte burst is  		* causing issues on some eDP panels (EPR#366724) @@ -495,8 +524,12 @@ static void get_lane_status_and_drive_settings(  	const struct link_training_settings *link_training_setting,  	union lane_status *ln_status,  	union lane_align_status_updated *ln_status_updated, -	struct link_training_settings *req_settings) +	struct link_training_settings *req_settings, +	uint32_t offset)  { +	unsigned int lane01_status_address = DP_LANE0_1_STATUS; +	uint8_t lane_adjust_offset = 4; +	
unsigned int lane01_adjust_address;  	uint8_t dpcd_buf[6] = {0};  	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };  	struct link_training_settings request_settings = { {0} }; @@ -504,9 +537,16 @@ static void get_lane_status_and_drive_settings(  	memset(req_settings, '\0', sizeof(struct link_training_settings)); +	if (is_repeater(link, offset)) { +		lane01_status_address = +				DP_LANE0_1_STATUS_PHY_REPEATER1 + +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +		lane_adjust_offset = 3; +	} +  	core_link_read_dpcd(  		link, -		DP_LANE0_1_STATUS, +		lane01_status_address,  		(uint8_t *)(dpcd_buf),  		sizeof(dpcd_buf)); @@ -517,22 +557,47 @@ static void get_lane_status_and_drive_settings(  		ln_status[lane].raw =  			get_nibble_at_index(&dpcd_buf[0], lane);  		dpcd_lane_adjust[lane].raw = -			get_nibble_at_index(&dpcd_buf[4], lane); +			get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);  	}  	ln_status_updated->raw = dpcd_buf[2]; -	DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ", -		__func__, -		DP_LANE0_1_STATUS, dpcd_buf[0], -		DP_LANE2_3_STATUS, dpcd_buf[1]); - -	DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n", -		__func__, -		DP_ADJUST_REQUEST_LANE0_1, -		dpcd_buf[4], -		DP_ADJUST_REQUEST_LANE2_3, -		dpcd_buf[5]); +	if (is_repeater(link, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", +			__func__, +			offset, +			lane01_status_address, dpcd_buf[0], +			lane01_status_address + 1, dpcd_buf[1]); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", +			__func__, +			lane01_status_address, dpcd_buf[0], +			lane01_status_address + 1, dpcd_buf[1]); +	} +	lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + +	if (is_repeater(link, offset)) +		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + +				
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +	if (is_repeater(link, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" +				" 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", +					__func__, +					offset, +					lane01_adjust_address, +					dpcd_buf[lane_adjust_offset], +					lane01_adjust_address + 1, +					dpcd_buf[lane_adjust_offset + 1]); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", +			__func__, +			lane01_adjust_address, +			dpcd_buf[lane_adjust_offset], +			lane01_adjust_address + 1, +			dpcd_buf[lane_adjust_offset + 1]); +	}  	/*copy to req_settings*/  	request_settings.link_settings.lane_count = @@ -571,10 +636,18 @@ static void get_lane_status_and_drive_settings(  static void dpcd_set_lane_settings(  	struct dc_link *link, -	const struct link_training_settings *link_training_setting) +	const struct link_training_settings *link_training_setting, +	uint32_t offset)  {  	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};  	uint32_t lane; +	unsigned int lane0_set_address; + +	lane0_set_address = DP_TRAINING_LANE0_SET; + +	if (is_repeater(link, offset)) +		lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + +		((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));  	for (lane = 0; lane <  		(uint32_t)(link_training_setting-> @@ -597,7 +670,7 @@ static void dpcd_set_lane_settings(  	}  	core_link_write_dpcd(link, -		DP_TRAINING_LANE0_SET, +		lane0_set_address,  		(uint8_t *)(dpcd_lane),  		link_training_setting->link_settings.lane_count); @@ -620,14 +693,26 @@ static void dpcd_set_lane_settings(  	}  	*/ -	DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", -		__func__, -		DP_TRAINING_LANE0_SET, -		dpcd_lane[0].bits.VOLTAGE_SWING_SET, -		dpcd_lane[0].bits.PRE_EMPHASIS_SET, -		dpcd_lane[0].bits.MAX_SWING_REACHED, -		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); +	if 
(is_repeater(link, offset)) { +		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" +				" 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			offset, +			lane0_set_address, +			dpcd_lane[0].bits.VOLTAGE_SWING_SET, +			dpcd_lane[0].bits.PRE_EMPHASIS_SET, +			dpcd_lane[0].bits.MAX_SWING_REACHED, +			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); +	} else { +		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n", +			__func__, +			lane0_set_address, +			dpcd_lane[0].bits.VOLTAGE_SWING_SET, +			dpcd_lane[0].bits.PRE_EMPHASIS_SET, +			dpcd_lane[0].bits.MAX_SWING_REACHED, +			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); +	}  	link->cur_lane_setting = link_training_setting->lane_settings[0];  } @@ -647,17 +732,6 @@ static bool is_max_vs_reached(  } -void dc_link_dp_set_drive_settings( -	struct dc_link *link, -	struct link_training_settings *lt_settings) -{ -	/* program ASIC PHY settings*/ -	dp_set_hw_lane_settings(link, lt_settings); - -	/* Notify DP sink the PHY settings from source */ -	dpcd_set_lane_settings(link, lt_settings); -} -  static bool perform_post_lt_adj_req_sequence(  	struct dc_link *link,  	struct link_training_settings *lt_settings) @@ -690,7 +764,8 @@ static bool perform_post_lt_adj_req_sequence(  			lt_settings,  			dpcd_lane_status,  			&dpcd_lane_status_updated, -			&req_settings); +			&req_settings, +			DPRX);  			if (dpcd_lane_status_updated.bits.  					
POST_LT_ADJ_REQ_IN_PROGRESS == 0) @@ -747,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence(  } +/* Only used for channel equalization */ +static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ +	unsigned int aux_rd_interval_us = 400; + +	switch (dpcd_aux_read_interval) { +	case 0x01: +		aux_rd_interval_us = 400; +		break; +	case 0x02: +		aux_rd_interval_us = 4000; +		break; +	case 0x03: +		aux_rd_interval_us = 8000; +		break; +	case 0x04: +		aux_rd_interval_us = 16000; +		break; +	default: +		break; +	} + +	return aux_rd_interval_us; +} +  static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,  					union lane_status *dpcd_lane_status)  { @@ -765,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,  static enum link_training_result perform_channel_equalization_sequence(  	struct dc_link *link, -	struct link_training_settings *lt_settings) +	struct link_training_settings *lt_settings, +	uint32_t offset)  {  	struct link_training_settings req_settings;  	enum dc_dp_training_pattern tr_pattern;  	uint32_t retries_ch_eq; +	uint32_t wait_time_microsec;  	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;  	union lane_align_status_updated dpcd_lane_status_updated = { {0} };  	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; +	/* Note: also check that TPS4 is a supported feature*/ +  	tr_pattern = lt_settings->pattern_for_eq; -	dp_set_hw_training_pattern(link, tr_pattern); +	if (is_repeater(link, offset)) +		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + +	dp_set_hw_training_pattern(link, tr_pattern, offset);  	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;  		retries_ch_eq++) { -		dp_set_hw_lane_settings(link, lt_settings); +		dp_set_hw_lane_settings(link, lt_settings, offset);  		/* 2. 
update DPCD*/  		if (!retries_ch_eq)  			/* EPR #361076 - write as a 5-byte burst, -			 * but only for the 1-st iteration*/ +			 * but only for the 1-st iteration +			 */ +  			dpcd_set_lt_pattern_and_lane_settings(  				link,  				lt_settings, -				tr_pattern); +				tr_pattern, offset);  		else -			dpcd_set_lane_settings(link, lt_settings); +			dpcd_set_lane_settings(link, lt_settings, offset);  		/* 3. wait for receiver to lock-on*/ -		wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time); +		wait_time_microsec = lt_settings->eq_pattern_time; + +		if (is_repeater(link, offset)) +			wait_time_microsec = +					translate_training_aux_read_interval( +						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + +		wait_for_training_aux_rd_interval( +				link, +				wait_time_microsec);  		/* 4. Read lane status and requested  		 * drive settings as set by the sink*/ @@ -805,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence(  			lt_settings,  			dpcd_lane_status,  			&dpcd_lane_status_updated, -			&req_settings); +			&req_settings, +			offset);  		/* 5. 
check CR done*/  		if (!is_cr_done(lane_count, dpcd_lane_status)) @@ -824,13 +943,16 @@ static enum link_training_result perform_channel_equalization_sequence(  	return LINK_TRAINING_EQ_FAIL_EQ;  } +#define TRAINING_AUX_RD_INTERVAL 100 //us  static enum link_training_result perform_clock_recovery_sequence(  	struct dc_link *link, -	struct link_training_settings *lt_settings) +	struct link_training_settings *lt_settings, +	uint32_t offset)  {  	uint32_t retries_cr;  	uint32_t retry_count; +	uint32_t wait_time_microsec;  	struct link_training_settings req_settings;  	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;  	enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; @@ -840,7 +962,7 @@ static enum link_training_result perform_clock_recovery_sequence(  	retries_cr = 0;  	retry_count = 0; -	dp_set_hw_training_pattern(link, tr_pattern); +	dp_set_hw_training_pattern(link, tr_pattern, offset);  	/* najeeb - The synaptics MST hub can put the LT in  	* infinite loop by switching the VS @@ -857,7 +979,8 @@ static enum link_training_result perform_clock_recovery_sequence(  		/* 1. call HWSS to set lane settings*/  		dp_set_hw_lane_settings(  				link, -				lt_settings); +				lt_settings, +				offset);  		/* 2. update DPCD of the receiver*/  		if (!retries_cr) @@ -866,16 +989,23 @@ static enum link_training_result perform_clock_recovery_sequence(  			dpcd_set_lt_pattern_and_lane_settings(  					link,  					lt_settings, -					tr_pattern); +					tr_pattern, +					offset);  		else  			dpcd_set_lane_settings(  					link, -					lt_settings); +					lt_settings, +					offset);  		/* 3. wait receiver to lock-on*/ +		wait_time_microsec = lt_settings->cr_pattern_time; + +		if (!link->is_lttpr_mode_transparent) +			wait_time_microsec = TRAINING_AUX_RD_INTERVAL; +  		wait_for_training_aux_rd_interval(  				link, -				lt_settings->cr_pattern_time); +				wait_time_microsec);  		/* 4. 
Read lane status and requested drive  		* settings as set by the sink @@ -885,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence(  				lt_settings,  				dpcd_lane_status,  				&dpcd_lane_status_updated, -				&req_settings); +				&req_settings, +				offset);  		/* 5. check CR done*/  		if (is_cr_done(lane_count, dpcd_lane_status)) @@ -1054,6 +1185,102 @@ static void initialize_training_settings(  		lt_settings->enhanced_framing = 1;  } +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ +	switch (lttpr_repeater_count) { +	case 0x80: // 1 lttpr repeater +		return 1; +	case 0x40: // 2 lttpr repeaters +		return 2; +	case 0x20: // 3 lttpr repeaters +		return 3; +	case 0x10: // 4 lttpr repeaters +		return 4; +	case 0x08: // 5 lttpr repeaters +		return 5; +	case 0x04: // 6 lttpr repeaters +		return 6; +	case 0x02: // 7 lttpr repeaters +		return 7; +	case 0x01: // 8 lttpr repeaters +		return 8; +	default: +		break; +	} +	return 0; // invalid value +} + +static void configure_lttpr_mode(struct dc_link *link) +{ +	/* aux timeout is already set to extended */ +	/* RESET/SET lttpr mode to enable non transparent mode */ +	uint8_t repeater_cnt; +	uint32_t aux_interval_address; +	uint8_t repeater_id; +	enum dc_status result = DC_ERROR_UNEXPECTED; +	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + +	DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); +	result = core_link_write_dpcd(link, +			DP_PHY_REPEATER_MODE, +			(uint8_t *)&repeater_mode, +			sizeof(repeater_mode)); + +	if (result == DC_OK) { +		link->dpcd_caps.lttpr_caps.mode = repeater_mode; +	} + +	if (!link->is_lttpr_mode_transparent) { + +		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + +		repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; +		result = core_link_write_dpcd(link, +				DP_PHY_REPEATER_MODE, +				(uint8_t *)&repeater_mode, +				sizeof(repeater_mode)); + +		if (result == DC_OK) { +			
link->dpcd_caps.lttpr_caps.mode = repeater_mode; +		} + +		repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +		for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { +			aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + +						((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); +			core_link_read_dpcd( +				link, +				aux_interval_address, +				(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], +				sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); +			link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; +		} +	} +} + +static void repeater_training_done(struct dc_link *link, uint32_t offset) +{ +	union dpcd_training_pattern dpcd_pattern = { {0} }; + +	const uint32_t dpcd_base_lt_offset = +			DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	/* Set training not in progress*/ +	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + +	core_link_write_dpcd( +		link, +		dpcd_base_lt_offset, +		&dpcd_pattern.raw, +		1); + +	DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", +		__func__, +		offset, +		dpcd_base_lt_offset, +		dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} +  static void print_status_message(  	struct dc_link *link,  	const struct link_training_settings *lt_settings, @@ -1133,6 +1360,17 @@ static void print_status_message(  				lt_spread);  } +void dc_link_dp_set_drive_settings( +	struct dc_link *link, +	struct link_training_settings *lt_settings) +{ +	/* program ASIC PHY settings*/ +	dp_set_hw_lane_settings(link, lt_settings, DPRX); + +	/* Notify DP sink the PHY settings from source */ +	dpcd_set_lane_settings(link, lt_settings, DPRX); +} +  bool dc_link_dp_perform_link_training_skip_aux(  	struct dc_link *link,  	const struct dc_link_settings *link_setting) @@ -1149,10 +1387,10 @@ bool dc_link_dp_perform_link_training_skip_aux(  	/* 1. 
Perform_clock_recovery_sequence. */  	/* transmit training pattern for clock recovery */ -	dp_set_hw_training_pattern(link, pattern_for_cr); +	dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);  	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, <_settings); +	dp_set_hw_lane_settings(link, <_settings, DPRX);  	/* wait receiver to lock-on*/  	wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); @@ -1160,10 +1398,10 @@ bool dc_link_dp_perform_link_training_skip_aux(  	/* 2. Perform_channel_equalization_sequence. */  	/* transmit training pattern for channel equalization. */ -	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq); +	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);  	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, <_settings); +	dp_set_hw_lane_settings(link, <_settings, DPRX);  	/* wait receiver to lock-on. */  	wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); @@ -1185,9 +1423,10 @@ enum link_training_result dc_link_dp_perform_link_training(  {  	enum link_training_result status = LINK_TRAINING_SUCCESS;  	struct link_training_settings lt_settings; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +  	bool fec_enable; -#endif +	uint8_t repeater_cnt; +	uint8_t repeater_id;  	initialize_training_settings(  			link, @@ -1198,23 +1437,47 @@ enum link_training_result dc_link_dp_perform_link_training(  	/* 1. set link rate, lane count and spread. */  	dpcd_set_link_settings(link, <_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (link->preferred_training_settings.fec_enable != NULL)  		fec_enable = *link->preferred_training_settings.fec_enable;  	else  		fec_enable = true;  	dp_set_fec_ready(link, fec_enable); -#endif +	if (!link->is_lttpr_mode_transparent) { +		/* Configure lttpr mode */ +		configure_lttpr_mode(link); -	/* 2. 
perform link training (set link training done -	 *  to false is done as well) -	 */ -	status = perform_clock_recovery_sequence(link, <_settings); +		/* 2. perform link training (set link training done +		 *  to false is done as well) +		 */ +		repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + +		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); +				repeater_id--) { +			status = perform_clock_recovery_sequence(link, <_settings, repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) +				break; + +			status = perform_channel_equalization_sequence(link, +					<_settings, +					repeater_id); + +			if (status != LINK_TRAINING_SUCCESS) +				break; + +			repeater_training_done(link, repeater_id); +		} +	} + +	if (status == LINK_TRAINING_SUCCESS) { +		status = perform_clock_recovery_sequence(link, <_settings, DPRX);  	if (status == LINK_TRAINING_SUCCESS) {  		status = perform_channel_equalization_sequence(link, -				<_settings); +					<_settings, +					DPRX); +		}  	}  	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { @@ -1233,23 +1496,58 @@ enum link_training_result dc_link_dp_perform_link_training(  }  bool perform_link_training_with_retries( -	struct dc_link *link,  	const struct dc_link_settings *link_setting,  	bool skip_video_pattern, -	int attempts) +	int attempts, +	struct pipe_ctx *pipe_ctx, +	enum signal_type signal)  {  	uint8_t j;  	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; +	struct dc_stream_state *stream = pipe_ctx->stream; +	struct dc_link *link = stream->link; +	enum dp_panel_mode panel_mode = dp_get_panel_mode(link);  	for (j = 0; j < attempts; ++j) { -		if (dc_link_dp_perform_link_training( +		dp_enable_link_phy( +			link, +			signal, +			pipe_ctx->clock_source->id, +			link_setting); + +		if (stream->sink_patches.dppowerup_delay > 0) { +			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + +			msleep(delay_dp_power_up_in_ms); +		} + +	
	dp_set_panel_mode(link, panel_mode); + +		/* We need to do this before the link training to ensure the idle pattern in SST +		 * mode will be sent right after the link training +		 */ +		link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, +								pipe_ctx->stream_res.stream_enc->id, true); + +		if (link->aux_access_disabled) { +			dc_link_dp_perform_link_training_skip_aux(link, link_setting); +			return true; +		} else if (dc_link_dp_perform_link_training(  				link,  				link_setting,  				skip_video_pattern) == LINK_TRAINING_SUCCESS)  			return true; +		/* latest link training still fail, skip delay and keep PHY on +		 */ +		if (j == (attempts - 1)) +			break; + +		dp_disable_link_phy(link, signal); +  		msleep(delay_between_attempts); +  		delay_between_attempts += LINK_TRAINING_RETRY_DELAY;  	} @@ -1321,9 +1619,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	enum link_training_result lt_status = LINK_TRAINING_SUCCESS;  	enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;  	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool fec_enable = false; -#endif  	initialize_training_settings(  		link, @@ -1343,11 +1639,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	dp_enable_link_phy(link, link->connector_signal,  		dp_cs_id, link_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	/* Set FEC enable */  	fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;  	dp_set_fec_ready(link, fec_enable); -#endif  	if (lt_overrides->alternate_scrambler_reset) {  		if (*lt_overrides->alternate_scrambler_reset) @@ -1367,10 +1661,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	/* 2. 
perform link training (set link training done  	 *  to false is done as well)  	 */ -	lt_status = perform_clock_recovery_sequence(link, <_settings); +	lt_status = perform_clock_recovery_sequence(link, <_settings, DPRX);  	if (lt_status == LINK_TRAINING_SUCCESS) {  		lt_status = perform_channel_equalization_sequence(link, -						<_settings); +						<_settings, +						DPRX);  	}  	/* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ @@ -1387,9 +1682,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)  	 */  	if (link_down == true) {  		dp_disable_link_phy(link, link->connector_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		dp_set_fec_ready(link, false); -#endif  	}  	link->sync_lt_in_progress = false; @@ -1423,6 +1716,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)  			max_link_cap.link_spread)  		max_link_cap.link_spread =  				link->reported_link_cap.link_spread; +	/* +	 * account for lttpr repeaters cap +	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
+	 */ +	if (!link->is_lttpr_mode_transparent) { +		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) +			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + +		if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) +			max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + +		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR,  max_lane count %d max_link rate %d \n", +						__func__, +						max_link_cap.lane_count, +						max_link_cap.link_rate); +	}  	return max_link_cap;  } @@ -1568,6 +1877,13 @@ bool dp_verify_link_cap(  	max_link_cap = get_max_link_cap(link); +	/* Grant extended timeout request */ +	if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { +		uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + +		core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); +	} +  	/* TODO implement override and monitor patch later */  	/* try to train the link from high to low to @@ -1576,6 +1892,16 @@ bool dp_verify_link_cap(  	/* disable PHY done possible by BIOS, will be done by driver itself */  	dp_disable_link_phy(link, link->connector_signal); +	/* Temporary Renoir-specific workaround for SWDEV-215184; +	 * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, +	 * so add extra cycle of enabling and disabling the PHY before first link training. 
+	 */ +	if (link->link_enc->features.flags.bits.DP_IS_USB_C && +			link->dc->debug.usbc_combo_phy_reset_wa) { +		dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); +		dp_disable_link_phy(link, link->connector_signal); +	} +  	dp_cs_id = get_clock_source_id(link);  	/* link training starts with the maximum common settings @@ -2280,6 +2606,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)  	dc_link_dp_set_test_pattern(  		link,  		test_pattern, +		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,  		&link_training_settings,  		test_80_bit_pattern,  		(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - @@ -2291,6 +2618,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)  	union link_test_pattern dpcd_test_pattern;  	union test_misc dpcd_test_params;  	enum dp_test_pattern test_pattern; +	enum dp_test_pattern_color_space test_pattern_color_space = +			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;  	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));  	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); @@ -2325,14 +2654,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)  	break;  	} +	test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
+			DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : +			DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; +  	dc_link_dp_set_test_pattern(  			link,  			test_pattern, +			test_pattern_color_space,  			NULL,  			NULL,  			0);  } +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ +	union audio_test_mode            dpcd_test_mode = {0}; +	struct audio_test_pattern_type   dpcd_pattern_type = {0}; +	union audio_test_pattern_period  dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; +	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + +	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; +	struct pipe_ctx *pipe_ctx = &pipes[0]; +	unsigned int channel_count; +	unsigned int channel = 0; +	unsigned int modes = 0; +	unsigned int sampling_rate_in_hz = 0; + +	// get audio test mode and test pattern parameters +	core_link_read_dpcd( +		link, +		DP_TEST_AUDIO_MODE, +		&dpcd_test_mode.raw, +		sizeof(dpcd_test_mode)); + +	core_link_read_dpcd( +		link, +		DP_TEST_AUDIO_PATTERN_TYPE, +		&dpcd_pattern_type.value, +		sizeof(dpcd_pattern_type)); + +	channel_count = dpcd_test_mode.bits.channel_count + 1; + +	// read pattern periods for requested channels when sawTooth pattern is requested +	if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || +			dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + +		test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
+				DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; +		// read period for each channel +		for (channel = 0; channel < channel_count; channel++) { +			core_link_read_dpcd( +							link, +							DP_TEST_AUDIO_PERIOD_CH1 + channel, +							&dpcd_pattern_period[channel].raw, +							sizeof(dpcd_pattern_period[channel])); +		} +	} + +	// translate sampling rate +	switch (dpcd_test_mode.bits.sampling_rate) { +	case AUDIO_SAMPLING_RATE_32KHZ: +		sampling_rate_in_hz = 32000; +		break; +	case AUDIO_SAMPLING_RATE_44_1KHZ: +		sampling_rate_in_hz = 44100; +		break; +	case AUDIO_SAMPLING_RATE_48KHZ: +		sampling_rate_in_hz = 48000; +		break; +	case AUDIO_SAMPLING_RATE_88_2KHZ: +		sampling_rate_in_hz = 88200; +		break; +	case AUDIO_SAMPLING_RATE_96KHZ: +		sampling_rate_in_hz = 96000; +		break; +	case AUDIO_SAMPLING_RATE_176_4KHZ: +		sampling_rate_in_hz = 176400; +		break; +	case AUDIO_SAMPLING_RATE_192KHZ: +		sampling_rate_in_hz = 192000; +		break; +	default: +		sampling_rate_in_hz = 0; +		break; +	} + +	link->audio_test_data.flags.test_requested = 1; +	link->audio_test_data.flags.disable_video = disable_video; +	link->audio_test_data.sampling_rate = sampling_rate_in_hz; +	link->audio_test_data.channel_count = channel_count; +	link->audio_test_data.pattern_type = test_pattern; + +	if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { +		for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { +			link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; +		} +	} +} +  static void handle_automated_test(struct dc_link *link)  {  	union test_request test_request; @@ -2362,6 +2782,12 @@ static void handle_automated_test(struct dc_link *link)  		dp_test_send_link_test_pattern(link);  		test_response.bits.ACK = 1;  	} + +	if (test_request.bits.AUDIO_TEST_PATTERN) { +		dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); +		test_response.bits.ACK = 1; +	} +  	if 
(test_request.bits.PHY_TEST_PATTERN) {  		dp_test_send_phy_test_pattern(link);  		test_response.bits.ACK = 1; @@ -2381,9 +2807,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd  	union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };  	union device_service_irq device_service_clear = { { 0 } };  	enum dc_status result; -  	bool status = false;  	struct pipe_ctx *pipe_ctx; +	struct dc_link_settings previous_link_settings;  	int i;  	if (out_link_loss) @@ -2447,29 +2873,37 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd  	/* For now we only handle 'Downstream port status' case.  	 * If we got sink count changed it means  	 * Downstream port status changed, -	 * then DM should call DC to do the detection. */ -	if (hpd_rx_irq_check_link_loss_status( -		link, -		&hpd_irq_dpcd_data)) { +	 * then DM should call DC to do the detection. +	 * NOTE: Do not handle link loss on eDP since it is internal link*/ +	if ((link->connector_signal != SIGNAL_TYPE_EDP) && +		hpd_rx_irq_check_link_loss_status( +			link, +			&hpd_irq_dpcd_data)) {  		/* Connectivity log: link loss */  		CONN_DATA_LINK_LOSS(link,  					hpd_irq_dpcd_data.raw,  					sizeof(hpd_irq_dpcd_data),  					"Status: "); -		perform_link_training_with_retries(link, -			&link->cur_link_settings, -			true, LINK_TRAINING_ATTEMPTS); -  		for (i = 0; i < MAX_PIPES; i++) {  			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && -					pipe_ctx->stream->dpms_off == false && -					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { -				dc_link_allocate_mst_payload(pipe_ctx); -			} +			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) +				break;  		} +		if (pipe_ctx == NULL || pipe_ctx->stream == NULL) +			return false; + +		previous_link_settings = link->cur_link_settings; + +		perform_link_training_with_retries(&previous_link_settings, +			true, 
LINK_TRAINING_ATTEMPTS, +			pipe_ctx, +			pipe_ctx->stream->signal); + +		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) +			dc_link_reallocate_mst_payload(link); +  		status = false;  		if (out_link_loss)  			*out_link_loss = true; @@ -2697,7 +3131,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,  		int length)  {  	int retry = 0; -	union dp_downstream_port_present ds_port = { 0 };  	if (!link->dpcd_caps.dpcd_rev.raw) {  		do { @@ -2710,9 +3143,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,  		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);  	} -	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - -				 DP_DPCD_REV]; -  	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {  		switch (link->dpcd_caps.branch_dev_id) {  		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down @@ -2737,7 +3167,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,  static bool retrieve_link_cap(struct dc_link *link)  { -	uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; +	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, +	 * which means size 16 will be good for both of those DPCD register block reads +	 */ +	uint8_t dpcd_data[16]; +	uint8_t lttpr_dpcd_data[6];  	/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.  	 
*/ @@ -2753,7 +3187,19 @@ static bool retrieve_link_cap(struct dc_link *link)  	int i;  	struct dp_sink_hw_fw_revision dp_hw_fw_revision; +	/* Set default timeout to 3.2ms and read LTTPR capabilities */ +	bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && +			!link->dc->config.disable_extended_timeout_support; + +	link->is_lttpr_mode_transparent = true; + +	if (ext_timeout_support) { +		dc_link_aux_configure_timeout(link->ddc, +					LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); +	} +  	memset(dpcd_data, '\0', sizeof(dpcd_data)); +	memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));  	memset(&down_strm_port_count,  		'\0', sizeof(union down_stream_port_count));  	memset(&edp_config_cap, '\0', @@ -2785,6 +3231,52 @@ static bool retrieve_link_cap(struct dc_link *link)  		return false;  	} +	if (ext_timeout_support) { + +		status = core_link_read_dpcd( +				link, +				DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, +				lttpr_dpcd_data, +				sizeof(lttpr_dpcd_data)); + +		link->dpcd_caps.lttpr_caps.revision.raw = +				lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +		link->dpcd_caps.lttpr_caps.max_link_rate = +				lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +		link->dpcd_caps.lttpr_caps.phy_repeater_cnt = +				lttpr_dpcd_data[DP_PHY_REPEATER_CNT - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +		link->dpcd_caps.lttpr_caps.max_lane_count = +				lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +		link->dpcd_caps.lttpr_caps.mode = +				lttpr_dpcd_data[DP_PHY_REPEATER_MODE - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + +		link->dpcd_caps.lttpr_caps.max_ext_timeout = +				lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - +								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; 
+ +		if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 && +				link->dpcd_caps.lttpr_caps.max_lane_count > 0 && +				link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && +				link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) { +			link->is_lttpr_mode_transparent = false; +		} else { +			/*No lttpr reset timeout to its default value*/ +			link->is_lttpr_mode_transparent = true; +			dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); +		} + +		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); +	} +  	{  		union training_aux_rd_interval aux_rd_interval; @@ -2792,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link)  			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];  		link->dpcd_caps.ext_receiver_cap_field_present = -				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false; +				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;  		if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {  			uint8_t ext_cap_data[16]; @@ -2923,7 +3415,6 @@ static bool retrieve_link_cap(struct dc_link *link)  		dp_hw_fw_revision.ieee_fw_rev,  		sizeof(dp_hw_fw_revision.ieee_fw_rev)); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	memset(&link->dpcd_caps.dsc_caps, '\0',  			sizeof(link->dpcd_caps.dsc_caps));  	memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); @@ -2945,7 +3436,6 @@ static bool retrieve_link_cap(struct dc_link *link)  				link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,  				sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));  	} -#endif  	/* Connectivity log: detection */  	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -2953,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link)  	return true;  } +bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ +	uint8_t dpcd_data[16]; +	uint32_t read_dpcd_retry_cnt = 3; +	enum dc_status status = DC_ERROR_UNEXPECTED; +	union dp_downstream_port_present ds_port = { 0 }; +	union 
down_stream_port_count down_strm_port_count; +	union edp_configuration_cap edp_config_cap; + +	int i; + +	for (i = 0; i < read_dpcd_retry_cnt; i++) { +		status = core_link_read_dpcd( +				link, +				DP_DPCD_REV, +				dpcd_data, +				sizeof(dpcd_data)); +		if (status == DC_OK) +			break; +	} + +	link->dpcd_caps.dpcd_rev.raw = +		dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + +	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) +		return false; + +	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - +			DP_DPCD_REV]; + +	get_active_converter_info(ds_port.byte, link); + +	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - +			DP_DPCD_REV]; + +	link->dpcd_caps.allow_invalid_MSA_timing_param = +		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + +	link->dpcd_caps.max_ln_count.raw = dpcd_data[ +		DP_MAX_LANE_COUNT - DP_DPCD_REV]; + +	link->dpcd_caps.max_down_spread.raw = dpcd_data[ +		DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + +	link->reported_link_cap.lane_count = +		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; +	link->reported_link_cap.link_rate = dpcd_data[ +		DP_MAX_LINK_RATE - DP_DPCD_REV]; +	link->reported_link_cap.link_spread = +		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
+		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + +	edp_config_cap.raw = dpcd_data[ +		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; +	link->dpcd_caps.panel_mode_edp = +		edp_config_cap.bits.ALT_SCRAMBLER_RESET; +	link->dpcd_caps.dpcd_display_control_capable = +		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + +	return true; +} +  bool detect_dp_sink_caps(struct dc_link *link)  {  	return retrieve_link_cap(link); @@ -3067,21 +3619,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)  static void set_crtc_test_pattern(struct dc_link *link,  				struct pipe_ctx *pipe_ctx, -				enum dp_test_pattern test_pattern) +				enum dp_test_pattern test_pattern, +				enum dp_test_pattern_color_space test_pattern_color_space)  {  	enum controller_dp_test_pattern controller_test_pattern;  	enum dc_color_depth color_depth = pipe_ctx->  		stream->timing.display_color_depth;  	struct bit_depth_reduction_params params;  	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	int width = pipe_ctx->stream->timing.h_addressable +  		pipe_ctx->stream->timing.h_border_left +  		pipe_ctx->stream->timing.h_border_right;  	int height = pipe_ctx->stream->timing.v_addressable +  		pipe_ctx->stream->timing.v_border_bottom +  		pipe_ctx->stream->timing.v_border_top; -#endif  	memset(¶ms, 0, sizeof(params)); @@ -3125,10 +3676,29 @@ static void set_crtc_test_pattern(struct dc_link *link,  		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)  			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,  				controller_test_pattern, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		else if (opp->funcs->opp_set_disp_pattern_generator) {  			struct pipe_ctx *odm_pipe; +			enum controller_dp_color_space controller_color_space;  			int opp_cnt = 1; +			uint8_t count = 0; + +			switch (test_pattern_color_space) { +			case DP_TEST_PATTERN_COLOR_SPACE_RGB: +				controller_color_space = 
CONTROLLER_DP_COLOR_SPACE_RGB; +				break; +			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: +				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; +				break; +			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: +				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; +				break; +			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: +			default: +				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; +				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); +				ASSERT(0); +				break; +			}  			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)  				opp_cnt++; @@ -3141,6 +3711,7 @@ static void set_crtc_test_pattern(struct dc_link *link,  				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms);  				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,  					controller_test_pattern, +					controller_color_space,  					color_depth,  					NULL,  					width, @@ -3148,12 +3719,18 @@ static void set_crtc_test_pattern(struct dc_link *link,  			}  			opp->funcs->opp_set_disp_pattern_generator(opp,  				controller_test_pattern, +				controller_color_space,  				color_depth,  				NULL,  				width,  				height); +			/* wait for dpg to blank pixel data with test pattern */ +			for (count = 0; count < 1000; count++) +				if (opp->funcs->dpg_is_blanked(opp)) +					break; +				else +					udelay(100);  		} -#endif  	}  	break;  	case DP_TEST_PATTERN_VIDEO_MODE: @@ -3166,7 +3743,6 @@ static void set_crtc_test_pattern(struct dc_link *link,  			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,  				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,  				color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		else if (opp->funcs->opp_set_disp_pattern_generator) {  			struct pipe_ctx *odm_pipe;  			int opp_cnt = 1; @@ -3181,6 +3757,7 @@ static void set_crtc_test_pattern(struct dc_link *link,  				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms);  				
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,  					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, +					CONTROLLER_DP_COLOR_SPACE_UDEFINED,  					color_depth,  					NULL,  					width, @@ -3188,12 +3765,12 @@ static void set_crtc_test_pattern(struct dc_link *link,  			}  			opp->funcs->opp_set_disp_pattern_generator(opp,  				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, +				CONTROLLER_DP_COLOR_SPACE_UDEFINED,  				color_depth,  				NULL,  				width,  				height);  		} -#endif  	}  	break; @@ -3205,6 +3782,7 @@ static void set_crtc_test_pattern(struct dc_link *link,  bool dc_link_dp_set_test_pattern(  	struct dc_link *link,  	enum dp_test_pattern test_pattern, +	enum dp_test_pattern_color_space test_pattern_color_space,  	const struct link_training_settings *p_link_settings,  	const unsigned char *p_custom_pattern,  	unsigned int cust_pattern_size) @@ -3233,7 +3811,7 @@ bool dc_link_dp_set_test_pattern(  	if (link->test_pattern_enabled && test_pattern ==  			DP_TEST_PATTERN_VIDEO_MODE) {  		/* Set CRTC Test Pattern */ -		set_crtc_test_pattern(link, pipe_ctx, test_pattern); +		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);  		dp_set_hw_test_pattern(link, test_pattern,  				(uint8_t *)p_custom_pattern,  				(uint32_t)cust_pattern_size); @@ -3256,8 +3834,8 @@ bool dc_link_dp_set_test_pattern(  	if (is_dp_phy_pattern(test_pattern)) {  		/* Set DPCD Lane Settings before running test pattern */  		if (p_link_settings != NULL) { -			dp_set_hw_lane_settings(link, p_link_settings); -			dpcd_set_lane_settings(link, p_link_settings); +			dp_set_hw_lane_settings(link, p_link_settings, DPRX); +			dpcd_set_lane_settings(link, p_link_settings, DPRX);  		}  		/* Blank stream if running test pattern */ @@ -3348,7 +3926,7 @@ bool dc_link_dp_set_test_pattern(  		}  	} else {  	/* CRTC Patterns */ -		set_crtc_test_pattern(link, pipe_ctx, test_pattern); +		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);  		/* Set Test Pattern 
state */  		link->test_pattern_enabled = true;  	} @@ -3468,7 +4046,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)  	return DP_PANEL_MODE_DEFAULT;  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  void dp_set_fec_ready(struct dc_link *link, bool ready)  {  	/* FEC has to be "set ready" before the link training. @@ -3522,7 +4099,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)  	if (link_enc->funcs->fec_set_enable &&  			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {  		if (link->fec_state == dc_link_fec_ready && enable) { -			msleep(1); +			/* Accord to DP spec, FEC enable sequence can first +			 * be transmitted anytime after 1000 LL codes have +			 * been transmitted on the link after link training +			 * completion. Using 1 lane RBR should have the maximum +			 * time for transmitting 1000 LL codes which is 6.173 us. +			 * So use 7 microseconds delay instead. +			 */ +			udelay(7);  			link_enc->funcs->fec_set_enable(link_enc, true);  			link->fec_state = dc_link_fec_enabled;  		} else if (link->fec_state == dc_link_fec_enabled && !enable) { @@ -3531,5 +4115,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)  		}  	}  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index a519dbc5ecb6..ddb855045767 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -12,12 +12,38 @@  #include "dc_link_ddc.h"  #include "dm_helpers.h"  #include "dpcd_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "resource.h" -#endif + +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ +	switch (lttpr_repeater_count) { +	case 0x80: // 1 lttpr repeater +		return 1; +	case 0x40: // 2 lttpr repeaters +		return 2; +	case 0x20: // 3 lttpr repeaters +		return 3; +	case 0x10: // 4 lttpr repeaters +		return 4; +	case 0x08: // 5 lttpr 
repeaters +		return 5; +	case 0x04: // 6 lttpr repeaters +		return 6; +	case 0x02: // 7 lttpr repeaters +		return 7; +	case 0x01: // 8 lttpr repeaters +		return 8; +	default: +		break; +	} +	return 0; // invalid value +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ +	return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); +}  enum dc_status core_link_read_dpcd(  	struct dc_link *link, @@ -69,8 +95,8 @@ void dp_enable_link_phy(  	const struct dc_link_settings *link_settings)  {  	struct link_encoder *link_enc = link->link_enc; -	struct dc  *core_dc = link->ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	struct pipe_ctx *pipes =  			link->dc->current_state->res_ctx.pipe_ctx; @@ -147,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)  }  bool edp_receiver_ready_T7(struct dc_link *link)  { -	unsigned int tries = 0;  	unsigned char sinkstatus = 0;  	unsigned char edpRev = 0;  	enum dc_status result = DC_OK; +	/* use absolute time stamp to constrain max T7*/ +	unsigned long long enter_timestamp = 0; +	unsigned long long finish_timestamp = 0; +	unsigned long long time_taken_in_ns = 0; +  	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));  	if (result == DC_OK && edpRev < DP_EDP_12)  		return true;  	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ +	enter_timestamp = dm_get_timestamp(link->ctx);  	do {  		sinkstatus = 0;  		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); @@ -163,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)  			break;  		if (result != DC_OK)  			break; -		udelay(25); //MAx T7 is 50ms -	} while (++tries < 300); +		udelay(25); +		finish_timestamp = dm_get_timestamp(link->ctx); +		time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); +	} while 
(time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms  	if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)  		udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); @@ -174,8 +207,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)  void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)  { -	struct dc  *core_dc = link->ctx->dc; -	struct dmcu *dmcu = core_dc->res_pool->dmcu; +	struct dc  *dc = link->ctx->dc; +	struct dmcu *dmcu = dc->res_pool->dmcu;  	if (!link->wa_flags.dp_keep_receiver_powered)  		dp_receiver_power_ctrl(link, false); @@ -212,7 +245,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)  bool dp_set_hw_training_pattern(  	struct dc_link *link, -	enum dc_dp_training_pattern pattern) +	enum dc_dp_training_pattern pattern, +	uint32_t offset)  {  	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; @@ -240,10 +274,14 @@ bool dp_set_hw_training_pattern(  void dp_set_hw_lane_settings(  	struct dc_link *link, -	const struct link_training_settings *link_settings) +	const struct link_training_settings *link_settings, +	uint32_t offset)  {  	struct link_encoder *encoder = link->link_enc; +	if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset)) +		return; +  	/* call Encoder to set lane settings */  	encoder->funcs->dp_set_lane_settings(encoder, link_settings);  } @@ -302,20 +340,12 @@ void dp_retrain_link_dp_test(struct dc_link *link,  			memset(&link->cur_link_settings, 0,  				sizeof(link->cur_link_settings)); -			link->link_enc->funcs->enable_dp_output( -						link->link_enc, -						link_setting, -						pipes[i].clock_source->id); -			link->cur_link_settings = *link_setting; - -			dp_receiver_power_ctrl(link, true); -  			perform_link_training_with_retries( -					link,  					link_setting,  					skip_video_pattern, -					LINK_TRAINING_ATTEMPTS); - +					LINK_TRAINING_ATTEMPTS, +					&pipes[i], +					SIGNAL_TYPE_DISPLAY_PORT);  			
link->dc->hwss.enable_stream(&pipes[i]); @@ -339,7 +369,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,  	}  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #define DC_LOGGER \  	dsc->ctx->logger  static void dsc_optc_config_log(struct display_stream_compressor *dsc, @@ -365,14 +394,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,  static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)  { -	struct dc *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct dc_stream_state *stream = pipe_ctx->stream;  	bool result = false; -	if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) +	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))  		result = true;  	else -		result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable); +		result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);  	return result;  } @@ -382,7 +411,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)  void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)  {  	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; -	struct dc *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct pipe_ctx *odm_pipe;  	int opp_cnt = 1; @@ -418,7 +447,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)  		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;  		/* Enable DSC in encoder */ -		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);  			dsc_optc_config_log(dsc, &dsc_optc_cfg);  			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -443,7 +472,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)  				OPTC_DSC_DISABLED, 0, 0);  		/* disable DSC in stream encoder */ -		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(  					pipe_ctx->stream_res.stream_enc,  					OPTC_DSC_DISABLED, 0, 0); @@ -486,7 +515,7 @@ out:  bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)  {  	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; -	struct dc *core_dc = pipe_ctx->stream->ctx->dc; +	struct dc *dc = pipe_ctx->stream->ctx->dc;  	struct dc_stream_state *stream = pipe_ctx->stream;  	if (!pipe_ctx->stream->timing.flags.DSC || !dsc) @@ -496,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)  		struct dsc_config dsc_cfg;  		uint8_t dsc_packed_pps[128]; +		memset(&dsc_cfg, 0, sizeof(dsc_cfg)); +		memset(dsc_packed_pps, 0, 128); +  		/* Enable DSC hw block */  		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;  		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; @@ -505,7 +537,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)  		DC_LOG_DSC(" ");  		dsc->funcs->dsc_get_packed_pps(dsc, 
&dsc_cfg, &dsc_packed_pps[0]); -		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  			DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);  			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(  									pipe_ctx->stream_res.stream_enc, @@ -514,7 +546,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)  		}  	} else {  		/* disable DSC PPS in stream encoder */ -		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { +		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(  						pipe_ctx->stream_res.stream_enc, false, NULL);  		} @@ -537,5 +569,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)  	dp_set_dsc_pps_sdp(pipe_ctx, true);  	return true;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 37698305a2dc..a0eb9e533a61 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -46,15 +46,11 @@  #include "dce100/dce100_resource.h"  #include "dce110/dce110_resource.h"  #include "dce112/dce112_resource.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "dcn10/dcn10_resource.h"  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dcn20/dcn20_resource.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #include "dcn21/dcn21_resource.h" -#endif  #include "dce120/dce120_resource.h"  #define DC_LOGGER_INIT(logger) @@ -99,23 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)  		else  			dc_version = DCE_VERSION_12_0;  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if 
defined(CONFIG_DRM_AMD_DC_DCN)  	case FAMILY_RV:  		dc_version = DCN_VERSION_1_0;  		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))  			dc_version = DCN_VERSION_1_01; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))  			dc_version = DCN_VERSION_2_1; -#endif  		break;  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case FAMILY_NV:  		dc_version = DCN_VERSION_2_0;  		break; -#endif  	default:  		dc_version = DCE_VERSION_UNKNOWN;  		break; @@ -162,20 +154,16 @@ struct resource_pool *dc_create_resource_pool(struct dc  *dc,  				init_data->num_virtual_links, dc);  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case DCN_VERSION_1_0:  	case DCN_VERSION_1_01:  		res_pool = dcn10_create_resource_pool(init_data, dc);  		break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case DCN_VERSION_2_0:  		res_pool = dcn20_create_resource_pool(init_data, dc);  		break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case DCN_VERSION_2_1:  		res_pool = dcn21_create_resource_pool(init_data, dc);  		break; @@ -951,44 +939,44 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)  	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);  } -static bool are_rects_integer_multiples(struct rect src, struct rect dest) -{ -	if (dest.width  >= src.width  && dest.width  % src.width  == 0 && -		dest.height >= src.height && dest.height % src.height == 0) -		return true; - -	return false; -} -static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) +/* + * When handling 270 rotation in mixed SLS mode, we have + * stream->timing.h_border_left that is non zero.  If we are doing + * pipe-splitting, this h_border_left value gets added to recout.x and when it + * calls calculate_inits_and_adj_vp() and + * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a + * pipe to be incorrect. 
+ * + * To fix this, instead of using stream->timing.h_border_left, we can use + * stream->dst.x to represent the border instead.  So we will set h_border_left + * to 0 and shift the appropriate amount in stream->dst.x.  We will then + * perform all calculations in resource_build_scaling_params() based on this + * and then restore the h_border_left and stream->dst.x to their original + * values. + * + * shift_border_left_to_dst() will shift the amount of h_border_left to + * stream->dst.x and set h_border_left to 0.  restore_border_left_from_dst() + * will restore h_border_left and stream->dst.x back to their original values + * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the + * original h_border_left value in its calculation. + */ +int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)  { -	if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) -		return; - -	//for Centered Mode -	if (pipe_ctx->stream->dst.width  == pipe_ctx->stream->src.width && -		pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { -		// calculate maximum # of replication of src onto addressable -		unsigned int integer_multiple = min( -				pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, -				pipe_ctx->stream->timing.v_addressable  / pipe_ctx->stream->src.height); - -		//scale dst -		pipe_ctx->stream->dst.width  = integer_multiple * pipe_ctx->stream->src.width; -		pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; +	int store_h_border_left = pipe_ctx->stream->timing.h_border_left; -		//center dst onto addressable -		pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; -		pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; +	if (store_h_border_left) { +		pipe_ctx->stream->timing.h_border_left = 0; +		pipe_ctx->stream->dst.x += store_h_border_left;  	} +	return store_h_border_left; +} -	//disable taps if src & 
dst are integer ratio -	if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { -		pipe_ctx->plane_state->scaling_quality.v_taps = 1; -		pipe_ctx->plane_state->scaling_quality.h_taps = 1; -		pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; -		pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; -	} +void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx, +                                  int store_h_border_left) +{ +	pipe_ctx->stream->dst.x -= store_h_border_left; +	pipe_ctx->stream->timing.h_border_left = store_h_border_left;  }  bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) @@ -996,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;  	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;  	bool res = false; +	int store_h_border_left = shift_border_left_to_dst(pipe_ctx);  	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);  	/* Important: scaling ratio calculation requires pixel format,  	 * lb depth calculation requires recout and taps require scaling ratios. 
@@ -1004,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(  			pipe_ctx->plane_state->format); -	calculate_integer_scaling(pipe_ctx); -  	calculate_scaling_ratios(pipe_ctx);  	calculate_viewport(pipe_ctx); -	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) +	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || +		pipe_ctx->plane_res.scl_data.viewport.width < 16) { +		if (store_h_border_left) { +			restore_border_left_from_dst(pipe_ctx, +				store_h_border_left); +		}  		return false; +	}  	calculate_recout(pipe_ctx); @@ -1024,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;  	pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; -	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; -	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; +	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + +		store_h_border_left + timing->h_border_right; +	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + +		timing->v_border_top + timing->v_border_bottom;  	/* Taps calculations */  	if (pipe_ctx->plane_res.xfm != NULL) @@ -1072,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  				plane_state->dst_rect.x,  				plane_state->dst_rect.y); +	if (store_h_border_left) +		restore_border_left_from_dst(pipe_ctx, store_h_border_left); +  	return res;  } @@ -1217,7 +1215,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head(  	return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);  } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  static int acquire_first_split_pipe(  		struct resource_context *res_ctx,  		
const struct resource_pool *pool, @@ -1298,7 +1296,7 @@ bool dc_add_plane_to_context(  		free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); -	#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +	#if defined(CONFIG_DRM_AMD_DC_DCN)  		if (!free_pipe) {  			int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);  			if (pipe_idx >= 0) @@ -1891,7 +1889,7 @@ static int acquire_resource_from_hw_enabled_state(  	inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);  	if (inst == ENGINE_ID_UNKNOWN) -		return false; +		return -1;  	for (i = 0; i < pool->stream_enc_count; i++) {  		if (pool->stream_enc[i]->id == inst) { @@ -1903,10 +1901,10 @@ static int acquire_resource_from_hw_enabled_state(  	// tg_inst not found  	if (i == pool->stream_enc_count) -		return false; +		return -1;  	if (tg_inst >= pool->timing_generator_count) -		return false; +		return -1;  	if (!res_ctx->pipe_ctx[tg_inst].stream) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst]; @@ -1919,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state(  		pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];  		pipe_ctx->stream_res.opp = pool->opps[tg_inst]; -		if (pool->dpps[tg_inst]) +		if (pool->dpps[tg_inst]) {  			pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst; + +			// Read DPP->MPCC->OPP Pipe from HW State +			if (pool->mpc->funcs->read_mpcc_state) { +				struct mpcc_state s = {0}; + +				pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s); + +				if (s.dpp_id < MAX_MPCC) +					pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id; + +				if (s.bot_mpcc_id < MAX_MPCC) +					pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot = +							&pool->mpc->mpcc_array[s.bot_mpcc_id]; + +				if (s.opp_id < MAX_OPP) +					pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id; +			} +		}  		pipe_ctx->pipe_idx = tg_inst;  		pipe_ctx->stream = stream; @@ -1972,7 +1988,7 @@ enum dc_status 
resource_map_pool_resources(  		/* acquire new resources */  		pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	if (pipe_idx < 0)  		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);  #endif @@ -2050,6 +2066,13 @@ void dc_resource_state_construct(  	dst_ctx->clk_mgr = dc->clk_mgr;  } + +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) +{ +	return dc->res_pool->res_cap->num_dsc > 0; +} + +  /**   * dc_validate_global_state() - Determine if HW can support a given state   * Checks HW resource availability and bandwidth requirement. @@ -2306,7 +2329,7 @@ static void set_avi_info_frame(  		if (color_space == COLOR_SPACE_SRGB ||  			color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {  			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE; -			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; +			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;  		} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||  					color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {  			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE; @@ -2772,9 +2795,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,  enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)  { -	struct dc  *core_dc = dc;  	struct dc_link *link = stream->link; -	struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; +	struct timing_generator *tg = dc->res_pool->timing_generators[0];  	enum dc_status res = DC_OK;  	calculate_phy_pix_clks(stream); @@ -2837,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)  		return -1;  	}  } +static unsigned int get_max_audio_sample_rate(struct audio_mode *modes) +{ +	if (modes) { +		if (modes->sample_rates.rate.RATE_192) +			return 192000; +		if (modes->sample_rates.rate.RATE_176_4) +			return 176400; +		if (modes->sample_rates.rate.RATE_96) +			
return 96000; +		if (modes->sample_rates.rate.RATE_88_2) +			return 88200; +		if (modes->sample_rates.rate.RATE_48) +			return 48000; +		if (modes->sample_rates.rate.RATE_44_1) +			return 44100; +		if (modes->sample_rates.rate.RATE_32) +			return 32000; +	} +	/*original logic when no audio info*/ +	return 441000; +} + +void get_audio_check(struct audio_info *aud_modes, +	struct audio_check *audio_chk) +{ +	unsigned int i; +	unsigned int max_sample_rate = 0; + +	if (aud_modes) { +		audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/ + +		audio_chk->max_audiosample_rate = 0; +		for (i = 0; i < aud_modes->mode_count; i++) { +			max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]); +			if (audio_chk->max_audiosample_rate < max_sample_rate) +				audio_chk->max_audiosample_rate = max_sample_rate; +			/*dts takes the same as type 2: AP = 0.25*/ +		} +		/*check which one take more bandwidth*/ +		if (audio_chk->max_audiosample_rate > 192000) +			audio_chk->audio_packet_type = 0x9;/*AP =1*/ +		audio_chk->acat = 0;/*not support*/ +	} +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index 5cbfdf1c4b11..a249a0e5edd0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,7 +33,7 @@   * Private functions   ******************************************************************************/ -static void destruct(struct dc_sink *sink) +static void dc_sink_destruct(struct dc_sink *sink)  {  	if (sink->dc_container_id) {  		kfree(sink->dc_container_id); @@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink)  	}  } -static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) +static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)  {  	struct dc_link *link = init_params->link; @@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink)  
static void dc_sink_free(struct kref *kref)  {  	struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); -	destruct(sink); +	dc_sink_destruct(sink);  	kfree(sink);  } @@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)  	if (NULL == sink)  		goto alloc_fail; -	if (false == construct(sink, init_params)) +	if (false == dc_sink_construct(sink, init_params))  		goto construct_fail;  	kref_init(&sink->refcount); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bb09243758fe..6ddbb00ed37a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -27,14 +27,12 @@  #include <linux/slab.h>  #include "dm_services.h" +#include "basics/dc_common.h"  #include "dc.h"  #include "core_types.h"  #include "resource.h"  #include "ipp.h"  #include "timing_generator.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "dcn10/dcn10_hw_sequencer.h" -#endif  #define DC_LOGGER dc->ctx->logger @@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)  	}  } -static void construct(struct dc_stream_state *stream, +static void dc_stream_construct(struct dc_stream_state *stream,  	struct dc_sink *dc_sink_data)  {  	uint32_t i = 0; @@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream,  	/* EDID CAP translation for HDMI 2.0 */  	stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));  	stream->timing.dsc_cfg.num_slices_h = 0;  	stream->timing.dsc_cfg.num_slices_v = 0; @@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream,  	stream->timing.dsc_cfg.linebuf_depth = 9;  	stream->timing.dsc_cfg.version_minor = 2;  	stream->timing.dsc_cfg.ycbcr422_simple = 0; -#endif  	update_stream_signal(stream, 
dc_sink_data); @@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream,  	stream->ctx->dc_stream_id_count++;  } -static void destruct(struct dc_stream_state *stream) +static void dc_stream_destruct(struct dc_stream_state *stream)  {  	dc_sink_release(stream->sink);  	if (stream->out_transfer_func != NULL) { @@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref)  {  	struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount); -	destruct(stream); +	dc_stream_destruct(stream);  	kfree(stream);  } @@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink(  	if (stream == NULL)  		return NULL; -	construct(stream, sink); +	dc_stream_construct(stream, sink);  	kref_init(&stream->refcount); @@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status(  static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)  { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	unsigned int vupdate_line;  	unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;  	struct dc_stream_state *stream = pipe_ctx->stream; @@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)  	if (stream->ctx->asic_id.chip_family == FAMILY_RV &&  			ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { -		vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); +		vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);  		if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))  			return; @@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes(  	const struct dc_cursor_attributes *attributes)  {  	int i; -	struct dc  *core_dc; +	struct dc  *dc;  	struct resource_context *res_ctx;  	struct pipe_ctx *pipe_to_program = NULL; @@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes(  		return false;  	} -	core_dc = stream->ctx->dc; -	res_ctx = &core_dc->current_state->res_ctx; +	dc = stream->ctx->dc; +	res_ctx = 
&dc->current_state->res_ctx;  	stream->cursor_attributes = *attributes;  	for (i = 0; i < MAX_PIPES; i++) { @@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes(  		if (!pipe_to_program) {  			pipe_to_program = pipe_ctx; -			delay_cursor_until_vupdate(pipe_ctx, core_dc); -			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); +			delay_cursor_until_vupdate(pipe_ctx, dc); +			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);  		} -		core_dc->hwss.set_cursor_attribute(pipe_ctx); -		if (core_dc->hwss.set_cursor_sdr_white_level) -			core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +		dc->hwss.set_cursor_attribute(pipe_ctx); +		if (dc->hwss.set_cursor_sdr_white_level) +			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);  	}  	if (pipe_to_program) -		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); +		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);  	return true;  } @@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position(  	const struct dc_cursor_position *position)  {  	int i; -	struct dc  *core_dc; +	struct dc  *dc;  	struct resource_context *res_ctx;  	struct pipe_ctx *pipe_to_program = NULL; @@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position(  		return false;  	} -	core_dc = stream->ctx->dc; -	res_ctx = &core_dc->current_state->res_ctx; +	dc = stream->ctx->dc; +	res_ctx = &dc->current_state->res_ctx;  	stream->cursor_position = *position;  	for (i = 0; i < MAX_PIPES; i++) { @@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position(  		if (!pipe_to_program) {  			pipe_to_program = pipe_ctx; -			delay_cursor_until_vupdate(pipe_ctx, core_dc); -			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); +			delay_cursor_until_vupdate(pipe_ctx, dc); +			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);  		} -		core_dc->hwss.set_cursor_position(pipe_ctx); +		dc->hwss.set_cursor_position(pipe_ctx);  	}  	if (pipe_to_program) -		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); +		
dc->hwss.pipe_control_lock(dc, pipe_to_program, false);  	return true;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  bool dc_stream_add_writeback(struct dc *dc,  		struct dc_stream_state *stream,  		struct dc_writeback_info *wb_info) @@ -411,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,  		stream->writeback_info[stream->num_wb_info++] = *wb_info;  	} -	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { -		dm_error("DC: update_bandwidth failed!\n"); -		return false; -	} - -	/* enable writeback */  	if (dc->hwss.enable_writeback) {  		struct dc_stream_status *stream_status = dc_stream_get_status(stream);  		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; +		dwb->otg_inst = stream_status->primary_otg_inst; +	} +	if (IS_DIAG_DC(dc->ctx->dce_environment)) { +		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { +			dm_error("DC: update_bandwidth failed!\n"); +			return false; +		} -		if (dwb->funcs->is_enabled(dwb)) { -			/* writeback pipe already enabled, only need to update */ -			dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); -		} else { -			/* Enable writeback pipe from scratch*/ -			dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); +		/* enable writeback */ +		if (dc->hwss.enable_writeback) { +			struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + +			if (dwb->funcs->is_enabled(dwb)) { +				/* writeback pipe already enabled, only need to update */ +				dc->hwss.update_writeback(dc, wb_info, dc->current_state); +			} else { +				/* Enable writeback pipe from scratch*/ +				dc->hwss.enable_writeback(dc, wb_info, dc->current_state); +			}  		}  	} -  	return true;  } @@ -468,26 +468,35 @@ bool dc_stream_remove_writeback(struct dc *dc,  	}  	stream->num_wb_info = j; -	/* recalculate and apply DML parameters */ -	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { -		dm_error("DC: update_bandwidth failed!\n"); -		return false; -	} - -	/* disable writeback */ -	if 
(dc->hwss.disable_writeback) -		dc->hwss.disable_writeback(dc, dwb_pipe_inst); +	if (IS_DIAG_DC(dc->ctx->dce_environment)) { +		/* recalculate and apply DML parameters */ +		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { +			dm_error("DC: update_bandwidth failed!\n"); +			return false; +		} +		/* disable writeback */ +		if (dc->hwss.disable_writeback) +			dc->hwss.disable_writeback(dc, dwb_pipe_inst); +	}  	return true;  } -#endif +bool dc_stream_warmup_writeback(struct dc *dc, +		int num_dwb, +		struct dc_writeback_info *wb_info) +{ +	if (dc->hwss.mmhubbub_warmup) +		return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); +	else +		return false; +}  uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)  {  	uint8_t i; -	struct dc  *core_dc = stream->ctx->dc; +	struct dc  *dc = stream->ctx->dc;  	struct resource_context *res_ctx = -		&core_dc->current_state->res_ctx; +		&dc->current_state->res_ctx;  	for (i = 0; i < MAX_PIPES; i++) {  		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -544,9 +553,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,  {  	uint8_t i;  	bool ret = false; -	struct dc  *core_dc = stream->ctx->dc; +	struct dc  *dc = stream->ctx->dc;  	struct resource_context *res_ctx = -		&core_dc->current_state->res_ctx; +		&dc->current_state->res_ctx;  	for (i = 0; i < MAX_PIPES; i++) {  		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -567,10 +576,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,  	return ret;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)  { -	bool status = true;  	struct pipe_ctx *pipe = NULL;  	int i; @@ -586,8 +593,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)  	if (i == MAX_PIPES)  		return true; -	status = dc->hwss.dmdata_status_done(pipe); -	return status; +	return dc->hwss.dmdata_status_done(pipe);  
}  bool dc_stream_set_dynamic_metadata(struct dc *dc, @@ -630,7 +636,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,  	return true;  } -#endif  void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)  { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index b9d6a5bd8522..ea1229a3e2b2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -37,7 +37,7 @@  /*******************************************************************************   * Private functions   ******************************************************************************/ -static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)  {  	plane_state->ctx = ctx; @@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state  		plane_state->in_transfer_func->type = TF_TYPE_BYPASS;  		plane_state->in_transfer_func->ctx = ctx;  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	plane_state->in_shaper_func = dc_create_transfer_func();  	if (plane_state->in_shaper_func != NULL) {  		plane_state->in_shaper_func->type = TF_TYPE_BYPASS; @@ -67,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state  		plane_state->blend_tf->ctx = ctx;  	} -#endif  } -static void destruct(struct dc_plane_state *plane_state) +static void dc_plane_destruct(struct dc_plane_state *plane_state)  {  	if (plane_state->gamma_correction != NULL) {  		dc_gamma_release(&plane_state->gamma_correction); @@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state)  				plane_state->in_transfer_func);  		plane_state->in_transfer_func = NULL;  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	if (plane_state->in_shaper_func != NULL) {  		dc_transfer_func_release(  				plane_state->in_shaper_func); @@ 
-97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state)  		plane_state->blend_tf = NULL;  	} -#endif  }  /******************************************************************************* @@ -112,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,  struct dc_plane_state *dc_create_plane_state(struct dc *dc)  { -	struct dc *core_dc = dc; -  	struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), -						      GFP_KERNEL); +							GFP_KERNEL);  	if (NULL == plane_state)  		return NULL;  	kref_init(&plane_state->refcount); -	construct(core_dc->ctx, plane_state); +	dc_plane_construct(dc->ctx, plane_state);  	return plane_state;  } @@ -141,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status(  		const struct dc_plane_state *plane_state)  {  	const struct dc_plane_status *plane_status; -	struct dc  *core_dc; +	struct dc  *dc;  	int i;  	if (!plane_state || @@ -152,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status(  	}  	plane_status = &plane_state->status; -	core_dc = plane_state->ctx->dc; +	dc = plane_state->ctx->dc; -	if (core_dc->current_state == NULL) +	if (dc->current_state == NULL)  		return NULL;  	/* Find the current plane state and set its pending bit to false */ -	for (i = 0; i < core_dc->res_pool->pipe_count; i++) { +	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx = -				&core_dc->current_state->res_ctx.pipe_ctx[i]; +				&dc->current_state->res_ctx.pipe_ctx[i];  		if (pipe_ctx->plane_state != plane_state)  			continue; @@ -170,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status(  		break;  	} -	for (i = 0; i < core_dc->res_pool->pipe_count; i++) { +	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx = -				&core_dc->current_state->res_ctx.pipe_ctx[i]; +				&dc->current_state->res_ctx.pipe_ctx[i];  		if (pipe_ctx->plane_state != plane_state)  			continue; -		core_dc->hwss.update_pending_status(pipe_ctx); +		
dc->hwss.update_pending_status(pipe_ctx);  	}  	return plane_status; @@ -191,7 +185,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state)  static void dc_plane_state_free(struct kref *kref)  {  	struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); -	destruct(plane_state); +	dc_plane_destruct(plane_state);  	kvfree(plane_state);  } @@ -262,7 +256,6 @@ alloc_fail:  	return NULL;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  static void dc_3dlut_func_free(struct kref *kref)  {  	struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount); @@ -296,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)  {  	kref_get(&lut->refcount);  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0416a17b0897..3fa85a54360f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@  #include "inc/hw/dmcu.h"  #include "dml/display_mode_lib.h" -#define DC_VER "3.2.56" +#define DC_VER "3.2.68"  #define MAX_SURFACES 3  #define MAX_PLANES 6 @@ -54,6 +54,10 @@ struct dc_versions {  	struct dmcu_version dmcu_version;  }; +enum dp_protocol_version { +	DP_VERSION_1_4, +}; +  enum dc_plane_type {  	DC_PLANE_TYPE_INVALID,  	DC_PLANE_TYPE_DCE_RGB, @@ -112,17 +116,15 @@ struct dc_caps {  	bool disable_dp_clk_share;  	bool psp_setup_panel_mode;  	bool extended_aux_timeout_support; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 +	bool dmcub_support;  	bool hw_3d_lut; -#endif +	enum dp_protocol_version max_dp_protocol_version;  	struct dc_plane_cap planes[MAX_PLANES];  };  struct dc_bug_wa { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	bool no_connect_phy_config;  	bool dedcn20_305_wa; -#endif  	bool skip_clock_update;  }; @@ -155,11 +157,14 @@ struct dc_surface_dcc_cap {  	bool const_color_support;  }; -struct dc_static_screen_events { -	bool force_trigger; -	bool cursor_update; -	bool surface_update; -	bool overlay_update; +struct dc_static_screen_params 
{ +	struct { +		bool force_trigger; +		bool cursor_update; +		bool surface_update; +		bool overlay_update; +	} triggers; +	unsigned int num_frames;  }; @@ -363,10 +368,10 @@ struct dc_debug_options {  	bool disable_dfs_bypass;  	bool disable_dpp_power_gate;  	bool disable_hubp_power_gate; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool disable_dsc_power_gate;  	int dsc_min_slice_height_override; -#endif +	int dsc_bpp_increment_div; +	bool native422_support;  	bool disable_pplib_wm_range;  	enum wm_report_mode pplib_wm_report_mode;  	unsigned int min_disp_clk_khz; @@ -401,22 +406,25 @@ struct dc_debug_options {  	unsigned int force_odm_combine; //bit vector based on otg inst  	unsigned int force_fclk_khz;  	bool disable_tri_buf; +	bool dmub_offload_enabled; +	bool dmcub_emulation; +	bool dmub_command_table; /* for testing only */  	struct dc_bw_validation_profile bw_val_profile; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool disable_fec; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  	bool disable_48mhz_pwrdwn; -#endif  	/* This forces a hard min on the DCFCLK requested to SMU/PP  	 * watermarks are not affected.  	 */  	unsigned int force_min_dcfclk_mhz;  	bool disable_timing_sync; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	bool cm_in_bypass; -#endif  	int force_clock_mode;/*every mode change.*/ + +	bool nv12_iflip_vm_wa; +	bool disable_dram_clock_change_vactive_support; +	bool validate_dml_output; +	bool enable_dmcub_surface_flip; +	bool usbc_combo_phy_reset_wa;  };  struct dc_debug_data { @@ -425,7 +433,6 @@ struct dc_debug_data {  	uint32_t auxErrorCount;  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  struct dc_phy_addr_space_config {  	struct {  		uint64_t start_addr; @@ -455,7 +462,6 @@ struct dc_virtual_addr_space_config {  	uint32_t	page_table_block_size_in_bytes;  	uint8_t		page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  
0 = invalid  }; -#endif  struct dc_bounding_box_overrides {  	int sr_exit_time_ns; @@ -483,9 +489,7 @@ struct dc {  	struct dc_bounding_box_overrides bb_overrides;  	struct dc_bug_wa work_arounds;  	struct dc_context *ctx; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	struct dc_phy_addr_space_config vm_pa_config; -#endif  	uint8_t link_count;  	struct dc_link *links[MAX_PIPES * 2]; @@ -501,7 +505,7 @@ struct dc {  	/* Inputs into BW and WM calculations. */  	struct bw_calcs_dceip *bw_dceip;  	struct bw_calcs_vbios *bw_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	struct dcn_soc_bounding_box *dcn_soc;  	struct dcn_ip_params *dcn_ip;  	struct display_mode_lib dml; @@ -515,7 +519,7 @@ struct dc {  	bool optimized_required;  	/* Require to maintain clocks and bandwidth for UEFI enabled HW */ -	bool optimize_seamless_boot; +	int optimize_seamless_boot_streams;  	/* FBC compressor */  	struct compressor *fbc_compressor; @@ -523,10 +527,8 @@ struct dc {  	struct dc_debug_data debug_data;  	const char *build_id; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	struct vm_helper *vm_helper;  	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif  };  enum frame_buffer_mode { @@ -558,15 +560,16 @@ struct dc_init_data {  	struct dc_bios *vbios_override;  	enum dce_environment dce_environment; +	struct dmub_offload_funcs *dmub_if; +	struct dc_reg_helper_state *dmub_offload; +  	struct dc_config flags;  	uint32_t log_mask; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	/**  	 * gpu_info FW provided soc bounding box struct or 0 if not  	 * available in FW  	 */  	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif  };  struct dc_callback_init { @@ -581,11 +584,9 @@ struct dc *dc_create(const struct dc_init_data *init_params);  void dc_hardware_init(struct dc *dc);  int dc_get_vmid_use_vector(struct dc *dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);  /* Returns the 
number of vmids supported */  int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config); -#endif  void dc_init_callbacks(struct dc *dc,  		const struct dc_callback_init *init_params);  void dc_deinit_callbacks(struct dc *dc); @@ -661,7 +662,6 @@ struct dc_transfer_func {  	};  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  union dc_3dlut_state {  	struct { @@ -680,12 +680,11 @@ union dc_3dlut_state {  struct dc_3dlut {  	struct kref refcount;  	struct tetrahedral_params lut_3d; -	uint32_t hdr_multiplier; +	struct fixed31_32 hdr_multiplier;  	bool initialized; /*remove after diag fix*/  	union dc_3dlut_state state;  	struct dc_context *ctx;  }; -#endif  /*   * This structure is filled in by dc_surface_get_status and contains   * the last requested address and the currently active address so the called @@ -708,7 +707,7 @@ union surface_update_flags {  		uint32_t horizontal_mirror_change:1;  		uint32_t per_pixel_alpha_change:1;  		uint32_t global_alpha_change:1; -		uint32_t sdr_white_level:1; +		uint32_t hdr_mult:1;  		uint32_t rotation_change:1;  		uint32_t swizzle_change:1;  		uint32_t scaling_change:1; @@ -736,9 +735,7 @@ union surface_update_flags {  struct dc_plane_state {  	struct dc_plane_address address;  	struct dc_plane_flip_time time; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	bool triplebuffer_flips; -#endif  	struct scaling_taps scaling_quality;  	struct rect src_rect;  	struct rect dst_rect; @@ -754,18 +751,16 @@ struct dc_plane_state {  	struct dc_bias_and_scale *bias_and_scale;  	struct dc_csc_transform input_csc_color_matrix;  	struct fixed31_32 coeff_reduction_factor; -	uint32_t sdr_white_level; +	struct fixed31_32 hdr_mult;  	// TODO: No longer used, remove  	struct dc_hdr_static_metadata hdr_static_ctx;  	enum dc_color_space color_space; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct dc_3dlut *lut3d_func;  	struct dc_transfer_func *in_shaper_func;  	struct dc_transfer_func *blend_tf; -#endif  	enum surface_pixel_format 
format;  	enum dc_rotation_angle rotation; @@ -801,7 +796,6 @@ struct dc_plane_info {  	enum dc_rotation_angle rotation;  	enum plane_stereo_format stereo_format;  	enum dc_color_space color_space; -	unsigned int sdr_white_level;  	bool horizontal_mirror;  	bool visible;  	bool per_pixel_alpha; @@ -825,7 +819,7 @@ struct dc_surface_update {  	const struct dc_flip_addrs *flip_addr;  	const struct dc_plane_info *plane_info;  	const struct dc_scaling_info *scaling_info; - +	struct fixed31_32 hdr_mult;  	/* following updates require alloc/sleep/spin that is not isr safe,  	 * null means no updates  	 */ @@ -834,11 +828,9 @@ struct dc_surface_update {  	const struct dc_csc_transform *input_csc_color_matrix;  	const struct fixed31_32 *coeff_reduction_factor; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	const struct dc_transfer_func *func_shaper;  	const struct dc_3dlut *lut3d_func;  	const struct dc_transfer_func *blend_tf; -#endif  };  /* @@ -859,11 +851,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);  void dc_transfer_func_release(struct dc_transfer_func *dc_tf);  struct dc_transfer_func *dc_create_transfer_func(void); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dc_3dlut *dc_create_3dlut_func(void);  void dc_3dlut_func_release(struct dc_3dlut *lut);  void dc_3dlut_func_retain(struct dc_3dlut *lut); -#endif  /*   * This structure holds a surface address.  There could be multiple addresses   * in cases such as Stereo 3D, Planar YUV, etc.  
Other per-flip attributes such @@ -925,6 +915,8 @@ void dc_resource_state_copy_construct_current(  void dc_resource_state_destruct(struct dc_state *context); +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); +  /*   * TODO update to make it about validation sets   * Set up streams and links associated to drive sinks @@ -980,10 +972,10 @@ struct dpcd_caps {  	bool panel_mode_edp;  	bool dpcd_display_control_capable;  	bool ext_receiver_cap_field_present; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	union dpcd_fec_capability fec_cap;  	struct dpcd_dsc_capabilities dsc_caps; -#endif +	struct dc_lttpr_caps lttpr_caps; +  };  #include "dc_link.h" @@ -1004,14 +996,12 @@ struct dc_container_id {  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct dc_sink_dsc_caps {  	// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),  	// 'false' if they are sink's DSC caps  	bool is_virtual_dpcd_dsc;  	struct dsc_dec_dpcd_caps dsc_dec_caps;  }; -#endif  /*   * The sink structure contains EDID and other display device properties @@ -1026,9 +1016,7 @@ struct dc_sink {  	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];  	bool converter_disable_audio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct dc_sink_dsc_caps sink_dsc_caps; -#endif  	/* private to DC core */  	struct dc_link *link; @@ -1086,13 +1074,12 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);  unsigned int dc_get_target_backlight_pwm(struct dc *dc);  bool dc_is_dmcu_initialized(struct dc *dc); +bool dc_is_hw_initialized(struct dc *dc);  enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);  void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  /*******************************************************************************   * DSC Interfaces   
******************************************************************************/  #include "dc_dsc.h" -#endif  #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c new file mode 100644 index 000000000000..59c298a6484f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../dmub/inc/dmub_srv.h" + +static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, +				  struct dmub_srv *dmub) +{ +	dc_srv->dmub = dmub; +	dc_srv->ctx = dc->ctx; +} + +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) +{ +	struct dc_dmub_srv *dc_srv = +		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); + +	if (dc_srv == NULL) { +		BREAK_TO_DEBUGGER(); +		return NULL; +	} + +	dc_dmub_srv_construct(dc_srv, dc, dmub); + +	return dc_srv; +} + +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) +{ +	if (*dmub_srv) { +		kfree(*dmub_srv); +		*dmub_srv = NULL; +	} +} + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, +			   struct dmub_cmd_header *cmd) +{ +	struct dmub_srv *dmub = dc_dmub_srv->dmub; +	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	enum dmub_status status; + +	status = dmub_srv_cmd_queue(dmub, cmd); +	if (status == DMUB_STATUS_OK) +		return; + +	if (status != DMUB_STATUS_QUEUE_FULL) +		goto error; + +	/* Execute and wait for queue to become empty again. */ +	dc_dmub_srv_cmd_execute(dc_dmub_srv); +	dc_dmub_srv_wait_idle(dc_dmub_srv); + +	/* Requeue the command. 
*/ +	status = dmub_srv_cmd_queue(dmub, cmd); +	if (status == DMUB_STATUS_OK) +		return; + +error: +	DC_ERROR("Error queuing DMUB command: status=%d\n", status); +} + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) +{ +	struct dmub_srv *dmub = dc_dmub_srv->dmub; +	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	enum dmub_status status; + +	status = dmub_srv_cmd_execute(dmub); +	if (status != DMUB_STATUS_OK) +		DC_ERROR("Error starting DMUB execution: status=%d\n", status); +} + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) +{ +	struct dmub_srv *dmub = dc_dmub_srv->dmub; +	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	enum dmub_status status; + +	status = dmub_srv_wait_for_idle(dmub, 100000); +	if (status != DMUB_STATUS_OK) +		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); +} + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) +{ +	struct dmub_srv *dmub = dc_dmub_srv->dmub; +	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	enum dmub_status status; + +	for (;;) { +		/* Wait up to a second for PHY init. */ +		status = dmub_srv_wait_for_phy_init(dmub, 1000000); +		if (status == DMUB_STATUS_OK) +			/* Initialization OK */ +			break; + +		DC_ERROR("DMCUB PHY init failed: status=%d\n", status); +		ASSERT(0); + +		if (status != DMUB_STATUS_TIMEOUT) +			/* +			 * Server likely initialized or we don't have +			 * DMCUB HW support - this won't end. +			 */ +			break; + +		/* Continue spinning so we don't hang the ASIC. */ +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h new file mode 100644 index 000000000000..754b6077539c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_DC_SRV_H_ +#define _DMUB_DC_SRV_H_ + +#include "os_types.h" +#include "../dmub/inc/dmub_cmd.h" + +struct dmub_srv; +struct dmub_cmd_header; + +struct dc_reg_helper_state { +	bool gather_in_progress; +	uint32_t same_addr_count; +	bool should_burst_write; +	union dmub_rb_cmd cmd_data; +	unsigned int reg_seq_count; +}; + +struct dc_dmub_srv { +	struct dmub_srv *dmub; +	struct dc_reg_helper_state reg_helper_offload; + +	struct dc_context *ctx; +	void *dm; +}; + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, +			   struct dmub_cmd_header *cmd); + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); + +#endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index ef79a686e4c2..dfe4472c9e40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -129,9 +129,7 @@ struct dc_link_training_overrides {  	bool *alternate_scrambler_reset;  	bool *enhanced_framing;  	bool *mst_enable; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool *fec_enable; -#endif  };  union dpcd_rev { @@ -471,13 +469,13 @@ union training_aux_rd_interval {  /* Automated test structures */  union test_request {  	struct { -	uint8_t LINK_TRAINING         :1; -	uint8_t LINK_TEST_PATTRN      :1; -	uint8_t EDID_READ             :1; -	uint8_t PHY_TEST_PATTERN      :1; -	uint8_t AUDIO_TEST_PATTERN    :1; -	uint8_t RESERVED              :1; -	uint8_t TEST_STEREO_3D        :1; +	uint8_t LINK_TRAINING                :1; +	uint8_t LINK_TEST_PATTRN             :1; +	uint8_t EDID_READ                    :1; +	uint8_t PHY_TEST_PATTERN             :1; +	uint8_t RESERVED                     :1; +	uint8_t AUDIO_TEST_PATTERN           :1; +	uint8_t TEST_AUDIO_DISABLED_VIDEO    :1;  	} bits;  	
uint8_t raw;  }; @@ -524,19 +522,52 @@ union link_test_pattern {  union test_misc {  	struct dpcd_test_misc_bits { -		unsigned char SYNC_CLOCK :1; +		unsigned char SYNC_CLOCK  :1;  		/* dpcd_test_color_format */ -		unsigned char CLR_FORMAT :2; +		unsigned char CLR_FORMAT  :2;  		/* dpcd_test_dyn_range */ -		unsigned char DYN_RANGE  :1; -		unsigned char YCBCR      :1; +		unsigned char DYN_RANGE   :1; +		unsigned char YCBCR_COEFS :1;  		/* dpcd_test_bit_depth */ -		unsigned char BPC        :3; +		unsigned char BPC         :3;  	} bits;  	unsigned char raw;  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +union audio_test_mode { +	struct { +		unsigned char sampling_rate   :4; +		unsigned char channel_count   :4; +	} bits; +	unsigned char raw; +}; + +union audio_test_pattern_period { +	struct { +		unsigned char pattern_period   :4; +		unsigned char reserved         :4; +	} bits; +	unsigned char raw; +}; + +struct audio_test_pattern_type { +	unsigned char value; +}; + +struct dp_audio_test_data_flags { +	uint8_t test_requested  :1; +	uint8_t disable_video   :1; +}; + +struct dp_audio_test_data { + +	struct dp_audio_test_data_flags flags; +	uint8_t sampling_rate; +	uint8_t channel_count; +	uint8_t pattern_type; +	uint8_t pattern_period[8]; +}; +  /* FEC capability DPCD register field bits-*/  union dpcd_fec_capability {  	struct { @@ -661,6 +692,5 @@ struct dpcd_dsc_capabilities {  	union dpcd_dsc_ext_capabilities dsc_ext_caps;  }; -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */  #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 0ed2962add5a..3800340a5b4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #ifndef DC_DSC_H_  #define DC_DSC_H_  /* @@ -42,21 +41,28 @@ struct dc_dsc_bw_range {  struct display_stream_compressor {  	const struct dsc_funcs *funcs; -#ifndef AMD_EDID_UTILITY  	struct dc_context 
*ctx;  	int inst; -#endif  }; -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, +struct dc_dsc_policy { +	bool use_min_slices_h; +	int max_slices_h; // Maximum available if 0 +	int min_slice_height; // Must not be less than 8 +	uint32_t max_target_bpp; +	uint32_t min_target_bpp; +}; + +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, +		const uint8_t *dpcd_dsc_basic_data,  		const uint8_t *dpcd_dsc_ext_data,  		struct dsc_dec_dpcd_caps *dsc_sink_caps);  bool dc_dsc_compute_bandwidth_range(  		const struct display_stream_compressor *dsc,  		const uint32_t dsc_min_slice_height_override, -		const uint32_t min_kbps, -		const uint32_t max_kbps, +		const uint32_t min_bpp, +		const uint32_t max_bpp,  		const struct dsc_dec_dpcd_caps *dsc_sink_caps,  		const struct dc_crtc_timing *timing,  		struct dc_dsc_bw_range *range); @@ -68,5 +74,10 @@ bool dc_dsc_compute_config(  		uint32_t target_bandwidth_kbps,  		const struct dc_crtc_timing *timing,  		struct dc_dsc_config *dsc_cfg); -#endif + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, +		struct dc_dsc_policy *policy); + +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 30b2f9edd42f..737048d8a96c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -32,6 +32,74 @@  #include "dm_services.h"  #include <stdarg.h> +#include "dc.h" +#include "dc_dmub_srv.h" + +static inline void submit_dmub_read_modify_write( +	struct dc_reg_helper_state *offload, +	const struct dc_context *ctx) +{ +	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; +	bool gather = false; + +	offload->should_burst_write = +			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); +	cmd_buf->header.payload_bytes = +			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; + +	
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + +	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + +	memset(cmd_buf, 0, sizeof(*cmd_buf)); + +	offload->reg_seq_count = 0; +	offload->same_addr_count = 0; +} + +static inline void submit_dmub_burst_write( +	struct dc_reg_helper_state *offload, +	const struct dc_context *ctx) +{ +	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; +	bool gather = false; + +	cmd_buf->header.payload_bytes = +			sizeof(uint32_t) * offload->reg_seq_count; + +	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + +	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + +	memset(cmd_buf, 0, sizeof(*cmd_buf)); + +	offload->reg_seq_count = 0; +} + +static inline void submit_dmub_reg_wait( +		struct dc_reg_helper_state *offload, +		const struct dc_context *ctx) +{ +	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; +	bool gather = false; + +	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + +	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + +	memset(cmd_buf, 0, sizeof(*cmd_buf)); +	offload->reg_seq_count = 0; + +	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; +} +  struct dc_reg_value_masks {  	uint32_t value;  	uint32_t mask; @@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,  	}  } +static void dmub_flush_buffer_execute( +		struct dc_reg_helper_state *offload, +		const struct dc_context *ctx) +{ +	submit_dmub_read_modify_write(offload, ctx); +	dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static void dmub_flush_burst_write_buffer_execute( +		struct 
dc_reg_helper_state *offload, +		const struct dc_context *ctx) +{ +	submit_dmub_burst_write(offload, ctx); +	dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, +		uint32_t reg_val) +{ +	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; +	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + +	/* flush command if buffer is full */ +	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX) +		dmub_flush_burst_write_buffer_execute(offload, ctx); + +	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE && +			addr != cmd_buf->addr) { +		dmub_flush_burst_write_buffer_execute(offload, ctx); +		return false; +	} + +	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; +	cmd_buf->header.sub_type = 0; +	cmd_buf->addr = addr; +	cmd_buf->write_values[offload->reg_seq_count] = reg_val; +	offload->reg_seq_count++; + +	return true; +} + +static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, +		struct dc_reg_value_masks *field_value_mask) +{ +	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; +	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; +	struct dmub_cmd_read_modify_write_sequence *seq; + +	/* flush command if buffer is full */ +	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE && +			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX) +		dmub_flush_buffer_execute(offload, ctx); + +	if (offload->should_burst_write) { +		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value)) +			return field_value_mask->value; +		else +			offload->should_burst_write = false; +	} + +	/* pack commands */ +	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; +	cmd_buf->header.sub_type = 0; +	seq = &cmd_buf->seq[offload->reg_seq_count]; + +	if (offload->reg_seq_count) { +		if 
(cmd_buf->seq[offload->reg_seq_count - 1].addr == addr) +			offload->same_addr_count++; +		else +			offload->same_addr_count = 0; +	} + +	seq->addr = addr; +	seq->modify_mask = field_value_mask->mask; +	seq->modify_value = field_value_mask->value; +	offload->reg_seq_count++; + +	return field_value_mask->value; +} + +static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, +		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us) +{ +	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; +	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + +	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; +	cmd_buf->header.sub_type = 0; +	cmd_buf->reg_wait.addr = addr; +	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); +	cmd_buf->reg_wait.mask = mask; +	cmd_buf->reg_wait.time_out_us = time_out_us; +} +  uint32_t generic_reg_update_ex(const struct dc_context *ctx,  		uint32_t addr, int n,  		uint8_t shift1, uint32_t mask1, uint32_t field_value1, @@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,  	va_end(ap); +	if (ctx->dmub_srv && +	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) +		return dmub_reg_value_pack(ctx, addr, &field_value_mask); +		/* todo: return void so we can decouple code running in driver from register states */ +  	/* mmio write directly */  	reg_val = dm_read_reg(ctx, addr);  	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; @@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx,  	/* mmio write directly */  	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + +	if (ctx->dmub_srv && +	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) { +		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); +		/* todo: return void so we can decouple code running in driver from register states */ +	} +  	dm_write_reg(ctx, addr, reg_val);  	return 
reg_val;  } @@ -134,6 +308,14 @@ uint32_t dm_read_reg_func(  		return 0;  	}  #endif + +	if (ctx->dmub_srv && +	    ctx->dmub_srv->reg_helper_offload.gather_in_progress && +	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) { +		ASSERT(false); +		return 0; +	} +  	value = cgs_read_register(ctx->cgs_device, address);  	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx,  	uint32_t reg_val;  	int i; -	/* something is terribly wrong if time out is > 200ms. (5Hz) */ +	if (ctx->dmub_srv && +	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) { +		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, +				delay_between_poll_us * time_out_num_tries); +		return; +	} + +	/* +	 * Something is terribly wrong if time out is > 3000ms. +	 * 3000ms is the maximum time needed for SMU to pass values back. +	 * This value comes from experiments. +	 * +	 */  	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);  	for (i = 0; i <= time_out_num_tries; i++) { @@ -346,12 +540,48 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,  {  	uint32_t value = 0; +	// when reg read, there should not be any offload. +	if (ctx->dmub_srv && +	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) { +		ASSERT(false); +	} +  	dm_write_reg(ctx, addr_index, index);  	value = dm_read_reg(ctx, addr_data);  	return value;  } +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, +		uint32_t addr_index, uint32_t addr_data, +		uint32_t index, int n, +		uint8_t shift1, uint32_t mask1, uint32_t *field_value1, +		...) 
+{ +	uint32_t shift, mask, *field_value; +	uint32_t value = 0; +	int i = 1; + +	va_list ap; + +	va_start(ap, field_value1); + +	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index); +	*field_value1 = get_reg_field_value_ex(value, mask1, shift1); + +	while (i < n) { +		shift = va_arg(ap, uint32_t); +		mask = va_arg(ap, uint32_t); +		field_value = va_arg(ap, uint32_t *); + +		*field_value = get_reg_field_value_ex(value, mask, shift); +		i++; +	} + +	va_end(ap); + +	return value; +}  uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,  		uint32_t addr_index, uint32_t addr_data, @@ -382,3 +612,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,  	return reg_val;  } + +void reg_sequence_start_gather(const struct dc_context *ctx) +{ +	/* if reg sequence is supported and enabled, set flag to +	 * indicate we want to have REG_SET, REG_UPDATE macro build +	 * reg sequence command buffer rather than MMIO directly. +	 */ + +	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { +		struct dc_reg_helper_state *offload = +			&ctx->dmub_srv->reg_helper_offload; + +		/* caller sequence mismatch.  need to debug caller.  offload will not work!!! 
*/ +		ASSERT(!offload->gather_in_progress); + +		offload->gather_in_progress = true; +	} +} + +void reg_sequence_start_execute(const struct dc_context *ctx) +{ +	struct dc_reg_helper_state *offload; + +	if (!ctx->dmub_srv) +		return; + +	offload = &ctx->dmub_srv->reg_helper_offload; + +	if (offload && offload->gather_in_progress) { +		offload->gather_in_progress = false; +		offload->should_burst_write = false; +		switch (offload->cmd_data.cmd_common.header.type) { +		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: +			submit_dmub_read_modify_write(offload, ctx); +			break; +		case DMUB_CMD__REG_REG_WAIT: +			submit_dmub_reg_wait(offload, ctx); +			break; +		case DMUB_CMD__REG_SEQ_BURST_WRITE: +			submit_dmub_burst_write(offload, ctx); +			break; +		default: +			return; +		} + +		dc_dmub_srv_cmd_execute(ctx->dmub_srv); +	} +} + +void reg_sequence_wait_done(const struct dc_context *ctx) +{ +	/* callback to DM to poll for last submission done*/ +	struct dc_reg_helper_state *offload; + +	if (!ctx->dmub_srv) +		return; + +	offload = &ctx->dmub_srv->reg_helper_offload; + +	if (offload && +	    ctx->dc->debug.dmub_offload_enabled && +	    !ctx->dc->debug.dmcub_emulation) { +		dc_dmub_srv_wait_idle(ctx->dmub_srv); +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index e0856bb8511f..25c50bcab9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,8 +26,6 @@  #ifndef DC_HW_TYPES_H  #define DC_HW_TYPES_H -#ifndef AMD_EDID_UTILITY -  #include "os_types.h"  #include "fixed31_32.h"  #include "signal_types.h" @@ -167,12 +165,10 @@ enum surface_pixel_format {  	/*swaped & float*/  	SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,  	/*grow graphics here if necessary */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX,  	SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,  	SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,  	
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT, -#endif  	SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,  	SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =  		SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -180,10 +176,8 @@ enum surface_pixel_format {  	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,  	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,  		SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010,  	SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102, -#endif  	SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,  	SURFACE_PIXEL_FORMAT_INVALID @@ -222,12 +216,10 @@ enum tile_split_values {  	DC_ROTATED_MICRO_TILING = 0x3,  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  enum tripleBuffer_enable {  	DC_TRIPLEBUFFER_DISABLE = 0x0,  	DC_TRIPLEBUFFER_ENABLE = 0x1,  }; -#endif  /* TODO: These values come from hardware spec. We need to readdress this   * if they ever change. @@ -427,13 +419,11 @@ struct dc_csc_transform {  	bool enable_adjustment;  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  struct dc_rgb_fixed {  	struct fixed31_32 red;  	struct fixed31_32 green;  	struct fixed31_32 blue;  }; -#endif  struct dc_gamma {  	struct kref refcount; @@ -468,10 +458,8 @@ enum dc_cursor_color_format {  	CURSOR_MODE_COLOR_1BIT_AND,  	CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,  	CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED,  	CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED -#endif  };  /* @@ -594,8 +582,6 @@ struct scaling_taps {  	bool integer_scaling;  }; -#endif /* AMD_EDID_UTILITY */ -  enum dc_timing_standard {  	DC_TIMING_STANDARD_UNDEFINED,  	DC_TIMING_STANDARD_DMT, @@ -626,10 +612,8 @@ enum dc_color_depth {  	COLOR_DEPTH_121212,  	COLOR_DEPTH_141414,  	COLOR_DEPTH_161616, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	COLOR_DEPTH_999,  	COLOR_DEPTH_111111, -#endif  	COLOR_DEPTH_COUNT  }; @@ -690,9 +674,7 @@ struct dc_crtc_timing_flags {  	 * rates less than or equal to 340Mcsc */  	uint32_t LTE_340MCSC_SCRAMBLE:1; -#ifdef 
CONFIG_DRM_AMD_DC_DSC_SUPPORT  	uint32_t DSC : 1; /* Use DSC with this timing */ -#endif  };  enum dc_timing_3d_format { @@ -717,7 +699,6 @@ enum dc_timing_3d_format {  	TIMING_3D_FORMAT_MAX,  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct dc_dsc_config {  	uint32_t num_slices_h; /* Number of DSC slices - horizontal */  	uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -728,7 +709,6 @@ struct dc_dsc_config {  	bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */  	int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */  }; -#endif  struct dc_crtc_timing {  	uint32_t h_total;  	uint32_t h_border_left; @@ -755,13 +735,9 @@ struct dc_crtc_timing {  	enum scanning_type scan_type;  	struct dc_crtc_timing_flags flags; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct dc_dsc_config dsc_cfg; -#endif  }; -#ifndef AMD_EDID_UTILITY -  enum trigger_delay {  	TRIGGER_DELAY_NEXT_PIXEL = 0,  	TRIGGER_DELAY_NEXT_LINE, @@ -796,7 +772,6 @@ enum vram_type {  	VIDEO_MEMORY_TYPE_GDDR6  = 6,  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  enum dwb_cnv_out_bpc {  	DWB_CNV_OUT_BPC_8BPC  = 0,  	DWB_CNV_OUT_BPC_10BPC = 1, @@ -847,7 +822,6 @@ struct mcif_buf_params {  	unsigned int		swlock;  }; -#endif  #define MAX_TG_COLOR_VALUE 0x3FF  struct tg_color { @@ -857,7 +831,5 @@ struct tg_color {  	uint16_t color_b_cb;  }; -#endif /* AMD_EDID_UTILITY */ -  #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index f24fd19ed93d..d25603128394 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -29,13 +29,11 @@  #include "dc_types.h"  #include "grph_object_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  enum dc_link_fec_state {  	dc_link_fec_not_ready,  	dc_link_fec_ready,  	dc_link_fec_enabled  }; -#endif  struct dc_link_status {  	bool link_active;  	struct dpcd_caps *dpcd_caps; @@ -85,6 +83,7 @@ struct dc_link {  	bool 
link_state_valid;  	bool aux_access_disabled;  	bool sync_lt_in_progress; +	bool is_lttpr_mode_transparent;  	/* caps is the same as reported_link_cap. link_traing use  	 * reported_link_cap. Will clean up.  TODO @@ -95,6 +94,7 @@ struct dc_link {  	struct dc_lane_settings cur_lane_setting;  	struct dc_link_settings preferred_link_setting;  	struct dc_link_training_overrides preferred_training_settings; +	struct dp_audio_test_data audio_test_data;  	uint8_t ddc_hw_inst; @@ -133,6 +133,7 @@ struct dc_link {  	struct link_flags {  		bool dp_keep_receiver_powered;  		bool dp_skip_DID2; +		bool dp_skip_reset_segment;  	} wa_flags;  	struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -140,9 +141,7 @@ struct dc_link {  	struct link_trace link_trace;  	struct gpio *hpd_gpio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	enum dc_link_fec_state fec_state; -#endif  };  const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); @@ -206,6 +205,7 @@ enum dc_detect_reason {  bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);  bool dc_link_get_hpd_state(struct dc_link *dc_link);  enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); +enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);  /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).   
* Return: @@ -259,6 +259,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link);  bool dc_link_dp_set_test_pattern(  	struct dc_link *link,  	enum dp_test_pattern test_pattern, +	enum dp_test_pattern_color_space test_pattern_color_space,  	const struct link_training_settings *p_link_settings,  	const unsigned char *p_custom_pattern,  	unsigned int cust_pattern_size); @@ -290,6 +291,7 @@ void dc_link_enable_hpd(const struct dc_link *link);  void dc_link_disable_hpd(const struct dc_link *link);  void dc_link_set_test_pattern(struct dc_link *link,  			enum dp_test_pattern test_pattern, +			enum dp_test_pattern_color_space test_pattern_color_space,  			const struct link_training_settings *p_link_settings,  			const unsigned char *p_custom_pattern,  			unsigned int cust_pattern_size); @@ -300,11 +302,18 @@ uint32_t dc_link_bandwidth_kbps(  const struct dc_link_settings *dc_link_get_link_cap(  		const struct dc_link *link); +void dc_link_overwrite_extended_receiver_cap( +		struct dc_link *link); +  bool dc_submit_i2c(  		struct dc *dc,  		uint32_t link_index,  		struct i2c_command *cmd); +bool dc_submit_i2c_oem( +		struct dc *dc, +		struct i2c_command *cmd); +  uint32_t dc_bandwidth_in_kbps_from_timing(  	const struct dc_crtc_timing *timing);  #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index fdb6adc37857..92096de79dec 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -52,7 +52,6 @@ struct freesync_context {  	bool dummy;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  enum hubp_dmdata_mode {  	DMDATA_SW_MODE,  	DMDATA_HW_MODE @@ -82,9 +81,7 @@ struct dc_dmdata_attributes {  	/* An unbounded array of uint32s, represents software dmdata to be loaded */  	uint32_t *dmdata_sw_data;  }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dc_writeback_info {  	bool wb_enabled;  	int dwb_pipe_inst; @@ -96,7 +93,6 @@ struct 
dc_writeback_update {  	unsigned int num_wb_info;  	struct dc_writeback_info writeback_info[MAX_DWB_PIPES];  }; -#endif  enum vertical_interrupt_ref_point {  	START_V_UPDATE = 0, @@ -121,9 +117,7 @@ union stream_update_flags {  		uint32_t abm_level:1;  		uint32_t dpms_off:1;  		uint32_t gamut_remap:1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		uint32_t wb_update:1; -#endif  	} bits;  	uint32_t raw; @@ -164,6 +158,7 @@ struct dc_stream_state {  	enum view_3d_format view_format; +	bool use_vsc_sdp_for_colorimetry;  	bool ignore_msa_timing_param;  	bool converter_disable_audio;  	uint8_t qs_bit; @@ -203,11 +198,9 @@ struct dc_stream_state {  	struct crtc_trigger_info triggered_crtc_reset; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* writeback */  	unsigned int num_wb_info;  	struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; -#endif  	/* Computed state bits */  	bool mode_changed : 1; @@ -226,9 +219,7 @@ struct dc_stream_state {  	bool apply_seamless_boot_optimization;  	uint32_t stream_id; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	bool is_dsc_enabled; -#endif  	union stream_update_flags update_flags;  }; @@ -251,6 +242,7 @@ struct dc_stream_update {  	struct dc_info_packet *vsp_infopacket;  	bool *dpms_off; +	bool integer_scaling_update;  	struct colorspace_transform *gamut_remap;  	enum dc_color_space *output_color_space; @@ -258,12 +250,8 @@ struct dc_stream_update {  	struct dc_csc_transform *output_csc_transform; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct dc_writeback_update *wb_update; -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  	struct dc_dsc_config *dsc_config; -#endif  };  bool dc_is_stream_unchanged( @@ -353,18 +341,23 @@ bool dc_add_all_planes_for_stream(  		int plane_count,  		struct dc_state *context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  bool dc_stream_add_writeback(struct dc *dc,  		struct dc_stream_state *stream,  		struct dc_writeback_info *wb_info); +  bool dc_stream_remove_writeback(struct dc *dc,  		struct dc_stream_state *stream, 
 		uint32_t dwb_pipe_inst); + +bool dc_stream_warmup_writeback(struct dc *dc, +		int num_dwb, +		struct dc_writeback_info *wb_info); +  bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream); +  bool dc_stream_set_dynamic_metadata(struct dc *dc,  		struct dc_stream_state *stream,  		struct dc_dmdata_attributes *dmdata_attr); -#endif  enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); @@ -446,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc,  		       uint32_t *g_y,  		       uint32_t *b_cb); -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc,  					struct dc_stream_state **stream,  					int num_streams, -					const struct dc_static_screen_events *events); +					const struct dc_static_screen_params *params);  void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,  		enum dc_dynamic_expansion option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index d9be8fc3889f..e59532d98cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,7 +25,6 @@  #ifndef DC_TYPES_H_  #define DC_TYPES_H_ -#ifndef AMD_EDID_UTILITY  /* AND EdidUtility only needs a portion   * of this file, including the rest only   * causes additional issues. @@ -48,6 +47,7 @@ struct dc_stream_state;  struct dc_link;  struct dc_sink;  struct dal; +struct dc_dmub_srv;  /********************************   * Environment definitions @@ -60,7 +60,12 @@ enum dce_environment {  	DCE_ENV_FPGA_MAXIMUS,  	/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces  	 * requirements of Diagnostics team. */ -	DCE_ENV_DIAG +	DCE_ENV_DIAG, +	/* +	 * Guest VM system, DC HW may exist but is not virtualized and +	 * should not be used.  SW support for VDI only. +	 */ +	DCE_ENV_VIRTUAL_HW  };  /* Note: use these macro definitions instead of direct comparison! 
*/ @@ -109,6 +114,8 @@ struct dc_context {  	uint32_t dc_sink_id_count;  	uint32_t dc_stream_id_count;  	uint64_t fbc_gpu_addr; +	struct dc_dmub_srv *dmub_srv; +  #ifdef CONFIG_DRM_AMD_DC_HDCP  	struct cp_psp cp_psp;  #endif @@ -119,6 +126,7 @@ struct dc_context {  #define DC_EDID_BLOCK_SIZE 128  #define MAX_SURFACE_NUM 4  #define NUM_PIXEL_FORMATS 10 +#define MAX_REPEATER_CNT 8  #include "dc_ddc_types.h" @@ -221,6 +229,7 @@ struct dc_panel_patch {  	unsigned int extra_t12_ms;  	unsigned int extra_delay_backlight_off;  	unsigned int extra_t7_ms; +	unsigned int manage_secondary_link;  };  struct dc_edid_caps { @@ -402,6 +411,30 @@ enum dpcd_downstream_port_max_bpc {  	DOWN_STREAM_MAX_12BPC,  	DOWN_STREAM_MAX_16BPC  }; + + +enum link_training_offset { +	DPRX                = 0, +	LTTPR_PHY_REPEATER1 = 1, +	LTTPR_PHY_REPEATER2 = 2, +	LTTPR_PHY_REPEATER3 = 3, +	LTTPR_PHY_REPEATER4 = 4, +	LTTPR_PHY_REPEATER5 = 5, +	LTTPR_PHY_REPEATER6 = 6, +	LTTPR_PHY_REPEATER7 = 7, +	LTTPR_PHY_REPEATER8 = 8 +}; + +struct dc_lttpr_caps { +	union dpcd_rev revision; +	uint8_t mode; +	uint8_t max_lane_count; +	uint8_t max_link_rate; +	uint8_t phy_repeater_cnt; +	uint8_t max_ext_timeout; +	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; +  struct dc_dongle_caps {  	/* dongle type (DP converter, CV smart dongle) */  	enum display_dongle_type dongle_type; @@ -440,7 +473,6 @@ enum display_content_type {  	DISPLAY_CONTENT_TYPE_GAME = 8  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /* writeback */  struct dwb_stereo_params {  	bool				stereo_enabled;		/* false: normal mode, true: 3D stereo */ @@ -471,7 +503,6 @@ struct dc_dwb_params {  	enum dwb_subsample_position	subsample_position;  	struct dc_transfer_func *out_transfer_func;  }; -#endif  /* audio*/ @@ -573,15 +604,17 @@ struct audio_info {  	/* this field must be last in this struct */  	struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];  }; - +struct audio_check { +	unsigned int audio_packet_type; +	unsigned int max_audiosample_rate; +	
unsigned int acat; +};  enum dc_infoframe_type {  	DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81,  	DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,  	DC_HDMI_INFOFRAME_TYPE_SPD = 0x83,  	DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	DC_DP_INFOFRAME_TYPE_PPS = 0x10, -#endif  };  struct dc_info_packet { @@ -696,7 +729,7 @@ struct psr_context {  	/* The VSync rate in Hz used to calculate the  	 * step size for smooth brightness feature  	 */ -	unsigned int vsyncRateHz; +	unsigned int vsync_rate_hz;  	unsigned int skipPsrWaitForPllLock;  	unsigned int numberOfControllers;  	/* Unused, for future use. To indicate that first changed frame from @@ -757,10 +790,6 @@ struct dc_clock_config {  	uint32_t current_clock_khz;/*current clock in use*/  }; -#endif /*AMD_EDID_UTILITY*/ -//AMD EDID UTILITY does not need any of the above structures - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  /* DSC DPCD capabilities */  union dsc_slice_caps1 {  	struct { @@ -830,6 +859,5 @@ struct dsc_dec_dpcd_caps {  	uint32_t branch_overall_throughput_1_mps; /* In MPs */  	uint32_t branch_max_line_width;  }; -#endif  #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 7ba7e6f722f6..ba0caaffa24b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -67,7 +67,6 @@  	SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \  	NBIO_SR(BIOS_SCRATCH_2) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define ABM_DCN20_REG_LIST() \  	ABM_COMMON_REG_LIST_DCE_BASE(), \  	SR(DC_ABM1_HG_SAMPLE_RATE), \ @@ -81,7 +80,6 @@  	SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \  	SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \  	NBIO_SR(BIOS_SCRATCH_2) -#endif  #define ABM_SF(reg_name, field_name, post_fix)\  	.field_name = reg_name ## __ ## field_name ## post_fix @@ -163,9 +161,7 @@  	ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \  			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh) -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh) -#endif  #define ABM_REG_FIELD_LIST(type) \  	type ABM1_HG_NUM_OF_BINS_SEL; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index e472608faf33..f1a5d2c6aa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -60,12 +60,14 @@ enum {  	AUX_DEFER_RETRY_COUNTER = 6  }; -#define TIME_OUT_INCREMENT      1016 -#define TIME_OUT_MULTIPLIER_8 	8 -#define TIME_OUT_MULTIPLIER_16  16 -#define TIME_OUT_MULTIPLIER_32  32 -#define TIME_OUT_MULTIPLIER_64  64 -#define MAX_TIMEOUT_LENGTH      127 +#define TIME_OUT_INCREMENT        1016 +#define TIME_OUT_MULTIPLIER_8     8 +#define TIME_OUT_MULTIPLIER_16    16 +#define TIME_OUT_MULTIPLIER_32    32 +#define TIME_OUT_MULTIPLIER_64    64 +#define MAX_TIMEOUT_LENGTH        127 +#define DEFAULT_AUX_ENGINE_MULT   0 +#define DEFAULT_AUX_ENGINE_LENGTH 69  static void release_engine(  	struct dce_aux *engine) @@ -427,11 +429,14 @@ void dce110_engine_destroy(struct dce_aux **engine)  } -static bool dce_aux_configure_timeout(struct ddc_service *ddc, +static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc,  		uint32_t timeout_in_us)  {  	uint32_t multiplier = 0;  	uint32_t length = 0; +	uint32_t prev_length = 0; +	uint32_t prev_mult = 0; +	uint32_t prev_timeout_val = 0;  	struct ddc *ddc_pin = ddc->ddc_pin;  	struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];  	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); @@ -440,7 +445,10 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,  	aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;  	/* 2-Update aux timeout period length and multiplier */ -	if (timeout_in_us <= TIME_OUT_INCREMENT) { +	if (timeout_in_us == 0) { +		multiplier = DEFAULT_AUX_ENGINE_MULT; +		length = 
DEFAULT_AUX_ENGINE_LENGTH; +	} else if (timeout_in_us <= TIME_OUT_INCREMENT) {  		multiplier = 0;  		length = timeout_in_us/TIME_OUT_MULTIPLIER_8;  		if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) @@ -464,9 +472,29 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,  	length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH; +	REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult); + +	switch (prev_mult) { +	case 0: +		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8; +		break; +	case 1: +		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16; +		break; +	case 2: +		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32; +		break; +	case 3: +		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64; +		break; +	default: +		prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8; +		break; +	} +  	REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); -	return true; +	return prev_timeout_val;  }  static struct dce_aux_funcs aux_functions = { @@ -583,6 +611,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  	uint8_t reply;  	bool payload_reply = true;  	enum aux_channel_operation_result operation_result; +	bool retry_on_defer = false; +  	int aux_ack_retries = 0,  		aux_defer_retries = 0,  		aux_i2c_defer_retries = 0, @@ -613,8 +643,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  			break;  			case AUX_TRANSACTION_REPLY_AUX_DEFER: -			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:  			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: +				retry_on_defer = true; +				/* fall through */ +			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:  				if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {  					goto fail;  				} else { @@ -647,15 +679,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  			break;  		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: -			if (++aux_timeout_retries >= 
AUX_MAX_TIMEOUT_RETRIES) -				goto fail; -			else { -				/* -				 * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts -				 * According to the DP spec there should be 3 retries total -				 * with a 400us wait inbetween each. Hardware already waits -				 * for 550us therefore no wait is required here. -				 */ +			// Check whether a DEFER had occurred before the timeout. +			// If so, treat timeout as a DEFER. +			if (retry_on_defer) { +				if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) +					goto fail; +				else if (payload->defer_delay > 0) +					msleep(payload->defer_delay); +			} else { +				if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) +					goto fail; +				else { +					/* +					 * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts +					 * According to the DP spec there should be 3 retries total +					 * with a 400us wait inbetween each. Hardware already waits +					 * for 550us therefore no wait is required here. +					 */ +				}  			}  			break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index b4b2c79a8073..382465862f29 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -30,7 +30,6 @@  #include "inc/hw/aux_engine.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #define AUX_COMMON_REG_LIST0(id)\  	SRI(AUX_CONTROL, DP_AUX, id), \  	SRI(AUX_ARB_CONTROL, DP_AUX, id), \ @@ -39,7 +38,6 @@  	SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \  	SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \  	SRI(AUX_SW_STATUS, DP_AUX, id) -#endif  #define AUX_COMMON_REG_LIST(id)\  	SRI(AUX_CONTROL, DP_AUX, id), \ @@ -311,7 +309,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  		struct aux_payload *cmd);  struct dce_aux_funcs { -	bool (*configure_timeout) +	uint32_t (*configure_timeout)  		(struct ddc_service *ddc,  		 uint32_t timeout);  	void (*destroy) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index f787a6b94781..2e992fbc0d71 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -905,7 +905,7 @@ static bool dce112_program_pix_clk(  	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);  	struct bp_pixel_clock_parameters bp_pc_params = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {  		unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;  		unsigned dp_dto_ref_100hz = 7000000; @@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz(  	return false;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */  struct pixel_rate_range_table_entry { @@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {  	.get_pix_clk_dividers = dce112_get_pix_clk_dividers,  	.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz  }; -#endif  /*****************************************/  /* Constructor                           */ @@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct(  	return true;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  bool dcn20_clk_src_construct(  	struct dce110_clk_src *clk_src,  	struct dc_context *ctx, @@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct(  	return ret;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 43c1bf60b83c..51bd25079606 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -55,7 +55,6 @@  	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\  	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define 
CS_COMMON_REG_LIST_DCN2_0(index, pllid) \  		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\  		SRII(PHASE, DP_DTO, 0),\ @@ -76,9 +75,7 @@  		SRII(PIXEL_RATE_CNTL, OTG, 3),\  		SRII(PIXEL_RATE_CNTL, OTG, 4),\  		SRII(PIXEL_RATE_CNTL, OTG, 5) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \  		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\  		SRII(PHASE, DP_DTO, 0),\ @@ -93,17 +90,14 @@  		SRII(PIXEL_RATE_CNTL, OTG, 1),\  		SRII(PIXEL_RATE_CNTL, OTG, 2),\  		SRII(PIXEL_RATE_CNTL, OTG, 3) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\  	CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\  	CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\  	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\  	CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \  		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ @@ -201,7 +195,6 @@ bool dce112_clk_src_construct(  	const struct dce110_clk_src_shift *cs_shift,  	const struct dce110_clk_src_mask *cs_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  bool dcn20_clk_src_construct(  	struct dce110_clk_src *clk_src,  	struct dc_context *ctx, @@ -210,6 +203,5 @@ bool dcn20_clk_src_construct(  	const struct dce110_clk_src_regs *regs,  	const struct dce110_clk_src_shift *cs_shift,  	const struct dce110_clk_src_mask *cs_mask); -#endif  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index ba995d3f2318..30d953acd016 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -59,6 +59,12 @@  #define MCP_BL_SET_PWM_FRAC 0x6A  /* Enable or disable Fractional PWM */  #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK   0x00000001L +// PSP FW version +#define mmMP0_SMN_C2PMSG_58				0x1607A + +//Register 
access policy version +#define mmMP0_SMN_C2PMSG_91				0x1609B +  static bool dce_dmcu_init(struct dmcu *dmcu)  {  	// Do nothing @@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop(  	return;  } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  static void dcn10_get_dmcu_version(struct dmcu *dmcu)  {  	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); @@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)  	const struct dc_config *config = &dmcu->ctx->dc->config;  	bool status = false; +	PERF_TRACE();  	/*  Definition of DC_DMCU_SCRATCH  	 *  0 : firmare not loaded  	 *  1 : PSP load DMCU FW but not initialized @@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)  		break;  	} +	PERF_TRACE();  	return status;  } +static bool dcn21_dmcu_init(struct dmcu *dmcu) +{ +	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); +	uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15); + +	if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) { +		return false; +	} + +	return dcn10_dmcu_init(dmcu); +}  static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,  		unsigned int start_offset, @@ -518,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)  	if (dmcu->dmcu_state != DMCU_RUNNING)  		return; -	dcn10_get_dmcu_psr_state(dmcu, &psr_state); -	if (psr_state == 0 && !enable) -		return;  	/* waitDMCUReadyForCmd */  	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,  				dmcu_wait_reg_ready_interval, @@ -727,9 +743,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)  	return true;  } -#endif //(CONFIG_DRM_AMD_DC_DCN1_0) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  static bool dcn20_lock_phy(struct dmcu *dmcu)  { @@ -777,7 +791,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)  	return true;  } -#endif //(CONFIG_DRM_AMD_DC_DCN2_0) +#endif //(CONFIG_DRM_AMD_DC_DCN)  static const struct dmcu_funcs dce_funcs = {  	.dmcu_init = dce_dmcu_init, @@ -790,7 +804,7 @@ static const struct dmcu_funcs 
dce_funcs = {  	.is_dmcu_initialized = dce_is_dmcu_initialized  }; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  static const struct dmcu_funcs dcn10_funcs = {  	.dmcu_init = dcn10_dmcu_init,  	.load_iram = dcn10_dmcu_load_iram, @@ -801,9 +815,7 @@ static const struct dmcu_funcs dcn10_funcs = {  	.get_psr_wait_loop = dcn10_get_psr_wait_loop,  	.is_dmcu_initialized = dcn10_is_dmcu_initialized  }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  static const struct dmcu_funcs dcn20_funcs = {  	.dmcu_init = dcn10_dmcu_init,  	.load_iram = dcn10_dmcu_load_iram, @@ -816,6 +828,19 @@ static const struct dmcu_funcs dcn20_funcs = {  	.lock_phy = dcn20_lock_phy,  	.unlock_phy = dcn20_unlock_phy  }; + +static const struct dmcu_funcs dcn21_funcs = { +	.dmcu_init = dcn21_dmcu_init, +	.load_iram = dcn10_dmcu_load_iram, +	.set_psr_enable = dcn10_dmcu_set_psr_enable, +	.setup_psr = dcn10_dmcu_setup_psr, +	.get_psr_state = dcn10_get_dmcu_psr_state, +	.set_psr_wait_loop = dcn10_psr_wait_loop, +	.get_psr_wait_loop = dcn10_get_psr_wait_loop, +	.is_dmcu_initialized = dcn10_is_dmcu_initialized, +	.lock_phy = dcn20_lock_phy, +	.unlock_phy = dcn20_unlock_phy +};  #endif  static void dce_dmcu_construct( @@ -836,6 +861,26 @@ static void dce_dmcu_construct(  	dmcu_dce->dmcu_mask = dmcu_mask;  } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dcn21_dmcu_construct( +		struct dce_dmcu *dmcu_dce, +		struct dc_context *ctx, +		const struct dce_dmcu_registers *regs, +		const struct dce_dmcu_shift *dmcu_shift, +		const struct dce_dmcu_mask *dmcu_mask) +{ +	uint32_t psp_version = 0; + +	dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + +	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { +		psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); +		dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); +		dmcu_dce->base.psp_version = psp_version; +	} +} +#endif +  struct dmcu *dce_dmcu_create(  	struct dc_context *ctx,  	const struct 
dce_dmcu_registers *regs, @@ -857,7 +902,7 @@ struct dmcu *dce_dmcu_create(  	return &dmcu_dce->base;  } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  struct dmcu *dcn10_dmcu_create(  	struct dc_context *ctx,  	const struct dce_dmcu_registers *regs, @@ -878,9 +923,7 @@ struct dmcu *dcn10_dmcu_create(  	return &dmcu_dce->base;  } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dmcu *dcn20_dmcu_create(  	struct dc_context *ctx,  	const struct dce_dmcu_registers *regs, @@ -901,6 +944,27 @@ struct dmcu *dcn20_dmcu_create(  	return &dmcu_dce->base;  } + +struct dmcu *dcn21_dmcu_create( +	struct dc_context *ctx, +	const struct dce_dmcu_registers *regs, +	const struct dce_dmcu_shift *dmcu_shift, +	const struct dce_dmcu_mask *dmcu_mask) +{ +	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); + +	if (dmcu_dce == NULL) { +		BREAK_TO_DEBUGGER(); +		return NULL; +	} + +	dcn21_dmcu_construct( +		dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + +	dmcu_dce->base.funcs = &dcn21_funcs; + +	return &dmcu_dce->base; +}  #endif  void dce_dmcu_destroy(struct dmcu **dmcu) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index cc8587683b4b..5e044c2d3d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -71,6 +71,10 @@  	DMCU_COMMON_REG_LIST_DCE_BASE(), \  	SR(DMU_MEM_PWR_CNTL) +#define DMCU_DCN20_REG_LIST()\ +	DMCU_DCN10_REG_LIST(), \ +	SR(DMCUB_SCRATCH15) +  #define DMCU_SF(reg_name, field_name, post_fix)\  	.field_name = reg_name ## __ ## field_name ## post_fix @@ -175,6 +179,7 @@ struct dce_dmcu_registers {  	uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;  	uint32_t SMU_INTERRUPT_CONTROL;  	uint32_t DC_DMCU_SCRATCH; +	uint32_t DMCUB_SCRATCH15;  };  struct dce_dmcu { @@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create(  	const struct dce_dmcu_shift *dmcu_shift,  	const struct dce_dmcu_mask *dmcu_mask); -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dmcu *dcn20_dmcu_create(  	struct dc_context *ctx,  	const struct dce_dmcu_registers *regs,  	const struct dce_dmcu_shift *dmcu_shift,  	const struct dce_dmcu_mask *dmcu_mask); -#endif + +struct dmcu *dcn21_dmcu_create( +	struct dc_context *ctx, +	const struct dce_dmcu_registers *regs, +	const struct dce_dmcu_shift *dmcu_shift, +	const struct dce_dmcu_mask *dmcu_mask);  void dce_dmcu_destroy(struct dmcu **dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 0275d6d60da4..e1c5839a80dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -25,7 +25,7 @@  #include "dce_hwseq.h"  #include "reg_helper.h" -#include "hw_sequencer.h" +#include "hw_sequencer_private.h"  #include "core_types.h"  #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 32d145a0d6fc..c5aa1f48593a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -25,7 +25,7 @@  #ifndef __DCE_HWSEQ_H__  #define __DCE_HWSEQ_H__ -#include "hw_sequencer.h" +#include "dc_types.h"  #define BL_REG_LIST()\  	SR(LVTMA_PWRSEQ_CNTL), \ @@ -210,7 +210,6 @@  	SR(DC_IP_REQUEST_CNTL), \  	BL_REG_LIST() -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define HWSEQ_DCN2_REG_LIST()\  	HWSEQ_DCN_REG_LIST(), \  	HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -276,9 +275,7 @@  	SR(D6VGA_CONTROL), \  	SR(DC_IP_REQUEST_CNTL), \  	BL_REG_LIST() -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define HWSEQ_DCN21_REG_LIST()\  	HWSEQ_DCN_REG_LIST(), \  	HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -329,7 +326,6 @@  	SR(D6VGA_CONTROL), \  	SR(DC_IP_REQUEST_CNTL), \  	BL_REG_LIST() -#endif  struct dce_hwseq_registers { @@ -577,7 +573,6 @@ struct dce_hwseq_registers {  	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\  	
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\  	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \  	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -637,9 +632,7 @@ struct dce_hwseq_registers {  	HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \  	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \  	HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\  	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \  	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -682,7 +675,6 @@ struct dce_hwseq_registers {  	HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \  	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \  	HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) -#endif  #define HWSEQ_REG_FIELD_LIST(type) \  	type DCFE_CLOCK_ENABLE; \ @@ -800,8 +792,7 @@ struct dce_hwseq_registers {  	type D2VGA_MODE_ENABLE; \  	type D3VGA_MODE_ENABLE; \  	type D4VGA_MODE_ENABLE; \ -	type AZALIA_AUDIO_DTO_MODULE;\ -	type HPO_HDMISTREAMCLK_GATE_DIS; +	type AZALIA_AUDIO_DTO_MODULE;  struct dce_hwseq_shift {  	HWSEQ_REG_FIELD_LIST(uint8_t) @@ -820,6 +811,10 @@ enum blnd_mode {  	BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */  }; +struct dce_hwseq; +struct pipe_ctx; +struct clock_source; +  void dce_enable_fe_clock(struct dce_hwseq *hwss,  		unsigned int inst, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c index 35a75398fcb4..dd41736bb5c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -31,7 +31,7 @@ bool dce_i2c_submit_command(  	struct i2c_command *cmd)  {  	struct dce_i2c_hw *dce_i2c_hw; -	struct dce_i2c_sw *dce_i2c_sw; +	struct dce_i2c_sw dce_i2c_sw = {0};  	if (!ddc) {  		BREAK_TO_DEBUGGER(); @@ -43,18 +43,15 @@ bool 
dce_i2c_submit_command(  		return false;  	} -	/* The software engine is only available on dce8 */ -	dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc); - -	if (!dce_i2c_sw) { -		dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); - -		if (!dce_i2c_hw) -			return false; +	dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); +	if (dce_i2c_hw)  		return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); -	} -	return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw); +	dce_i2c_sw.ctx = ddc->ctx; +	if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) { +		return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw); +	} +	return false;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index aad7b52165be..1cd4d8fc361f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -296,9 +296,7 @@ static bool setup_engine(  	struct dce_i2c_hw *dce_i2c_hw)  {  	uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint32_t  reset_length = 0; -#endif  	/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/  	REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); @@ -322,14 +320,12 @@ static bool setup_engine(  		REG_UPDATE_N(SETUP, 2,  			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,  			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	} else {  		reset_length = dce_i2c_hw->send_reset_length;  		REG_UPDATE_N(SETUP, 3,  			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,  			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length,  			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#endif  	}  	/* Program HW priority  	 * set to High - interrupt software I2C at any time @@ -705,7 +701,6 @@ void dcn1_i2c_hw_construct(  	dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;  } 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  void dcn2_i2c_hw_construct(  	struct dce_i2c_hw *dce_i2c_hw,  	struct dc_context *ctx, @@ -724,4 +719,3 @@ void dcn2_i2c_hw_construct(  	if (ctx->dc->debug.scl_reset_length10)  		dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index cb0234e5d597..d4b2037f7d74 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -177,9 +177,7 @@ struct dce_i2c_shift {  	uint8_t DC_I2C_INDEX;  	uint8_t DC_I2C_INDEX_WRITE;  	uint8_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif  	uint8_t DC_I2C_REG_RW_CNTL_STATUS;  }; @@ -220,17 +218,13 @@ struct dce_i2c_mask {  	uint32_t DC_I2C_INDEX;  	uint32_t DC_I2C_INDEX_WRITE;  	uint32_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif  	uint32_t DC_I2C_REG_RW_CNTL_STATUS;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\  	I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\  	I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh) -#endif  struct dce_i2c_registers {  	uint32_t SETUP; @@ -312,7 +306,6 @@ void dcn1_i2c_hw_construct(  	const struct dce_i2c_shift *shifts,  	const struct dce_i2c_mask *masks); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  void dcn2_i2c_hw_construct(  	struct dce_i2c_hw *dce_i2c_hw,  	struct dc_context *ctx, @@ -320,7 +313,6 @@ void dcn2_i2c_hw_construct(  	const struct dce_i2c_registers *regs,  	const struct dce_i2c_shift *shifts,  	const struct dce_i2c_mask *masks); -#endif  bool dce_i2c_submit_command_hw(  	struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index a5a11c251e25..87d8428df6c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -73,31 +73,6 @@ static void release_engine_dce_sw(  	dce_i2c_sw->ddc = NULL;  } -static bool get_hw_supported_ddc_line( -	struct ddc *ddc, -	enum gpio_ddc_line *line) -{ -	enum gpio_ddc_line line_found; - -	*line = GPIO_DDC_LINE_UNKNOWN; - -	if (!ddc) { -		BREAK_TO_DEBUGGER(); -		return false; -	} - -	if (!ddc->hw_info.hw_supported) -		return false; - -	line_found = dal_ddc_get_line(ddc); - -	if (line_found >= GPIO_DDC_LINE_COUNT) -		return false; - -	*line = line_found; - -	return true; -}  static bool wait_for_scl_high_sw(  	struct dc_context *ctx,  	struct ddc *ddc, @@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw(  	return result;  } -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( -	struct resource_pool *pool, -	struct ddc *ddc) -{ -	enum gpio_ddc_line line; -	struct dce_i2c_sw *engine = NULL; - -	if (get_hw_supported_ddc_line(ddc, &line)) -		engine = pool->sw_i2cs[line]; - -	if (!engine) -		return NULL; - -	if (!dce_i2c_engine_acquire_sw(engine, ddc)) -		return NULL; - -	return engine; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h index 5bbcdd455614..019fc47bb767 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h @@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw(  	struct i2c_command *cmd,  	struct dce_i2c_sw *dce_i2c_sw); -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( -	struct resource_pool *pool, -	struct ddc *ddc); +bool dce_i2c_engine_acquire_sw( +	struct dce_i2c_sw *dce_i2c_sw, +	struct ddc *ddc_handle);  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 6ed922a3c1cd..451574971b96 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet(  	
		AFMT_GENERIC0_UPDATE, (packet_index == 0),  			AFMT_GENERIC2_UPDATE, (packet_index == 2));  	} -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	if (REG(AFMT_VBI_PACKET_CONTROL1)) {  		switch (packet_index) {  		case 0: @@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet(  				HDMI_GENERIC1_SEND, send,  				HDMI_GENERIC1_LINE, line);  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case 4:  		if (REG(HDMI_GENERIC_PACKET_CONTROL2))  			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, @@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting)  { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	uint32_t h_active_start;  	uint32_t v_active_start;  	uint32_t misc0 = 0; @@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  		if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)  			REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  		if (enc110->se_mask->DP_VID_N_MUL)  			REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);  #endif @@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  		break;  	} -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	if (REG(DP_MSA_MISC))  		misc1 = REG_READ(DP_MSA_MISC);  #endif @@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  	/* set dynamic range and YCbCr range */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	switch (hw_crtc_timing.display_color_depth) {  	case COLOR_DEPTH_666:  		colorimetry_bpc = 0; @@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  				DP_DYN_RANGE, dynamic_range_rgb,  				DP_YCBCR_RANGE, 
dynamic_range_ycbcr); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  		if (REG(DP_MSA_COLORIMETRY))  			REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); @@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(  				hw_crtc_timing.v_front_porch; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  		/* start at begining of left border */  		if (REG(DP_MSA_TIMING_PARAM2))  			REG_SET_2(DP_MSA_TIMING_PARAM2, 0, @@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(  		dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);  	} -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	if (enc110->se_mask->HDMI_DB_DISABLE) {  		/* for bring up, disable dp double  TODO */  		if (REG(HDMI_DB_CONTROL)) @@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(  		HDMI_GENERIC1_LINE, 0,  		HDMI_GENERIC1_SEND, 0); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	/* stop generic packets 2 & 3 on HDMI */  	if (REG(HDMI_GENERIC_PACKET_CONTROL2))  		REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c new file mode 100644 index 000000000000..225955ec6d39 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_psr.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../../dmub/inc/dmub_srv.h" +#include "dmub_fw_state.h" +#include "core_types.h" +#include "ipp.h" + +#define MAX_PIPES 6 + +/** + * Get PSR state from firmware. + */ +static void dmub_get_psr_state(uint32_t *psr_state) +{ +	// Not yet implemented +	// Trigger GPINT interrupt from firmware +} + +/** + * Enable/Disable PSR. 
+ */ +static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) +{ +	union dmub_rb_cmd cmd; +	struct dc_context *dc = dmub->ctx; + +	cmd.psr_enable.header.type = DMUB_CMD__PSR; + +	if (enable) +		cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE; +	else +		cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE; + +	cmd.psr_enable.header.payload_bytes = 0; // Send header only + +	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header); +	dc_dmub_srv_cmd_execute(dc->dmub_srv); +	dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Set PSR level. + */ +static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) +{ +	union dmub_rb_cmd cmd; +	uint32_t psr_state = 0; +	struct dc_context *dc = dmub->ctx; + +	dmub_get_psr_state(&psr_state); + +	if (psr_state == 0) +		return; + +	cmd.psr_set_level.header.type = DMUB_CMD__PSR; +	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL; +	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data); +	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; + +	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header); +	dc_dmub_srv_cmd_execute(dc->dmub_srv); +	dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Setup PSR by programming phy registers and sending psr hw context values to firmware. 
+ */
+static bool dmub_setup_psr(struct dmub_psr *dmub,
+		struct dc_link *link,
+		struct psr_context *psr_context)
+{
+	union dmub_rb_cmd cmd;
+	struct dc_context *dc = dmub->ctx;
+	struct dmub_cmd_psr_copy_settings_data *copy_settings_data
+		= &cmd.psr_copy_settings.psr_copy_settings_data;
+	struct pipe_ctx *pipe_ctx = NULL;
+	struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+
+	/* Find the eDP pipe driven by this link in the current state. */
+	for (int i = 0; i < MAX_PIPES; i++) {
+		if (res_ctx &&
+				res_ctx->pipe_ctx[i].stream &&
+				res_ctx->pipe_ctx[i].stream->link &&
+				res_ctx->pipe_ctx[i].stream->link == link &&
+				res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+			pipe_ctx = &res_ctx->pipe_ctx[i];
+			break;
+		}
+	}
+
+	/* plane_res and stream_res are embedded members of pipe_ctx, so taking
+	 * their address can never yield NULL; only the pipe pointer itself needs
+	 * checking.
+	 */
+	if (!pipe_ctx)
+		return false;
+
+	// Program DP DPHY fast training registers
+	link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+			psr_context->psrExitLinkTrainingRequired);
+
+	// Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high
+	link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+			psr_context->sdpTransmitLineNumDeadline);
+
+	cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
+	cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
+	cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
+
+	// Hw insts
+	copy_settings_data->dpphy_inst				= psr_context->phyType;
+	copy_settings_data->aux_inst				= psr_context->channel;
+	copy_settings_data->digfe_inst				= psr_context->engineId;
+	copy_settings_data->digbe_inst				= psr_context->transmitterId;
+
+	copy_settings_data->mpcc_inst				= pipe_ctx->plane_res.mpcc_inst;
+
+	if (pipe_ctx->plane_res.hubp)
+		copy_settings_data->hubp_inst			= pipe_ctx->plane_res.hubp->inst;
+	else
+		copy_settings_data->hubp_inst			= 0;
+	if (pipe_ctx->plane_res.dpp)
+		copy_settings_data->dpp_inst			=
pipe_ctx->plane_res.dpp->inst; +	else +		copy_settings_data->dpp_inst			= 0; +	if (pipe_ctx->stream_res.opp) +		copy_settings_data->opp_inst			= pipe_ctx->stream_res.opp->inst; +	else +		copy_settings_data->opp_inst			= 0; +	if (pipe_ctx->stream_res.tg) +		copy_settings_data->otg_inst			= pipe_ctx->stream_res.tg->inst; +	else +		copy_settings_data->otg_inst			= 0; + +	// Misc +	copy_settings_data->psr_level				= psr_context->psr_level.u32all; +	copy_settings_data->hyst_frames				= psr_context->timehyst_frames; +	copy_settings_data->hyst_lines				= psr_context->hyst_lines; +	copy_settings_data->phy_type				= psr_context->phyType; +	copy_settings_data->aux_repeat				= psr_context->aux_repeats; +	copy_settings_data->smu_optimizations_en	= psr_context->allow_smu_optimizations; +	copy_settings_data->skip_wait_for_pll_lock	= psr_context->skipPsrWaitForPllLock; +	copy_settings_data->frame_delay				= psr_context->frame_delay; +	copy_settings_data->smu_phy_id				= psr_context->smuPhyId; +	copy_settings_data->num_of_controllers		= psr_context->numberOfControllers; +	copy_settings_data->frame_cap_ind			= psr_context->psrFrameCaptureIndicationReq; +	copy_settings_data->phy_num					= psr_context->frame_delay & 0x7; +	copy_settings_data->link_rate				= psr_context->frame_delay & 0xF; + +	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); +	dc_dmub_srv_cmd_execute(dc->dmub_srv); +	dc_dmub_srv_wait_idle(dc->dmub_srv); + +	return true; +} + +static const struct dmub_psr_funcs psr_funcs = { +	.set_psr_enable			= dmub_set_psr_enable, +	.setup_psr				= dmub_setup_psr, +	.get_psr_state			= dmub_get_psr_state, +	.set_psr_level			= dmub_set_psr_level, +}; + +/** + * Construct PSR object. + */ +static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx) +{ +	psr->ctx = ctx; +	psr->funcs = &psr_funcs; +} + +/** + * Allocate and initialize PSR object. 
+ */
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
+{
+	struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL);
+
+	if (psr == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dmub_psr_construct(psr, ctx);
+
+	return psr;
+}
+
+/**
+ * Deallocate PSR object and clear the caller's pointer.
+ */
+void dmub_psr_destroy(struct dmub_psr **dmub)
+{
+	kfree(*dmub);	/* free the object itself, not the caller's pointer slot */
+	*dmub = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
new file mode 100644
index 000000000000..229958de3035
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_PSR_H_
+#define _DMUB_PSR_H_
+
+#include "os_types.h"
+
+struct dmub_psr {
+	struct dc_context *ctx;
+	const struct dmub_psr_funcs *funcs;
+};
+
+struct dmub_psr_funcs {
+	void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+	bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+	void (*get_psr_state)(uint32_t *psr_state);
+	void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+};
+
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
+
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 799d36299c9b..753cb8edd996 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -26,7 +26,6 @@
 #include "dc.h"
 #include "core_types.h"
 #include "clk_mgr.h"
-#include "hw_sequencer.h"
 #include "dce100_hw_sequencer.h"
 #include "resource.h"
@@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
 {
 	dce110_hw_sequencer_construct(dc);
-	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
 	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
 	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index a6b80fdaa666..34518da20009 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -27,6 +27,7 @@
 #define __DC_HWSS_DCE100_H__
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 struct dc;
 struct dc_state;
diff --git 
a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index a5e122c721ec..8f78bf9abbca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -725,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src)  	*clk_src = NULL;  } -static void destruct(struct dce110_resource_pool *pool) +static void dce100_resource_destruct(struct dce110_resource_pool *pool)  {  	unsigned int i; @@ -885,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool)  {  	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); -	destruct(dce110_pool); +	dce100_resource_destruct(dce110_pool);  	kfree(dce110_pool);  	*pool = NULL;  } @@ -950,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = {  	.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link  }; -static bool construct( +static bool dce100_resource_construct(  	uint8_t num_virtual_links,  	struct dc  *dc,  	struct dce110_resource_pool *pool) @@ -1122,7 +1122,7 @@ static bool construct(  	return true;  res_create_fail: -	destruct(pool); +	dce100_resource_destruct(pool);  	return false;  } @@ -1137,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(num_virtual_links, dc, pool)) +	if (dce100_resource_construct(num_virtual_links, dc, pool))  		return &pool->base;  	kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f0e837d14000..5b689273ff44 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -61,6 +61,8 @@  #include "atomfirmware.h" +#define GAMMA_HW_POINTS_NUM 256 +  /*   * All values are in milliseconds;   * For eDP, after 
power-up/power/down, @@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,  }  static bool -dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,  			       const struct dc_plane_state *plane_state)  {  	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; @@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,  }  static bool -dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,  				const struct dc_stream_state *stream)  {  	struct transform *xfm = pipe_ctx->plane_res.xfm; @@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)  {  	enum dc_lane_count lane_count =  		pipe_ctx->stream->link->cur_link_settings.lane_count; -  	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;  	struct dc_link *link = pipe_ctx->stream->link; - +	const struct dc *dc = link->dc;  	uint32_t active_total_with_borders;  	uint32_t early_control = 0; @@ -667,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)  	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,  						    pipe_ctx->stream_res.stream_enc->id, true); -	link->dc->hwss.update_info_frame(pipe_ctx); +	dc->hwss.update_info_frame(pipe_ctx);  	/* enable early control to avoid corruption on DP monitor*/  	active_total_with_borders = @@ -943,15 +944,15 @@ void dce110_edp_backlight_control(  void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)  {  	/* notify audio driver for audio modes of monitor */ -	struct dc *core_dc; +	struct dc *dc;  	struct clk_mgr *clk_mgr;  	unsigned int i, num_audio = 1;  	if (!pipe_ctx->stream)  		return; -	core_dc = pipe_ctx->stream->ctx->dc; -	clk_mgr = core_dc->clk_mgr; +	dc = pipe_ctx->stream->ctx->dc; +	clk_mgr = dc->clk_mgr;  	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled 
== true)  		return; @@ -959,7 +960,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)  	if (pipe_ctx->stream_res.audio) {  		for (i = 0; i < MAX_PIPES; i++) {  			/*current_state not updated yet*/ -			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) +			if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)  				num_audio++;  		} @@ -1047,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,  	struct encoder_unblank_param params = { { 0 } };  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->link; +	struct dce_hwseq *hws = link->dc->hwseq;  	/* only 3 items below are used by unblank */  	params.timing = pipe_ctx->stream->timing; @@ -1056,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,  		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms);  	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { -		link->dc->hwss.edp_backlight_control(link, true); +		hws->funcs.edp_backlight_control(link, true);  	}  } @@ -1064,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)  {  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->link; +	struct dce_hwseq *hws = link->dc->hwseq;  	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { -		link->dc->hwss.edp_backlight_control(link, false); +		hws->funcs.edp_backlight_control(link, false);  		dc_link_set_abm_disable(link);  	} @@ -1223,7 +1226,7 @@ static void program_scaler(const struct dc *dc,  {  	struct tg_color color = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	/* TOFPGA */  	if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)  		return; @@ -1322,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct drr_params params = {0};  	unsigned int 
event_triggers = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif +	struct dce_hwseq *hws = dc->hwseq; -	if (dc->hwss.disable_stream_gating) { -		dc->hwss.disable_stream_gating(dc, pipe_ctx); +	if (hws->funcs.disable_stream_gating) { +		hws->funcs.disable_stream_gating(dc, pipe_ctx);  	}  	if (pipe_ctx->stream_res.audio != NULL) { @@ -1357,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(  	/*  */  	/* Do not touch stream timing on seamless boot optimization. */  	if (!pipe_ctx->stream->apply_seamless_boot_optimization) -		dc->hwss.enable_stream_timing(pipe_ctx, context, dc); +		hws->funcs.enable_stream_timing(pipe_ctx, context, dc); -	if (dc->hwss.setup_vupdate_interrupt) -		dc->hwss.setup_vupdate_interrupt(pipe_ctx); +	if (hws->funcs.setup_vupdate_interrupt) +		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);  	params.vertical_total_min = stream->adjust.v_total_min;  	params.vertical_total_max = stream->adjust.v_total_max; @@ -1371,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(  	// DRR should set trigger event to monitor surface update event  	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)  		event_triggers = 0x80; +	/* Event triggers and num frames initialized for DRR, but can be +	 * later updated for PSR use. Note DRR trigger events are generated +	 * regardless of whether num frames met. 
+	 */  	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)  		pipe_ctx->stream_res.tg->funcs->set_static_screen_control( -				pipe_ctx->stream_res.tg, event_triggers); +				pipe_ctx->stream_res.tg, event_triggers, 2);  	if (!dc_is_virtual_signal(pipe_ctx->stream->signal))  		pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( @@ -1390,7 +1396,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(  		pipe_ctx->stream_res.opp,  		&stream->bit_depth_params,  		&stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	while (odm_pipe) {  		odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(  				odm_pipe->stream_res.opp, @@ -1404,7 +1409,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(  				&stream->clamping);  		odm_pipe = odm_pipe->next_odm_pipe;  	} -#endif  	if (!stream->dpms_off)  		core_link_enable_stream(context, pipe_ctx); @@ -1438,6 +1442,9 @@ static void power_down_encoders(struct dc *dc)  			if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)  				dp_receiver_power_ctrl(dc->links[i], false); +		if (signal != SIGNAL_TYPE_EDP) +			signal = SIGNAL_TYPE_NONE; +  		dc->links[i]->link_enc->funcs->disable_output(  				dc->links[i]->link_enc, signal);  	} @@ -1552,9 +1559,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)  	bool can_apply_edp_fast_boot = false;  	bool can_apply_seamless_boot = false;  	bool keep_edp_vdd_on = false; +	struct dce_hwseq *hws = dc->hwseq; -	if (dc->hwss.init_pipes) -		dc->hwss.init_pipes(dc, context); +	if (hws->funcs.init_pipes) +		hws->funcs.init_pipes(dc, context);  	edp_stream = get_edp_stream(context); @@ -1591,7 +1599,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)  	if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {  		if (edp_link_with_sink && !keep_edp_vdd_on) {  			/*turn off backlight before DP_blank and encoder powered down*/ -			dc->hwss.edp_backlight_control(edp_link_with_sink, false); +			
hws->funcs.edp_backlight_control(edp_link_with_sink, false);  		}  		/*resume from S3, no vbios posting, no need to power down again*/  		power_down_all_hw_blocks(dc); @@ -1702,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,  	struct drr_params params = {0};  	// DRR should set trigger event to monitor surface update event  	unsigned int event_triggers = 0x80; +	// Note DRR trigger events are generated regardless of whether num frames met. +	unsigned int num_frames = 2;  	params.vertical_total_max = vmax;  	params.vertical_total_min = vmin; @@ -1717,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,  		if (vmax != 0 && vmin != 0)  			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(  					pipe_ctx[i]->stream_res.tg, -					event_triggers); +					event_triggers, num_frames);  	}  } @@ -1734,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx,  }  static void set_static_screen_control(struct pipe_ctx **pipe_ctx, -		int num_pipes, const struct dc_static_screen_events *events) +		int num_pipes, const struct dc_static_screen_params *params)  {  	unsigned int i; -	unsigned int value = 0; +	unsigned int triggers = 0; -	if (events->overlay_update) -		value |= 0x100; -	if (events->surface_update) -		value |= 0x80; -	if (events->cursor_update) -		value |= 0x2; -	if (events->force_trigger) -		value |= 0x1; +	if (params->triggers.overlay_update) +		triggers |= 0x100; +	if (params->triggers.surface_update) +		triggers |= 0x80; +	if (params->triggers.cursor_update) +		triggers |= 0x2; +	if (params->triggers.force_trigger) +		triggers |= 0x1;  	if (num_pipes) {  		struct dc *dc = pipe_ctx[0]->stream->ctx->dc;  		if (dc->fbc_compressor) -			value |= 0x84; +			triggers |= 0x84;  	}  	for (i = 0; i < num_pipes; i++)  		pipe_ctx[i]->stream_res.tg->funcs-> -			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); +			set_static_screen_control(pipe_ctx[i]->stream_res.tg, +					triggers, params->num_frames);  }  /* @@ -2006,13 
+2017,14 @@ enum dc_status dce110_apply_ctx_to_hw(  		struct dc *dc,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct dc_bios *dcb = dc->ctx->dc_bios;  	enum dc_status status;  	int i;  	/* Reset old context */  	/* look up the targets that have been removed since last commit */ -	dc->hwss.reset_hw_ctx_wrap(dc, context); +	hws->funcs.reset_hw_ctx_wrap(dc, context);  	/* Skip applying if no targets */  	if (context->stream_count <= 0) @@ -2037,7 +2049,7 @@ enum dc_status dce110_apply_ctx_to_hw(  			continue;  		} -		dc->hwss.enable_display_power_gating( +		hws->funcs.enable_display_power_gating(  				dc, i, dc->ctx->dc_bios,  				PIPE_GATING_CONTROL_DISABLE);  	} @@ -2346,19 +2358,20 @@ static void init_hw(struct dc *dc)  	struct transform *xfm;  	struct abm *abm;  	struct dmcu *dmcu; +	struct dce_hwseq *hws = dc->hwseq;  	bp = dc->ctx->dc_bios;  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		xfm = dc->res_pool->transforms[i];  		xfm->funcs->transform_reset(xfm); -		dc->hwss.enable_display_power_gating( +		hws->funcs.enable_display_power_gating(  				dc, i, bp,  				PIPE_GATING_CONTROL_INIT); -		dc->hwss.enable_display_power_gating( +		hws->funcs.enable_display_power_gating(  				dc, i, bp,  				PIPE_GATING_CONTROL_DISABLE); -		dc->hwss.enable_display_pipe_clock_gating( +		hws->funcs.enable_display_pipe_clock_gating(  			dc->ctx,  			true);  	} @@ -2444,6 +2457,8 @@ static void dce110_program_front_end_for_pipe(  	struct xfm_grph_csc_adjustment adjust;  	struct out_csc_color_matrix tbl_entry;  	unsigned int i; +	struct dce_hwseq *hws = dc->hwseq; +  	DC_LOGGER_INIT();  	memset(&tbl_entry, 0, sizeof(tbl_entry)); @@ -2502,10 +2517,10 @@ static void dce110_program_front_end_for_pipe(  	if (pipe_ctx->plane_state->update_flags.bits.full_update ||  			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||  			pipe_ctx->plane_state->update_flags.bits.gamma_change) -		dc->hwss.set_input_transfer_func(pipe_ctx, 
pipe_ctx->plane_state); +		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);  	if (pipe_ctx->plane_state->update_flags.bits.full_update) -		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); +		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);  	DC_LOG_SURFACE(  			"Pipe:%d %p: addr hi:0x%x, " @@ -2608,6 +2623,7 @@ static void dce110_apply_ctx_for_surface(  static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	int fe_idx = pipe_ctx->plane_res.mi ?  		pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx; @@ -2615,7 +2631,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)  	if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)  		return; -	dc->hwss.enable_display_power_gating( +	hws->funcs.enable_display_power_gating(  		dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);  	dc->res_pool->transforms[fe_idx]->funcs->transform_reset( @@ -2704,14 +2720,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {  	.program_gamut_remap = program_gamut_remap,  	.program_output_csc = program_output_csc,  	.init_hw = init_hw, -	.init_pipes = init_pipes,  	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,  	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,  	.update_plane_addr = update_plane_addr,  	.update_pending_status = dce110_update_pending_status, -	.set_input_transfer_func = dce110_set_input_transfer_func, -	.set_output_transfer_func = dce110_set_output_transfer_func, -	.power_down = dce110_power_down,  	.enable_accelerated_mode = dce110_enable_accelerated_mode,  	.enable_timing_synchronization = dce110_enable_timing_synchronization,  	.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset, @@ -2722,8 +2734,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {  	.blank_stream = dce110_blank_stream,  	.enable_audio_stream = dce110_enable_audio_stream,  	.disable_audio_stream = 
dce110_disable_audio_stream, -	.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, -	.enable_display_power_gating = dce110_enable_display_power_gating,  	.disable_plane = dce110_power_down_fe,  	.pipe_control_lock = dce_pipe_control_lock,  	.prepare_bandwidth = dce110_prepare_bandwidth, @@ -2731,22 +2741,33 @@ static const struct hw_sequencer_funcs dce110_funcs = {  	.set_drr = set_drr,  	.get_position = get_position,  	.set_static_screen_control = set_static_screen_control, -	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, -	.enable_stream_timing = dce110_enable_stream_timing, -	.disable_stream_gating = NULL, -	.enable_stream_gating = NULL,  	.setup_stereo = NULL,  	.set_avmute = dce110_set_avmute,  	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, -	.edp_backlight_control = dce110_edp_backlight_control,  	.edp_power_control = dce110_edp_power_control,  	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,  	.set_cursor_position = dce110_set_cursor_position,  	.set_cursor_attribute = dce110_set_cursor_attribute  }; +static const struct hwseq_private_funcs dce110_private_funcs = { +	.init_pipes = init_pipes, +	.update_plane_addr = update_plane_addr, +	.set_input_transfer_func = dce110_set_input_transfer_func, +	.set_output_transfer_func = dce110_set_output_transfer_func, +	.power_down = dce110_power_down, +	.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, +	.enable_display_power_gating = dce110_enable_display_power_gating, +	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, +	.enable_stream_timing = dce110_enable_stream_timing, +	.disable_stream_gating = NULL, +	.enable_stream_gating = NULL, +	.edp_backlight_control = dce110_edp_backlight_control, +}; +  void dce110_hw_sequencer_construct(struct dc *dc)  {  	dc->hwss = dce110_funcs; +	dc->hwseq->funcs = dce110_private_funcs;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h 
index 2f9b7dbdf415..26a9c14a58b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -27,8 +27,8 @@  #define __DC_HWSS_DCE110_H__  #include "core_types.h" +#include "hw_sequencer_private.h" -#define GAMMA_HW_POINTS_NUM 256  struct dc;  struct dc_state;  struct dm_pp_display_configuration; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 83a4dbf6d76e..bf14e9ab040c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -782,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src)  	*clk_src = NULL;  } -static void destruct(struct dce110_resource_pool *pool) +static void dce110_resource_destruct(struct dce110_resource_pool *pool)  {  	unsigned int i; @@ -1097,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay(  		struct dc_stream_state *stream)  {  	struct dc *dc = stream->ctx->dc; +	struct dce_hwseq *hws = dc->hwseq;  	struct resource_context *res_ctx = &context->res_ctx;  	unsigned int underlay_idx = pool->underlay_pipe_index;  	struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1117,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay(  		struct tg_color black_color = {0};  		struct dc_bios *dcb = dc->ctx->dc_bios; -		dc->hwss.enable_display_power_gating( +		hws->funcs.enable_display_power_gating(  				dc,  				pipe_ctx->stream_res.tg->inst,  				dcb, PIPE_GATING_CONTROL_DISABLE); @@ -1161,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)  {  	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); -	destruct(dce110_pool); +	dce110_resource_destruct(dce110_pool);  	kfree(dce110_pool);  	*pool = NULL;  } @@ -1313,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap(  		return &carrizo_resource_cap;  } -static 
bool construct( +static bool dce110_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc,  	struct dce110_resource_pool *pool, @@ -1492,7 +1493,7 @@ static bool construct(  	return true;  res_create_fail: -	destruct(pool); +	dce110_resource_destruct(pool);  	return false;  } @@ -1507,7 +1508,7 @@ struct resource_pool *dce110_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(num_virtual_links, dc, pool, asic_id)) +	if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id))  		return &pool->base;  	kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 5f7c2c5641c4..1ea7db8eeb98 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr(  void dce110_timing_generator_set_static_screen_control(  	struct timing_generator *tg, -	uint32_t value) +	uint32_t event_triggers, +	uint32_t num_frames)  {  	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);  	uint32_t static_screen_cntl = 0;  	uint32_t addr = 0; +	// By register spec, it only takes 8 bit value +	if (num_frames > 0xFF) +		num_frames = 0xFF; +  	addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);  	static_screen_cntl = dm_read_reg(tg->ctx, addr);  	set_reg_field_value(static_screen_cntl, -				value, +				event_triggers,  				CRTC_STATIC_SCREEN_CONTROL,  				CRTC_STATIC_SCREEN_EVENT_MASK);  	set_reg_field_value(static_screen_cntl, -				2, +				num_frames,  				CRTC_STATIC_SCREEN_CONTROL,  				CRTC_STATIC_SCREEN_FRAME_COUNT); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h index 768ccf27ada9..d8a5ed7b485d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h @@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr(  void dce110_timing_generator_set_static_screen_control(  	struct timing_generator *tg, -	uint32_t value); +	uint32_t event_triggers, +	uint32_t num_frames);  void dce110_timing_generator_get_crtc_scanoutpos(  	struct timing_generator *tg, diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 1e4a7c13f0ed..19873ee1f78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc)  	 * structure  	 */  	dce110_hw_sequencer_construct(dc); -	dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating; +	dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h index e646f4a37fa2..943f1b2c5b2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h @@ -27,6 +27,7 @@  #define __DC_HWSS_DCE112_H__  #include "core_types.h" +#include "hw_sequencer_private.h"  struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 97dcc5d0862b..700ad8b3e54b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -744,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src)  	*clk_src = NULL;  } -static void destruct(struct dce110_resource_pool *pool) +static void dce112_resource_destruct(struct dce110_resource_pool *pool)  {  	unsigned int i; @@ -1013,7 +1013,7 @@ static void 
dce112_destroy_resource_pool(struct resource_pool **pool)  {  	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); -	destruct(dce110_pool); +	dce112_resource_destruct(dce110_pool);  	kfree(dce110_pool);  	*pool = NULL;  } @@ -1186,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap(  		return &polaris_10_resource_cap;  } -static bool construct( +static bool dce112_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc,  	struct dce110_resource_pool *pool) @@ -1372,7 +1372,7 @@ static bool construct(  	return true;  res_create_fail: -	destruct(pool); +	dce112_resource_destruct(pool);  	return false;  } @@ -1386,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(num_virtual_links, dc, pool)) +	if (dce112_resource_construct(num_virtual_links, dc, pool))  		return &pool->base;  	kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 1ca30928025e..66a13aa39c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc)  	 * structure  	 */  	dce110_hw_sequencer_construct(dc); -	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; +	dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;  	dc->hwss.update_dchub = dce120_update_dchub;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h index c51afbd0b012..bc024534732f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h @@ -27,6 +27,7 @@  #define __DC_HWSS_DCE120_H__  #include "core_types.h" +#include "hw_sequencer_private.h"  struct dc; diff --git 
a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 63543f6918ff..53ab88ef71f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -63,8 +63,8 @@  #include "soc15_hw_ip.h"  #include "vega10_ip_offset.h"  #include "nbio/nbio_6_1_offset.h" -#include "mmhub/mmhub_9_4_0_offset.h" -#include "mmhub/mmhub_9_4_0_sh_mask.h" +#include "mmhub/mmhub_1_0_offset.h" +#include "mmhub/mmhub_1_0_sh_mask.h"  #include "reg_helper.h"  #include "dce100/dce100_resource.h" @@ -587,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm)  	*xfm = NULL;  } -static void destruct(struct dce110_resource_pool *pool) +static void dce120_resource_destruct(struct dce110_resource_pool *pool)  {  	unsigned int i; @@ -872,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)  {  	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); -	destruct(dce110_pool); +	dce120_resource_destruct(dce110_pool);  	kfree(dce110_pool);  	*pool = NULL;  } @@ -1024,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)  	return value;  } -static bool construct( +static bool dce120_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc,  	struct dce110_resource_pool *pool) @@ -1237,7 +1237,7 @@ controller_create_fail:  clk_src_create_fail:  res_create_fail: -	destruct(pool); +	dce120_resource_destruct(pool);  	return false;  } @@ -1252,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(num_virtual_links, dc, pool)) +	if (dce120_resource_construct(num_virtual_links, dc, pool))  		return &pool->base;  	kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c index 098e56962f2a..82bc4e192bbf 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c @@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg,  static void dce120_timing_generator_set_static_screen_control(  	struct timing_generator *tg, -	uint32_t value) +	uint32_t event_triggers, +	uint32_t num_frames)  {  	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); +	// By register spec, it only takes 8 bit value +	if (num_frames > 0xFF) +		num_frames = 0xFF; +  	CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL, -			CRTC_STATIC_SCREEN_EVENT_MASK, value, -			CRTC_STATIC_SCREEN_FRAME_COUNT, 2); +			CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers, +			CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);  }  void dce120_timing_generator_set_test_pattern( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index c4543178ba20..893261c81854 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)  {  	dce110_hw_sequencer_construct(dc); -	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; +	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;  	dc->hwss.pipe_control_lock = dce_pipe_control_lock;  	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;  	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h index 7a1b31def66f..e43af832d00c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h @@ -27,6 +27,7 @@  #define __DC_HWSS_DCE80_H__  #include "core_types.h" +#include "hw_sequencer_private.h"  struct dc; diff --git 
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3e8d4b49f279..2ad5c28c6e66 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -773,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create(  	return &ipp->base;  } -static void destruct(struct dce110_resource_pool *pool) +static void dce80_resource_destruct(struct dce110_resource_pool *pool)  {  	unsigned int i; @@ -901,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)  {  	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); -	destruct(dce110_pool); +	dce80_resource_destruct(dce110_pool);  	kfree(dce110_pool);  	*pool = NULL;  } @@ -1093,7 +1093,7 @@ static bool dce80_construct(  	return true;  res_create_fail: -	destruct(pool); +	dce80_resource_destruct(pool);  	return false;  } @@ -1290,7 +1290,7 @@ static bool dce81_construct(  	return true;  res_create_fail: -	destruct(pool); +	dce80_resource_destruct(pool);  	return false;  } @@ -1483,7 +1483,7 @@ static bool dce83_construct(  	return true;  res_create_fail: -	destruct(pool); +	dce80_resource_destruct(pool);  	return false;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..62ad1a11bff9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,7 +22,8 @@  #  # Makefile for DCN. 
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ +DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ +		dcn10_hw_sequencer_debug.o \  		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \  		dcn10_hubp.o dcn10_mpc.o \  		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 997e9582edc7..0e682b5aa3eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -290,12 +290,8 @@ void dpp1_cnv_setup (  		enum surface_pixel_format format,  		enum expansion_mode mode,  		struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  		enum dc_color_space input_color_space,  		struct cnv_alpha_2bit_lut *alpha_2bit_lut) -#else -		enum dc_color_space input_color_space) -#endif  {  	uint32_t pixel_format;  	uint32_t alpha_en; @@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = {  		.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,  		.dpp_dppclk_control = dpp1_dppclk_control,  		.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		.dpp_program_blnd_lut = NULL,  		.dpp_program_shaper_lut = NULL,  		.dpp_program_3dlut = NULL -#endif  };  static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 1d4a7d640334..2edf566b3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1486,12 +1486,8 @@ void dpp1_cnv_setup (  		enum surface_pixel_format format,  		enum expansion_mode mode,  		struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  		enum dc_color_space input_color_space,  		struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else -		enum dc_color_space 
input_color_space); -#endif  void dpp1_full_bypass(struct dpp *dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index aa0c7a7d13a0..4d3f7d5e1473 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -88,26 +88,6 @@ enum dscl_mode_sel {  	DSCL_MODE_DSCL_BYPASS = 6  }; -static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { -	{COLOR_SPACE_SRGB, -		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, -	{COLOR_SPACE_SRGB_LIMITED, -		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, -	{COLOR_SPACE_YCBCR601, -		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, -						0, 0x2000, 0x38b4, 0xe3a6} }, -	{COLOR_SPACE_YCBCR601_LIMITED, -		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, -						0, 0x2568, 0x40de, 0xdd3a} }, -	{COLOR_SPACE_YCBCR709, -		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, -						0x2000, 0x3b61, 0xe24f} }, - -	{COLOR_SPACE_YCBCR709_LIMITED, -		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, -						0x2568, 0x43ee, 0xdbb2} } -}; -  static void program_gamut_remap(  		struct dcn10_dpp *dpp,  		const uint16_t *regval, @@ -352,6 +332,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,  	uint32_t i;  	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); +	REG_SEQ_START(); +  	for (i = 0 ; i < num; i++) {  		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);  		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); @@ -626,10 +608,16 @@ void dpp1_set_degamma(  	case IPP_DEGAMMA_MODE_HW_xvYCC:  		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);  			break; +	case IPP_DEGAMMA_MODE_USER_PWL: +		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); +		break;  	default:  		BREAK_TO_DEBUGGER();  		break;  	} + +	REG_SEQ_SUBMIT(); +	REG_SEQ_WAIT_DONE();  }  void dpp1_degamma_ram_select( @@ -731,10 +719,8 @@ void 
dpp1_full_bypass(struct dpp *dpp_base)  	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */  	if (dpp->tf_mask->CM_BYPASS_EN)  		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	else  		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); -#endif  	/* Setting degamma bypass for now */  	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index d67e0abeee93..fce37c527a0b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb(  			INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */  			LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */  	} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	else {  		/* DSCL caps: pixel data processed in float format */  		REG_SET_2(LB_DATA_FORMAT, 0,  			INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */  			LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */  	} -#endif  	REG_SET_2(LB_MEMORY_CTRL, 0,  		MEMORY_CONFIG, mem_size_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c index 374cc9acda3b..b6391a5ead78 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c @@ -23,7 +23,7 @@   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "reg_helper.h"  #include "resource.h" @@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = {  	.update				= NULL,  	.set_stereo			= NULL,  	.set_new_content		= NULL, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	.set_warmup			= NULL, -#endif  	.dwb_set_scaler			= NULL,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h index 
c175edd0bae7..d56ea7c8171e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h @@ -24,7 +24,7 @@  #ifndef __DC_DWBC_DCN10_H__  #define __DC_DWBC_DCN10_H__ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  /* DCN */  #define BASE_INNER(seg) \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index a02c10e23e0d..f36a0d8cedfe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,  		output->grph.rgb.max_compressed_blk_size = 64;  		output->grph.rgb.independent_64b_blks = true;  		break; +	default: +		ASSERT(false); +		break;  	}  	output->capable = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 69d903d68661..af57751253de 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -121,7 +121,6 @@ struct dcn_hubbub_registers {  	uint32_t DCN_VM_AGP_BASE;  	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;  	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; @@ -140,7 +139,6 @@ struct dcn_hubbub_registers {  	uint32_t DCHVM_CLK_CTRL;  	uint32_t DCHVM_RIOMMU_CTRL0;  	uint32_t DCHVM_RIOMMU_STAT0; -#endif  };  /* set field name */ @@ -232,7 +230,6 @@ struct dcn_hubbub_registers {  		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\  		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define HUBBUB_HVM_REG_FIELD_LIST(type) \  		type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\  		type 
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ @@ -278,22 +275,17 @@ struct dcn_hubbub_registers {  		type HOSTVM_POWERSTATUS; \  		type RIOMMU_ACTIVE; \  		type HOSTVM_PREFETCH_DONE -#endif  struct dcn_hubbub_shift {  	DCN_HUBBUB_REG_FIELD_LIST(uint8_t);  	HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	HUBBUB_HVM_REG_FIELD_LIST(uint8_t); -#endif  };  struct dcn_hubbub_mask {  	DCN_HUBBUB_REG_FIELD_LIST(uint32_t);  	HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	HUBBUB_HVM_REG_FIELD_LIST(uint32_t); -#endif  };  struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 14d1be6c66e6..31b64733d693 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -306,7 +306,6 @@ void hubp1_program_pixel_format(  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 12);  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 112); @@ -327,7 +326,6 @@ void hubp1_program_pixel_format(  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 119);  		break; -#endif  	default:  		BREAK_TO_DEBUGGER();  		break; @@ -1014,6 +1012,9 @@ void hubp1_read_state_common(struct hubp *hubp)  			HUBP_TTU_DISABLE, &s->ttu_disable,  			HUBP_UNDERFLOW_STATUS, &s->underflow_status); +	REG_GET(HUBP_CLK_CNTL, +			HUBP_CLOCK_ENABLE, &s->clock_en); +  	REG_GET(DCN_GLOBAL_TTU_CNTL,  			MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1248,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {  	.hubp_get_underflow_status = hubp1_get_underflow_status,  	.hubp_init = hubp1_init, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	.dmdata_set_attributes = NULL,  	.dmdata_load = NULL, -#endif  };  /*****************************************/ diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index ae70d9c0aa1d..780af5b3c16f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -670,6 +670,7 @@ struct dcn_hubp_state {  	uint32_t sw_mode;  	uint32_t dcc_en;  	uint32_t blank_en; +	uint32_t clock_en;  	uint32_t underflow_status;  	uint32_t ttu_disable;  	uint32_t min_ttu_vblank; @@ -728,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp,  		bool enable,  		enum hubp_ind_block_size independent_64b_blks); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  bool hubp1_program_surface_flip_and_addr(  	struct hubp *hubp,  	const struct dc_plane_address *address,  	bool flip_immediate); -#endif  bool hubp1_is_flip_pending(struct hubp *hubp);  void hubp1_cursor_set_attributes( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index eb91432621ab..f2127afb37b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -25,17 +25,18 @@  #include <linux/delay.h>  #include "dm_services.h" +#include "basics/dc_common.h"  #include "core_types.h"  #include "resource.h"  #include "custom_float.h"  #include "dcn10_hw_sequencer.h" -#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer_debug.h"  #include "dce/dce_hwseq.h"  #include "abm.h"  #include "dmcu.h"  #include "dcn10_optc.h" -#include "dcn10/dcn10_dpp.h" -#include "dcn10/dcn10_mpc.h" +#include "dcn10_dpp.h" +#include "dcn10_mpc.h"  #include "timing_generator.h"  #include "opp.h"  #include "ipp.h" @@ -49,9 +50,7 @@  #include "clk_mgr.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dsc.h" -#endif  #define DC_LOGGER_INIT(logger) @@ -68,6 +67,8 @@  #define DTN_INFO_MICRO_SEC(ref_cycle) \  	print_microsec(dc_ctx, log_ctx, ref_cycle) +#define GAMMA_HW_POINTS_NUM 256 +  void 
print_microsec(struct dc_context *dc_ctx,  	struct dc_log_buffer_ctx *log_ctx,  	uint32_t ref_cycle) @@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx,  			us_x10 % frac);  } +static void dcn10_lock_all_pipes(struct dc *dc, +	struct dc_state *context, +	bool lock) +{ +	struct pipe_ctx *pipe_ctx; +	struct timing_generator *tg; +	int i; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		pipe_ctx = &context->res_ctx.pipe_ctx[i]; +		tg = pipe_ctx->stream_res.tg; +		/* +		 * Only lock the top pipe's tg to prevent redundant +		 * (un)locking. Also skip if pipe is disabled. +		 */ +		if (pipe_ctx->top_pipe || +		    !pipe_ctx->stream || !pipe_ctx->plane_state || +		    !tg->funcs->is_tg_enabled(tg)) +			continue; + +		if (lock) +			tg->funcs->lock(tg); +		else +			tg->funcs->unlock(tg); +	} +} +  static void log_mpc_crc(struct dc *dc,  	struct dc_log_buffer_ctx *log_ctx)  { @@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)  	struct resource_pool *pool = dc->res_pool;  	int i; -	DTN_INFO("HUBP:  format  addr_hi  width  height" -			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow" -			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n"); +	DTN_INFO( +		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");  	for (i = 0; i < pool->pipe_count; i++) {  		struct hubp *hubp = pool->hubps[i];  		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); @@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)  		hubp->funcs->hubp_read_state(hubp);  		if (!s->blank_en) { -			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh" -					"  %6d  %8d  %7d  %8xh", +			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",  					hubp->inst,  					s->pixel_format,  					s->inuse_addr_hi, @@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, 
void *log_ctx)  					s->sw_mode,  					s->dcc_en,  					s->blank_en, +					s->clock_en,  					s->ttu_disable,  					s->underflow_status);  			DTN_INFO_MICRO_SEC(s->min_ttu_vblank); @@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc,  	}  	DTN_INFO("\n"); -	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel" -			"  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow\n"); +	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");  	for (i = 0; i < pool->timing_generator_count; i++) {  		struct timing_generator *tg = pool->timing_generators[i];  		struct dcn_otg_state s = {0}; - +		/* Read shared OTG state registers for all DCNx */  		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); +		/* +		 * For DCN2 and greater, a register on the OPP is used to +		 * determine if the CRTC is blanked instead of the OTG. So use +		 * dpg_is_blanked() if exists, otherwise fallback on otg. +		 * +		 * TODO: Implement DCN-specific read_otg_state hooks. +		 */ +		if (pool->opps[i]->funcs->dpg_is_blanked) +			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); +		else +			s.blank_enabled = tg->funcs->is_blanked(tg); +  		//only print if OTG master is enabled  		if ((s.otg_enabled & 1) == 0)  			continue; -		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d" -				" %5d %5d %5d %5d  %9d\n", +		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",  				tg->inst,  				s.v_blank_start,  				s.v_blank_end, @@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc,  				s.h_sync_a_pol,  				s.h_total,  				s.v_total, -				s.underflow_occurred_status); +				s.underflow_occurred_status, +				s.blank_enabled);  		// Clear underflow for debug purposes  		// We want to keep underflow sticky bit on for the longevity tests outside of test environment. 
@@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc,  	}  	DTN_INFO("\n"); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");  	for (i = 0; i < pool->res_cap->num_dsc; i++) {  		struct display_stream_compressor *dsc = pool->dscs[i]; @@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc,  	}  	DTN_INFO("\n"); -	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS\n"); +	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");  	for (i = 0; i < dc->link_count; i++) {  		struct link_encoder *lenc = dc->links[i]->link_enc; @@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc,  		if (lenc->funcs->read_state) {  			lenc->funcs->read_state(lenc, &s); -			DTN_INFO("[%-3d]: %-12d %-22d %-22d\n", +			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",  				i,  				s.dphy_fec_en,  				s.dphy_fec_ready_shadow, -				s.dphy_fec_active_status); +				s.dphy_fec_active_status, +				s.dp_link_training_complete);  			DTN_INFO("\n");  		}  	}  	DTN_INFO("\n"); -#endif  	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"  		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n", @@ -438,14 +475,14 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)  	return false;  } -static void dcn10_enable_power_gating_plane( +void dcn10_enable_power_gating_plane(  	struct dce_hwseq *hws,  	bool enable)  { -	bool force_on = 1; /* disable power gating */ +	bool force_on = true; /* disable power gating */  	if (enable) -		force_on = 0; +		force_on = false;  	/* DCHUBP0/1/2/3 */  	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -460,7 +497,7 @@ static void dcn10_enable_power_gating_plane(  	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);  } -static void dcn10_disable_vga( +void dcn10_disable_vga(  	struct dce_hwseq *hws)  {  	unsigned int in_vga1_mode = 0; @@ 
-493,7 +530,7 @@ static void dcn10_disable_vga(  	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);  } -static void dcn10_dpp_pg_control( +void dcn10_dpp_pg_control(  		struct dce_hwseq *hws,  		unsigned int dpp_inst,  		bool power_on) @@ -545,7 +582,7 @@ static void dcn10_dpp_pg_control(  	}  } -static void dcn10_hubp_pg_control( +void dcn10_hubp_pg_control(  		struct dce_hwseq *hws,  		unsigned int hubp_inst,  		bool power_on) @@ -605,8 +642,8 @@ static void power_on_plane(  	if (REG(DC_IP_REQUEST_CNTL)) {  		REG_SET(DC_IP_REQUEST_CNTL, 0,  				IP_REQUEST_EN, 1); -		hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true); -		hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true); +		hws->funcs.dpp_pg_control(hws, plane_id, true); +		hws->funcs.hubp_pg_control(hws, plane_id, true);  		REG_SET(DC_IP_REQUEST_CNTL, 0,  				IP_REQUEST_EN, 0);  		DC_LOG_DEBUG( @@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)  	REG_SET(DC_IP_REQUEST_CNTL, 0,  			IP_REQUEST_EN, 1); -	dc->hwss.hubp_pg_control(hws, 0, false); +	hws->funcs.hubp_pg_control(hws, 0, false);  	REG_SET(DC_IP_REQUEST_CNTL, 0,  			IP_REQUEST_EN, 0); @@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)  	REG_SET(DC_IP_REQUEST_CNTL, 0,  			IP_REQUEST_EN, 1); -	dc->hwss.hubp_pg_control(hws, 0, true); +	hws->funcs.hubp_pg_control(hws, 0, true);  	REG_SET(DC_IP_REQUEST_CNTL, 0,  			IP_REQUEST_EN, 0); @@ -664,16 +701,16 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)  	hws->wa_state.DEGVIDCN10_253_applied = true;  } -static void dcn10_bios_golden_init(struct dc *dc) +void dcn10_bios_golden_init(struct dc *dc)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct dc_bios *bp = dc->ctx->dc_bios;  	int i;  	bool allow_self_fresh_force_enable = true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) -	if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) +	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))  		return; -#endif +  	if 
(dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)  		allow_self_fresh_force_enable =  				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -732,7 +769,7 @@ static void false_optc_underflow_wa(  		tg->funcs->clear_optc_underflow(tg);  } -static enum dc_status dcn10_enable_stream_timing( +enum dc_status dcn10_enable_stream_timing(  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context,  		struct dc *dc) @@ -823,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe(  		struct dc_state *context)  {  	int i; +	struct dc_link *link;  	DC_LOGGER_INIT(dc->ctx->logger);  	if (pipe_ctx->stream_res.stream_enc == NULL) {  		pipe_ctx->stream = NULL; @@ -830,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe(  	}  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -		/* DPMS may already disable */ -		if (!pipe_ctx->stream->dpms_off) +		link = pipe_ctx->stream->link; +		/* DPMS may already disable or */ +		/* dpms_off status is incorrect due to fastboot +		 * feature. When system resume from S4 with second +		 * screen only, the dpms_off would be true but +		 * VBIOS lit up eDP, so check link status too. 
+		 */ +		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)  			core_link_disable_stream(pipe_ctx);  		else if (pipe_ctx->stream_res.audio)  			dc->hwss.disable_audio_stream(pipe_ctx); @@ -978,8 +1022,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)  }  /* trigger HW to start disconnect plane from stream on the next vsync */ -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	int dpp_id = pipe_ctx->plane_res.dpp->inst;  	struct mpc *mpc = dc->res_pool->mpc; @@ -1004,10 +1049,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)  		hubp->funcs->hubp_disconnect(hubp);  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  } -static void dcn10_plane_atomic_power_down(struct dc *dc, +void dcn10_plane_atomic_power_down(struct dc *dc,  		struct dpp *dpp,  		struct hubp *hubp)  { @@ -1017,8 +1062,8 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,  	if (REG(DC_IP_REQUEST_CNTL)) {  		REG_SET(DC_IP_REQUEST_CNTL, 0,  				IP_REQUEST_EN, 1); -		dc->hwss.dpp_pg_control(hws, dpp->inst, false); -		dc->hwss.hubp_pg_control(hws, hubp->inst, false); +		hws->funcs.dpp_pg_control(hws, dpp->inst, false); +		hws->funcs.hubp_pg_control(hws, hubp->inst, false);  		dpp->funcs->dpp_reset(dpp);  		REG_SET(DC_IP_REQUEST_CNTL, 0,  				IP_REQUEST_EN, 0); @@ -1030,8 +1075,9 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,  /* disable HW used by plane.   
* note:  cannot disable until disconnect is complete   */ -static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct dpp *dpp = pipe_ctx->plane_res.dpp;  	int opp_id = hubp->opp_id; @@ -1050,7 +1096,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  	hubp->power_gated = true;  	dc->optimized_required = false; /* We're powering off, no need to optimize */ -	dc->hwss.plane_atomic_power_down(dc, +	hws->funcs.plane_atomic_power_down(dc,  			pipe_ctx->plane_res.dpp,  			pipe_ctx->plane_res.hubp); @@ -1062,14 +1108,15 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  	pipe_ctx->plane_state = NULL;  } -static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	DC_LOGGER_INIT(dc->ctx->logger);  	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)  		return; -	dc->hwss.plane_atomic_disable(dc, pipe_ctx); +	hws->funcs.plane_atomic_disable(dc, pipe_ctx);  	apply_DEGVIDCN10_253_wa(dc); @@ -1077,9 +1124,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)  					pipe_ctx->pipe_idx);  } -static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) +void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  {  	int i; +	struct dce_hwseq *hws = dc->hwseq;  	bool can_apply_seamless_boot = false;  	for (i = 0; i < context->stream_count; i++) { @@ -1104,8 +1152,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  		 * command table.  		 
*/  		if (tg->funcs->is_tg_enabled(tg)) { -			if (dc->hwss.init_blank != NULL) { -				dc->hwss.init_blank(dc, tg); +			if (hws->funcs.init_blank != NULL) { +				hws->funcs.init_blank(dc, tg);  				tg->funcs->lock(tg);  			} else {  				tg->funcs->lock(tg); @@ -1115,7 +1163,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  		}  	} -	for (i = 0; i < dc->res_pool->pipe_count; i++) { +	/* num_opp will be equal to number of mpcc */ +	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {  		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];  		/* Cannot reset the MPC mux if seamless boot */ @@ -1139,8 +1188,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  		if (can_apply_seamless_boot &&  			pipe_ctx->stream != NULL &&  			pipe_ctx->stream_res.tg->funcs->is_tg_enabled( -				pipe_ctx->stream_res.tg)) +				pipe_ctx->stream_res.tg)) { +			// Enable double buffering for OTG_BLANK no matter if +			// seamless boot is enabled or not to suppress global sync +			// signals when OTG blanked. This is to prevent pipe from +			// requesting data while in PSR. +			tg->funcs->tg_init(tg);  			continue; +		}  		/* Disable on the current state so the new one isn't cleared. 
*/  		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -1162,7 +1217,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;  		pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; -		dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); +		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);  		if (tg->funcs->is_tg_enabled(tg))  			tg->funcs->unlock(tg); @@ -1176,7 +1231,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  	}  } -static void dcn10_init_hw(struct dc *dc) +void dcn10_init_hw(struct dc *dc)  {  	int i;  	struct abm *abm = dc->res_pool->abm; @@ -1208,15 +1263,15 @@ static void dcn10_init_hw(struct dc *dc)  		}  		//Enable ability to power gate / don't force power on permanently -		dc->hwss.enable_power_gating_plane(hws, true); +		hws->funcs.enable_power_gating_plane(hws, true);  		return;  	}  	if (!dcb->funcs->is_accelerated_mode(dcb)) -		dc->hwss.disable_vga(dc->hwseq); +		hws->funcs.disable_vga(dc->hwseq); -	dc->hwss.bios_golden_init(dc); +	hws->funcs.bios_golden_init(dc);  	if (dc->ctx->dc_bios->fw_info_valid) {  		res_pool->ref_clocks.xtalin_clock_inKhz =  				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; @@ -1258,11 +1313,9 @@ static void dcn10_init_hw(struct dc *dc)  	}  	/* Power gate DSCs */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	for (i = 0; i < res_pool->res_cap->num_dsc; i++) -		if (dc->hwss.dsc_pg_control != NULL) -			dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); -#endif +		if (hws->funcs.dsc_pg_control != NULL) +			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);  	/* If taking control over from VBIOS, we may want to optimize our first  	 * mode set, so we need to skip powering down pipes until we know which @@ -1271,7 +1324,7 @@ static void dcn10_init_hw(struct dc *dc)  	 * everything down.  	 
*/  	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { -		dc->hwss.init_pipes(dc, dc->current_state); +		hws->funcs.init_pipes(dc, dc->current_state);  	}  	for (i = 0; i < res_pool->audio_count; i++) { @@ -1285,7 +1338,7 @@ static void dcn10_init_hw(struct dc *dc)  		abm->funcs->abm_init(abm);  	} -	if (dmcu != NULL) +	if (dmcu != NULL && !dmcu->auto_load_dmcu)  		dmcu->funcs->dmcu_init(dmcu);  	if (abm != NULL && dmcu != NULL) @@ -1303,18 +1356,19 @@ static void dcn10_init_hw(struct dc *dc)  		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);  	} -	dc->hwss.enable_power_gating_plane(dc->hwseq, true); +	hws->funcs.enable_power_gating_plane(dc->hwseq, true);  	if (dc->clk_mgr->funcs->notify_wm_ranges)  		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);  } -static void dcn10_reset_hw_ctx_wrap( +void dcn10_reset_hw_ctx_wrap(  		struct dc *dc,  		struct dc_state *context)  {  	int i; +	struct dce_hwseq *hws = dc->hwseq;  	/* Reset Back End*/  	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -1333,8 +1387,8 @@ static void dcn10_reset_hw_ctx_wrap(  			struct clock_source *old_clk = pipe_ctx_old->clock_source;  			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); -			if (dc->hwss.enable_stream_gating) -				dc->hwss.enable_stream_gating(dc, pipe_ctx); +			if (hws->funcs.enable_stream_gating) +				hws->funcs.enable_stream_gating(dc, pipe_ctx);  			if (old_clk)  				old_clk->funcs->cs_power_down(old_clk);  		} @@ -1367,9 +1421,7 @@ static bool patch_address_for_sbs_tb_stereo(  	return false;  } - - -static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)  {  	bool addr_patched = false;  	PHYSICAL_ADDRESS_LOC addr; @@ -1394,8 +1446,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c  		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;  } -static bool 
dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, -					  const struct dc_plane_state *plane_state) +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +			const struct dc_plane_state *plane_state)  {  	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;  	const struct dc_transfer_func *tf = NULL; @@ -1427,6 +1479,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,  			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);  			break;  		case TRANSFER_FUNCTION_PQ: +			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); +			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); +			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); +			result = true; +			break;  		default:  			result = false;  			break; @@ -1472,9 +1529,8 @@ static void log_tf(struct dc_context *ctx,  	}  } -static bool -dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, -			       const struct dc_stream_state *stream) +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +				const struct dc_stream_state *stream)  {  	struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -1510,11 +1566,13 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,  	return true;  } -static void dcn10_pipe_control_lock( +void dcn10_pipe_control_lock(  	struct dc *dc,  	struct pipe_ctx *pipe,  	bool lock)  { +	struct dce_hwseq *hws = dc->hwseq; +  	/* use TG master update lock to lock everything on the TG  	 * therefore only top pipe need to lock  	 */ @@ -1522,7 +1580,7 @@ static void dcn10_pipe_control_lock(  		return;  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	if (lock)  		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); @@ -1530,7 +1588,7 @@ static void dcn10_pipe_control_lock(  		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);  	if 
(dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  }  static bool wait_for_reset_trigger_to_occur( @@ -1570,7 +1628,7 @@ static bool wait_for_reset_trigger_to_occur(  	return rc;  } -static void dcn10_enable_timing_synchronization( +void dcn10_enable_timing_synchronization(  	struct dc *dc,  	int group_index,  	int group_size, @@ -1600,7 +1658,7 @@ static void dcn10_enable_timing_synchronization(  	DC_SYNC_INFO("Sync complete\n");  } -static void dcn10_enable_per_frame_crtc_position_reset( +void dcn10_enable_per_frame_crtc_position_reset(  	struct dc *dc,  	int group_size,  	struct pipe_ctx *grouped_pipes[]) @@ -1625,10 +1683,10 @@ static void dcn10_enable_per_frame_crtc_position_reset(  }  /*static void print_rq_dlg_ttu( -		struct dc *core_dc, +		struct dc *dc,  		struct pipe_ctx *pipe_ctx)  { -	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, +	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,  			"\n============== DML TTU Output parameters [%d] ==============\n"  			"qos_level_low_wm: %d, \n"  			"qos_level_high_wm: %d, \n" @@ -1658,7 +1716,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(  			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c  			); -	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, +	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,  			"\n============== DML DLG Output parameters [%d] ==============\n"  			"refcyc_h_blank_end: %d, \n"  			"dlg_vblank_end: %d, \n" @@ -1693,7 +1751,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(  			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l  			); -	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, +	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,  			"\ndst_y_per_meta_row_nom_l: %d, \n"  			"refcyc_per_meta_chunk_nom_l: %d, \n"  			"refcyc_per_line_delivery_pre_l: %d, \n" @@ -1723,7 +1781,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(  			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c  			); -	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, +	
DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,  			"\n============== DML RQ Output parameters [%d] ==============\n"  			"chunk_size: %d \n"  			"min_chunk_size: %d \n" @@ -1838,7 +1896,7 @@ static void dcn10_enable_plane(  	struct dce_hwseq *hws = dc->hwseq;  	if (dc->debug.sanity_checks) { -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	}  	undo_DEGVIDCN10_253_wa(dc); @@ -1895,11 +1953,11 @@ static void dcn10_enable_plane(  		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);  	if (dc->debug.sanity_checks) { -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	}  } -static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)  {  	int i = 0;  	struct dpp_grph_csc_adjustment adjust; @@ -1947,7 +2005,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint  	matrix[11] = rgb_bias;  } -static void dcn10_program_output_csc(struct dc *dc, +void dcn10_program_output_csc(struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		enum dc_color_space colorspace,  		uint16_t *matrix, @@ -1979,57 +2037,6 @@ static void dcn10_program_output_csc(struct dc *dc,  	}  } -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ -	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) -		return true; -	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) -		return true; -	return false; -} - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ -	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) -		return true; -	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) -		return true; -	return false; -} - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ -	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) -		return true; -	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) -		return true; -	if 
(pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) -		return true; -	return false; -} - -bool is_rgb_cspace(enum dc_color_space output_color_space) -{ -	switch (output_color_space) { -	case COLOR_SPACE_SRGB: -	case COLOR_SPACE_SRGB_LIMITED: -	case COLOR_SPACE_2020_RGB_FULLRANGE: -	case COLOR_SPACE_2020_RGB_LIMITEDRANGE: -	case COLOR_SPACE_ADOBERGB: -		return true; -	case COLOR_SPACE_YCBCR601: -	case COLOR_SPACE_YCBCR709: -	case COLOR_SPACE_YCBCR601_LIMITED: -	case COLOR_SPACE_YCBCR709_LIMITED: -	case COLOR_SPACE_2020_YCBCR: -		return false; -	default: -		/* Add a case to switch */ -		BREAK_TO_DEBUGGER(); -		return false; -	} -} -  void dcn10_get_surface_visual_confirm_color(  		const struct pipe_ctx *pipe_ctx,  		struct tg_color *color) @@ -2103,70 +2110,7 @@ void dcn10_get_hdr_visual_confirm_color(  	}  } -static uint16_t fixed_point_to_int_frac( -	struct fixed31_32 arg, -	uint8_t integer_bits, -	uint8_t fractional_bits) -{ -	int32_t numerator; -	int32_t divisor = 1 << fractional_bits; - -	uint16_t result; - -	uint16_t d = (uint16_t)dc_fixpt_floor( -		dc_fixpt_abs( -			arg)); - -	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) -		numerator = (uint16_t)dc_fixpt_floor( -			dc_fixpt_mul_int( -				arg, -				divisor)); -	else { -		numerator = dc_fixpt_floor( -			dc_fixpt_sub( -				dc_fixpt_from_int( -					1LL << integer_bits), -				dc_fixpt_recip( -					dc_fixpt_from_int( -						divisor)))); -	} - -	if (numerator >= 0) -		result = (uint16_t)numerator; -	else -		result = (uint16_t)( -		(1 << (integer_bits + fractional_bits + 1)) + numerator); - -	if ((result != 0) && dc_fixpt_lt( -		arg, dc_fixpt_zero)) -		result |= 1 << (integer_bits + fractional_bits); - -	return result; -} - -void dcn10_build_prescale_params(struct  dc_bias_and_scale *bias_and_scale, -		const struct dc_plane_state *plane_state) -{ -	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN -			&& plane_state->format != SURFACE_PIXEL_FORMAT_INVALID -			&& 
plane_state->input_csc_color_matrix.enable_adjustment -			&& plane_state->coeff_reduction_factor.value != 0) { -		bias_and_scale->scale_blue = fixed_point_to_int_frac( -			dc_fixpt_mul(plane_state->coeff_reduction_factor, -					dc_fixpt_from_fraction(256, 255)), -				2, -				13); -		bias_and_scale->scale_red = bias_and_scale->scale_blue; -		bias_and_scale->scale_green = bias_and_scale->scale_blue; -	} else { -		bias_and_scale->scale_blue = 0x2000; -		bias_and_scale->scale_red = 0x2000; -		bias_and_scale->scale_green = 0x2000; -	} -} - -static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) +static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)  {  	struct dc_bias_and_scale bns_params = {0}; @@ -2175,21 +2119,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)  			plane_state->format,  			EXPANSION_MODE_ZERO,  			plane_state->input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  			plane_state->color_space,  			NULL); -#else -			plane_state->color_space); -#endif  	//set scale and bias registers -	dcn10_build_prescale_params(&bns_params, plane_state); +	build_prescale_params(&bns_params, plane_state);  	if (dpp->funcs->dpp_program_bias_and_scale)  		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);  } -static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct mpcc_blnd_cfg blnd_cfg = {{0}};  	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; @@ -2199,10 +2140,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);  	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { -		dcn10_get_hdr_visual_confirm_color( +		hws->funcs.get_hdr_visual_confirm_color(  				
pipe_ctx, &blnd_cfg.black_color);  	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { -		dcn10_get_surface_visual_confirm_color( +		hws->funcs.get_surface_visual_confirm_color(  				pipe_ctx, &blnd_cfg.black_color);  	} else {  		color_space_to_black_color( @@ -2284,11 +2225,12 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)  			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);  } -void update_dchubp_dpp( +static void dcn10_update_dchubp_dpp(  	struct dc *dc,  	struct pipe_ctx *pipe_ctx,  	struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct dpp *dpp = pipe_ctx->plane_res.dpp;  	struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -2342,12 +2284,12 @@ void update_dchubp_dpp(  	if (plane_state->update_flags.bits.full_update ||  		plane_state->update_flags.bits.bpp_change) -		update_dpp(dpp, plane_state); +		dcn10_update_dpp(dpp, plane_state);  	if (plane_state->update_flags.bits.full_update ||  		plane_state->update_flags.bits.per_pixel_alpha_change ||  		plane_state->update_flags.bits.global_alpha_change) -		dc->hwss.update_mpcc(dc, pipe_ctx); +		hws->funcs.update_mpcc(dc, pipe_ctx);  	if (plane_state->update_flags.bits.full_update ||  		plane_state->update_flags.bits.per_pixel_alpha_change || @@ -2407,13 +2349,13 @@ void update_dchubp_dpp(  	hubp->power_gated = false; -	dc->hwss.update_plane_addr(dc, pipe_ctx); +	hws->funcs.update_plane_addr(dc, pipe_ctx);  	if (is_pipe_tree_visible(pipe_ctx))  		hubp->funcs->set_blank(hubp, false);  } -static void dcn10_blank_pixel_data( +void dcn10_blank_pixel_data(  		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		bool blank) @@ -2456,10 +2398,9 @@ static void dcn10_blank_pixel_data(  	}  } -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)  { -	struct fixed31_32 multiplier = dc_fixpt_from_fraction( -			pipe_ctx->plane_state->sdr_white_level, 80); +	struct 
fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;  	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier  	struct custom_float_format fmt; @@ -2467,7 +2408,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)  	fmt.mantissa_bits = 12;  	fmt.sign = true; -	if (pipe_ctx->plane_state->sdr_white_level > 80) + +	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0  		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);  	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier( @@ -2479,17 +2421,19 @@ void dcn10_program_pipe(  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq; +  	if (pipe_ctx->plane_state->update_flags.bits.full_update)  		dcn10_enable_plane(dc, pipe_ctx, context); -	update_dchubp_dpp(dc, pipe_ctx, context); +	dcn10_update_dchubp_dpp(dc, pipe_ctx, context); -	set_hdr_multiplier(pipe_ctx); +	hws->funcs.set_hdr_multiplier(pipe_ctx);  	if (pipe_ctx->plane_state->update_flags.bits.full_update ||  			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||  			pipe_ctx->plane_state->update_flags.bits.gamma_change) -		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); +		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);  	/* dcn10_translate_regamma_to_hw_format takes 750us to finish  	 * only do gamma programming for full update. 
@@ -2498,14 +2442,16 @@ void dcn10_program_pipe(  	 * doing heavy calculation and programming  	 */  	if (pipe_ctx->plane_state->update_flags.bits.full_update) -		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); +		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);  } -static void program_all_pipe_in_tree( +static void dcn10_program_all_pipe_in_tree(  		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq; +  	if (pipe_ctx->top_pipe == NULL) {  		bool blank = !is_pipe_tree_visible(pipe_ctx); @@ -2519,20 +2465,20 @@ static void program_all_pipe_in_tree(  		pipe_ctx->stream_res.tg->funcs->set_vtg_params(  				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); -		if (dc->hwss.setup_vupdate_interrupt) -			dc->hwss.setup_vupdate_interrupt(pipe_ctx); +		if (hws->funcs.setup_vupdate_interrupt) +			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); -		dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); +		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);  	}  	if (pipe_ctx->plane_state != NULL) -		dcn10_program_pipe(dc, pipe_ctx, context); +		hws->funcs.program_pipe(dc, pipe_ctx, context);  	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) -		program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); +		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);  } -struct pipe_ctx *find_top_pipe_for_stream( +static struct pipe_ctx *dcn10_find_top_pipe_for_stream(  		struct dc *dc,  		struct dc_state *context,  		const struct dc_stream_state *stream) @@ -2556,19 +2502,20 @@ struct pipe_ctx *find_top_pipe_for_stream(  	return NULL;  } -static void dcn10_apply_ctx_for_surface( +void dcn10_apply_ctx_for_surface(  		struct dc *dc,  		const struct dc_stream_state *stream,  		int num_planes,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	int i;  	struct timing_generator *tg;  	uint32_t underflow_check_delay_us;  	bool removed_pipe[4] 
= { false };  	bool interdependent_update = false;  	struct pipe_ctx *top_pipe_to_program = -			find_top_pipe_for_stream(dc, context, stream); +			dcn10_find_top_pipe_for_stream(dc, context, stream);  	DC_LOGGER_INIT(dc->ctx->logger);  	if (!top_pipe_to_program) @@ -2581,23 +2528,23 @@ static void dcn10_apply_ctx_for_surface(  	underflow_check_delay_us = dc->debug.underflow_assert_delay_us; -	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) -		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); +	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) +		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));  	if (interdependent_update) -		lock_all_pipes(dc, context, true); +		dcn10_lock_all_pipes(dc, context, true);  	else  		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);  	if (underflow_check_delay_us != 0xFFFFFFFF)  		udelay(underflow_check_delay_us); -	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) -		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); +	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) +		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));  	if (num_planes == 0) {  		/* OTG blank before remove all front end */ -		dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); +		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);  	}  	/* Disconnect unused mpcc */ @@ -2623,7 +2570,7 @@ static void dcn10_apply_ctx_for_surface(  		    old_pipe_ctx->plane_state &&  		    old_pipe_ctx->stream_res.tg == tg) { -			dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); +			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);  			removed_pipe[i] = true;  			DC_LOG_DC("Reset mpcc for pipe %d\n", @@ -2632,13 +2579,11 @@ static void dcn10_apply_ctx_for_surface(  	}  	if (num_planes > 0) -		program_all_pipe_in_tree(dc, top_pipe_to_program, context); +		dcn10_program_all_pipe_in_tree(dc, 
top_pipe_to_program, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* Program secondary blending tree and writeback pipes */ -	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) -		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); -#endif +	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree)) +		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);  	if (interdependent_update)  		for (i = 0; i < dc->res_pool->pipe_count; i++) {  			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2654,7 +2599,7 @@ static void dcn10_apply_ctx_for_surface(  		}  	if (interdependent_update) -		lock_all_pipes(dc, context, false); +		dcn10_lock_all_pipes(dc, context, false);  	else  		dcn10_pipe_control_lock(dc, top_pipe_to_program, false); @@ -2691,14 +2636,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex  	}  } -static void dcn10_prepare_bandwidth( +void dcn10_prepare_bandwidth(  		struct dc *dc,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubbub *hubbub = dc->res_pool->hubbub;  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (context->stream_count == 0) @@ -2720,17 +2666,18 @@ static void dcn10_prepare_bandwidth(  		dcn_bw_notify_pplib_of_wm_ranges(dc);  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  } -static void dcn10_optimize_bandwidth( +void dcn10_optimize_bandwidth(  		struct dc *dc,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubbub *hubbub = dc->res_pool->hubbub;  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		
if (context->stream_count == 0) @@ -2752,10 +2699,10 @@ static void dcn10_optimize_bandwidth(  		dcn_bw_notify_pplib_of_wm_ranges(dc);  	if (dc->debug.sanity_checks) -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  } -static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, +void dcn10_set_drr(struct pipe_ctx **pipe_ctx,  		int num_pipes, unsigned int vmin, unsigned int vmax,  		unsigned int vmid, unsigned int vmid_frame_number)  { @@ -2763,6 +2710,8 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,  	struct drr_params params = {0};  	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow  	unsigned int event_triggers = 0x800; +	// Note DRR trigger events are generated regardless of whether num frames met. +	unsigned int num_frames = 2;  	params.vertical_total_max = vmax;  	params.vertical_total_min = vmin; @@ -2779,11 +2728,11 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,  		if (vmax != 0 && vmin != 0)  			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(  					pipe_ctx[i]->stream_res.tg, -					event_triggers); +					event_triggers, num_frames);  	}  } -static void dcn10_get_position(struct pipe_ctx **pipe_ctx, +void dcn10_get_position(struct pipe_ctx **pipe_ctx,  		int num_pipes,  		struct crtc_position *position)  { @@ -2795,22 +2744,23 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx,  		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);  } -static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, -		int num_pipes, const struct dc_static_screen_events *events) +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct dc_static_screen_params *params)  {  	unsigned int i; -	unsigned int value = 0; +	unsigned int triggers = 0; -	if (events->surface_update) -		value |= 0x80; -	if (events->cursor_update) -		value |= 0x2; -	if (events->force_trigger) -		value |= 0x1; +	
if (params->triggers.surface_update) +		triggers |= 0x80; +	if (params->triggers.cursor_update) +		triggers |= 0x2; +	if (params->triggers.force_trigger) +		triggers |= 0x1;  	for (i = 0; i < num_pipes; i++)  		pipe_ctx[i]->stream_res.tg->funcs-> -			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); +			set_static_screen_control(pipe_ctx[i]->stream_res.tg, +					triggers, params->num_frames);  }  static void dcn10_config_stereo_parameters( @@ -2850,7 +2800,7 @@ static void dcn10_config_stereo_parameters(  	return;  } -static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)  {  	struct crtc_stereo_flags flags = { 0 };  	struct dc_stream_state *stream = pipe_ctx->stream; @@ -2889,15 +2839,16 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in  	return NULL;  } -static void dcn10_wait_for_mpcc_disconnect( +void dcn10_wait_for_mpcc_disconnect(  		struct dc *dc,  		struct resource_pool *res_pool,  		struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	int mpcc_inst;  	if (dc->debug.sanity_checks) { -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	}  	if (!pipe_ctx->stream_res.opp) @@ -2914,12 +2865,12 @@ static void dcn10_wait_for_mpcc_disconnect(  	}  	if (dc->debug.sanity_checks) { -		dcn10_verify_allow_pstate_change_high(dc); +		hws->funcs.verify_allow_pstate_change_high(dc);  	}  } -static bool dcn10_dummy_display_power_gating( +bool dcn10_dummy_display_power_gating(  	struct dc *dc,  	uint8_t controller_id,  	struct dc_bios *dcb, @@ -2928,7 +2879,7 @@ static bool dcn10_dummy_display_power_gating(  	return true;  } -static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)  {  	struct dc_plane_state *plane_state = pipe_ctx->plane_state;  	struct timing_generator *tg = pipe_ctx->stream_res.tg; @@ -2952,7 
+2903,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)  	}  } -static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)  {  	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub; @@ -2960,7 +2911,7 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh  	hubbub->funcs->update_dchub(hubbub, dh_data);  } -static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  {  	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;  	struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2974,15 +2925,32 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  		.rotation = pipe_ctx->plane_state->rotation,  		.mirror = pipe_ctx->plane_state->horizontal_mirror  	}; -	uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x; -	uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y; -	uint32_t x_offset = min(x_plane, pos_cpy.x); -	uint32_t y_offset = min(y_plane, pos_cpy.y); +	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) || +		(pipe_ctx->bottom_pipe != NULL); + +	int x_plane = pipe_ctx->plane_state->dst_rect.x; +	int y_plane = pipe_ctx->plane_state->dst_rect.y; +	int x_pos = pos_cpy.x; +	int y_pos = pos_cpy.y; + +	// translate cursor from stream space to plane space +	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width / +			pipe_ctx->plane_state->dst_rect.width; +	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / +			pipe_ctx->plane_state->dst_rect.height; -	pos_cpy.x -= x_offset; -	pos_cpy.y -= y_offset; -	pos_cpy.x_hotspot += (x_plane - x_offset); -	pos_cpy.y_hotspot += (y_plane - y_offset); +	if (x_pos < 0) { +		pos_cpy.x_hotspot -= x_pos; +		x_pos = 0; +	} + +	if (y_pos < 0) { +		pos_cpy.y_hotspot -= y_pos; +		y_pos = 0; +	} + +	pos_cpy.x = (uint32_t)x_pos; +	pos_cpy.y = (uint32_t)y_pos;  	if 
(pipe_ctx->plane_state->address.type  			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) @@ -2991,6 +2959,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  	// Swap axis and mirror horizontally  	if (param.rotation == ROTATION_ANGLE_90) {  		uint32_t temp_x = pos_cpy.x; +  		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -  				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;  		pos_cpy.y = temp_x; @@ -2998,26 +2967,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  	// Swap axis and mirror vertically  	else if (param.rotation == ROTATION_ANGLE_270) {  		uint32_t temp_y = pos_cpy.y; -		if (pos_cpy.x >  pipe_ctx->plane_res.scl_data.viewport.height) { -			pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height; -			pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; -		} else { -			pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; -		} +		int viewport_height = +			pipe_ctx->plane_res.scl_data.viewport.height; + +		if (pipe_split_on) { +			if (pos_cpy.x > viewport_height) { +				pos_cpy.x = pos_cpy.x - viewport_height; +				pos_cpy.y = viewport_height - pos_cpy.x; +			} else { +				pos_cpy.y = 2 * viewport_height - pos_cpy.x; +			} +		} else +			pos_cpy.y = viewport_height - pos_cpy.x;  		pos_cpy.x = temp_y;  	}  	// Mirror horizontally and vertically  	else if (param.rotation == ROTATION_ANGLE_180) { -		if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) { -			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width -					- pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x; -		} else { -			uint32_t temp_x = pos_cpy.x; -			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x; -			if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width -					|| pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) { -				
pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width; +		int viewport_width = +			pipe_ctx->plane_res.scl_data.viewport.width; +		int viewport_x = +			pipe_ctx->plane_res.scl_data.viewport.x; + +		if (pipe_split_on) { +			if (pos_cpy.x >= viewport_width + viewport_x) { +				pos_cpy.x = 2 * viewport_width +						- pos_cpy.x + 2 * viewport_x; +			} else { +				uint32_t temp_x = pos_cpy.x; + +				pos_cpy.x = 2 * viewport_x - pos_cpy.x; +				if (temp_x >= viewport_x + +					(int)hubp->curs_attr.width || pos_cpy.x +					<= (int)hubp->curs_attr.width + +					pipe_ctx->plane_state->src_rect.x) { +					pos_cpy.x = temp_x + viewport_width; +				}  			} +		} else { +			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;  		}  		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;  	} @@ -3026,7 +3013,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)  	dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);  } -static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)  {  	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; @@ -3036,7 +3023,7 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)  		pipe_ctx->plane_res.dpp, attributes);  } -static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)  {  	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;  	struct fixed31_32 multiplier; @@ -3063,12 +3050,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)  			pipe_ctx->plane_res.dpp, &opt_attr);  } -/** -* apply_front_porch_workaround  TODO FPGA still need? -* -* This is a workaround for a bug that has existed since R5xx and has not been -* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. 
-*/ +/* + * apply_front_porch_workaround  TODO FPGA still need? + * + * This is a workaround for a bug that has existed since R5xx and has not been + * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. + */  static void apply_front_porch_workaround(  	struct dc_crtc_timing *timing)  { @@ -3081,7 +3068,7 @@ static void apply_front_porch_workaround(  	}  } -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)  {  	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;  	struct dc_crtc_timing patched_crtc_timing; @@ -3110,34 +3097,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)  	return vertical_line_start;  } -void lock_all_pipes(struct dc *dc, -	struct dc_state *context, -	bool lock) -{ -	struct pipe_ctx *pipe_ctx; -	struct timing_generator *tg; -	int i; - -	for (i = 0; i < dc->res_pool->pipe_count; i++) { -		pipe_ctx = &context->res_ctx.pipe_ctx[i]; -		tg = pipe_ctx->stream_res.tg; -		/* -		 * Only lock the top pipe's tg to prevent redundant -		 * (un)locking. Also skip if pipe is disabled. 
-		 */ -		if (pipe_ctx->top_pipe || -		    !pipe_ctx->stream || !pipe_ctx->plane_state || -		    !tg->funcs->is_tg_enabled(tg)) -			continue; - -		if (lock) -			tg->funcs->lock(tg); -		else -			tg->funcs->unlock(tg); -	} -} - -static void calc_vupdate_position( +static void dcn10_calc_vupdate_position( +		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		uint32_t *start_line,  		uint32_t *end_line) @@ -3145,7 +3106,7 @@ static void calc_vupdate_position(  	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;  	int vline_int_offset_from_vupdate =  			pipe_ctx->stream->periodic_interrupt0.lines_offset; -	int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx); +	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);  	int start_position;  	if (vline_int_offset_from_vupdate > 0) @@ -3166,7 +3127,8 @@ static void calc_vupdate_position(  		*end_line = 2;  } -static void cal_vline_position( +static void dcn10_cal_vline_position( +		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		enum vline_select vline,  		uint32_t *start_line, @@ -3181,7 +3143,8 @@ static void cal_vline_position(  	switch (ref_point) {  	case START_V_UPDATE: -		calc_vupdate_position( +		dcn10_calc_vupdate_position( +				dc,  				pipe_ctx,  				start_line,  				end_line); @@ -3195,7 +3158,8 @@ static void cal_vline_position(  	}  } -static void dcn10_setup_periodic_interrupt( +void dcn10_setup_periodic_interrupt( +		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		enum vline_select vline)  { @@ -3205,7 +3169,7 @@ static void dcn10_setup_periodic_interrupt(  		uint32_t start_line = 0;  		uint32_t end_line = 0; -		cal_vline_position(pipe_ctx, vline, &start_line, &end_line); +		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);  		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); @@ -3216,10 +3180,10 @@ static void dcn10_setup_periodic_interrupt(  	}  } -static void dcn10_setup_vupdate_interrupt(struct pipe_ctx 
*pipe_ctx) +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)  {  	struct timing_generator *tg = pipe_ctx->stream_res.tg; -	int start_line = get_vupdate_offset_from_vsync(pipe_ctx); +	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);  	if (start_line < 0) {  		ASSERT(0); @@ -3230,12 +3194,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)  		tg->funcs->setup_vertical_interrupt2(tg, start_line);  } -static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,  		struct dc_link_settings *link_settings)  {  	struct encoder_unblank_param params = { { 0 } };  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->link; +	struct dce_hwseq *hws = link->dc->hwseq;  	/* only 3 items below are used by unblank */  	params.timing = pipe_ctx->stream->timing; @@ -3249,11 +3214,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,  	}  	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { -		link->dc->hwss.edp_backlight_control(link, true); +		hws->funcs.edp_backlight_control(link, true);  	}  } -static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,  				const uint8_t *custom_sdp_message,  				unsigned int sdp_message_size)  { @@ -3264,7 +3229,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,  				sdp_message_size);  	}  } -static enum dc_status dcn10_set_clock(struct dc *dc, +enum dc_status dcn10_set_clock(struct dc *dc,  			enum dc_clock_type clock_type,  			uint32_t clk_khz,  			uint32_t stepping) @@ -3304,7 +3269,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,  } -static void dcn10_get_clock(struct dc *dc, +void dcn10_get_clock(struct dc *dc,  			enum dc_clock_type clock_type,  			struct dc_clock_config *clock_cfg)  { @@ -3314,77 +3279,3 @@ static void dcn10_get_clock(struct 
dc *dc,  				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);  } - -static const struct hw_sequencer_funcs dcn10_funcs = { -	.program_gamut_remap = dcn10_program_gamut_remap, -	.init_hw = dcn10_init_hw, -	.init_pipes = dcn10_init_pipes, -	.apply_ctx_to_hw = dce110_apply_ctx_to_hw, -	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface, -	.update_plane_addr = dcn10_update_plane_addr, -	.plane_atomic_disconnect = hwss1_plane_atomic_disconnect, -	.update_dchub = dcn10_update_dchub, -	.update_mpcc = dcn10_update_mpcc, -	.update_pending_status = dcn10_update_pending_status, -	.set_input_transfer_func = dcn10_set_input_transfer_func, -	.set_output_transfer_func = dcn10_set_output_transfer_func, -	.program_output_csc = dcn10_program_output_csc, -	.power_down = dce110_power_down, -	.enable_accelerated_mode = dce110_enable_accelerated_mode, -	.enable_timing_synchronization = dcn10_enable_timing_synchronization, -	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, -	.update_info_frame = dce110_update_info_frame, -	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message, -	.enable_stream = dce110_enable_stream, -	.disable_stream = dce110_disable_stream, -	.unblank_stream = dcn10_unblank_stream, -	.blank_stream = dce110_blank_stream, -	.enable_audio_stream = dce110_enable_audio_stream, -	.disable_audio_stream = dce110_disable_audio_stream, -	.enable_display_power_gating = dcn10_dummy_display_power_gating, -	.disable_plane = dcn10_disable_plane, -	.blank_pixel_data = dcn10_blank_pixel_data, -	.pipe_control_lock = dcn10_pipe_control_lock, -	.prepare_bandwidth = dcn10_prepare_bandwidth, -	.optimize_bandwidth = dcn10_optimize_bandwidth, -	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, -	.enable_stream_timing = dcn10_enable_stream_timing, -	.set_drr = dcn10_set_drr, -	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn10_set_static_screen_control, -	.setup_stereo = dcn10_setup_stereo, -	.set_avmute 
= dce110_set_avmute, -	.log_hw_state = dcn10_log_hw_state, -	.get_hw_state = dcn10_get_hw_state, -	.clear_status_bits = dcn10_clear_status_bits, -	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, -	.edp_backlight_control = dce110_edp_backlight_control, -	.edp_power_control = dce110_edp_power_control, -	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, -	.set_cursor_position = dcn10_set_cursor_position, -	.set_cursor_attribute = dcn10_set_cursor_attribute, -	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, -	.disable_stream_gating = NULL, -	.enable_stream_gating = NULL, -	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt, -	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, -	.set_clock = dcn10_set_clock, -	.get_clock = dcn10_get_clock, -	.did_underflow_occur = dcn10_did_underflow_occur, -	.init_blank = NULL, -	.disable_vga = dcn10_disable_vga, -	.bios_golden_init = dcn10_bios_golden_init, -	.plane_atomic_disable = dcn10_plane_atomic_disable, -	.plane_atomic_power_down = dcn10_plane_atomic_power_down, -	.enable_power_gating_plane = dcn10_enable_power_gating_plane, -	.dpp_pg_control = dcn10_dpp_pg_control, -	.hubp_pg_control = dcn10_hubp_pg_control, -	.dsc_pg_control = NULL, -}; - - -void dcn10_hw_sequencer_construct(struct dc *dc) -{ -	dc->hwss = dcn10_funcs; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index d3616b1948cc..4d20f6586bb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -27,68 +27,160 @@  #define __DC_HWSS_DCN10_H__  #include "core_types.h" +#include "hw_sequencer_private.h"  struct dc;  void dcn10_hw_sequencer_construct(struct dc *dc); -extern void fill_display_configs( -	const struct dc_state *context, -	struct dm_pp_display_configuration *pp_display_cfg); - -bool is_rgb_cspace(enum dc_color_space output_color_space); - -void 
hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void dcn10_verify_allow_pstate_change_high(struct dc *dc); +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +enum dc_status dcn10_enable_stream_timing( +		struct pipe_ctx *pipe_ctx, +		struct dc_state *context, +		struct dc *dc); +void dcn10_optimize_bandwidth( +		struct dc *dc, +		struct dc_state *context); +void dcn10_prepare_bandwidth( +		struct dc *dc, +		struct dc_state *context); +void dcn10_pipe_control_lock( +	struct dc *dc, +	struct pipe_ctx *pipe, +	bool lock); +void dcn10_blank_pixel_data( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool blank); +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, +		struct dc_link_settings *link_settings); +void dcn10_program_output_csc(struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		enum dc_color_space colorspace, +		uint16_t *matrix, +		int opp_id); +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +				const struct dc_stream_state *stream); +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +			const struct dc_plane_state *plane_state); +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_reset_hw_ctx_wrap( +		struct dc *dc, +		struct dc_state *context); +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_apply_ctx_for_surface( +		struct dc *dc, +		const struct dc_stream_state *stream, +		int num_planes, +		struct dc_state *context); +void dcn10_hubp_pg_control( +		struct dce_hwseq *hws, +		unsigned int hubp_inst, +		bool power_on); +void dcn10_dpp_pg_control( +		struct dce_hwseq *hws, +		unsigned int dpp_inst, +		bool power_on); +void dcn10_enable_power_gating_plane( +	struct dce_hwseq *hws, +	bool enable); +void dcn10_plane_atomic_disable(struct dc 
*dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_vga( +	struct dce_hwseq *hws);  void dcn10_program_pipe(  		struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context); - -void dcn10_get_hw_state( +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx); +void dcn10_init_hw(struct dc *dc); +void dcn10_init_pipes(struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_ctx_to_hw( +		struct dc *dc, +		struct dc_state *context); +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); +void dce110_power_down(struct dc *dc); +void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); +void dcn10_enable_timing_synchronization( +		struct dc *dc, +		int group_index, +		int group_size, +		struct pipe_ctx *grouped_pipes[]); +void dcn10_enable_per_frame_crtc_position_reset( +		struct dc *dc, +		int group_size, +		struct pipe_ctx *grouped_pipes[]); +void dce110_update_info_frame(struct pipe_ctx *pipe_ctx); +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, +		const uint8_t *custom_sdp_message, +		unsigned int sdp_message_size); +void dce110_blank_stream(struct pipe_ctx *pipe_ctx); +void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx); +void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx); +bool dcn10_dummy_display_power_gating(  		struct dc *dc, -		char *pBuf, unsigned int bufSize, +		uint8_t controller_id, +		struct dc_bios *dcb, +		enum pipe_gating_control power_gating); +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, +		int num_pipes, unsigned int vmin, unsigned int vmax, +		unsigned int vmid, unsigned int vmid_frame_number); +void dcn10_get_position(struct pipe_ctx **pipe_ctx, +		int num_pipes, +		struct crtc_position *position); +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct 
dc_static_screen_params *params); +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc); +void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); +void dcn10_log_hw_state(struct dc *dc, +		struct dc_log_buffer_ctx *log_ctx); +void dcn10_get_hw_state(struct dc *dc, +		char *pBuf, +		unsigned int bufSize,  		unsigned int mask); -  void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); - -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); - -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); - +void dcn10_wait_for_mpcc_disconnect( +		struct dc *dc, +		struct resource_pool *res_pool, +		struct pipe_ctx *pipe_ctx); +void dce110_edp_backlight_control( +		struct dc_link *link, +		bool enable); +void dce110_edp_power_control( +		struct dc_link *link, +		bool power_up); +void dce110_edp_wait_for_hpd_ready( +		struct dc_link *link, +		bool power_up); +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); +void dcn10_setup_periodic_interrupt( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		enum vline_select vline); +enum dc_status dcn10_set_clock(struct dc *dc, +		enum dc_clock_type clock_type, +		uint32_t clk_khz, +		uint32_t stepping); +void dcn10_get_clock(struct dc *dc, +		enum dc_clock_type clock_type, +		struct dc_clock_config *clock_cfg); +bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_bios_golden_init(struct dc *dc); +void dcn10_plane_atomic_power_down(struct dc *dc, +		struct dpp *dpp, +		struct hubp *hubp);  void dcn10_get_surface_visual_confirm_color(  		const struct pipe_ctx *pipe_ctx,  		struct tg_color *color); -  void dcn10_get_hdr_visual_confirm_color(  		
struct pipe_ctx *pipe_ctx,  		struct tg_color *color); - -bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void update_dchubp_dpp( -	struct dc *dc, -	struct pipe_ctx *pipe_ctx, -	struct dc_state *context); - -struct pipe_ctx *find_top_pipe_for_stream( -		struct dc *dc, -		struct dc_state *context, -		const struct dc_stream_state *stream); - -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); - -void dcn10_build_prescale_params(struct  dc_bias_and_scale *bias_and_scale, -		const struct dc_plane_state *plane_state); -void lock_all_pipes(struct dc *dc, -	struct dc_state *context, -	bool lock); +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); +void dcn10_verify_allow_pstate_change_high(struct dc *dc);  #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h new file mode 100644 index 000000000000..596f95c22e85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN10_DEBUG_H__ +#define __DC_HWSS_DCN10_DEBUG_H__ + +#include "core_types.h" + +struct dc; + +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); + +void dcn10_log_hw_state(struct dc *dc, +		struct dc_log_buffer_ctx *log_ctx); + +void dcn10_get_hw_state(struct dc *dc, +		char *pBuf, +		unsigned int bufSize, +		unsigned int mask); + +#endif /* __DC_HWSS_DCN10_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c new file mode 100644 index 000000000000..e7e5352ec424 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -0,0 +1,111 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hw_sequencer_private.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer.h" + +static const struct hw_sequencer_funcs dcn10_funcs = { +	.program_gamut_remap = dcn10_program_gamut_remap, +	.init_hw = dcn10_init_hw, +	.apply_ctx_to_hw = dce110_apply_ctx_to_hw, +	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface, +	.update_plane_addr = dcn10_update_plane_addr, +	.update_dchub = dcn10_update_dchub, +	.update_pending_status = dcn10_update_pending_status, +	.program_output_csc = dcn10_program_output_csc, +	.enable_accelerated_mode = dce110_enable_accelerated_mode, +	.enable_timing_synchronization = dcn10_enable_timing_synchronization, +	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, +	.update_info_frame = dce110_update_info_frame, +	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message, +	.enable_stream = dce110_enable_stream, +	.disable_stream = dce110_disable_stream, +	.unblank_stream = dcn10_unblank_stream, +	.blank_stream = dce110_blank_stream, +	.enable_audio_stream = dce110_enable_audio_stream, +	.disable_audio_stream = dce110_disable_audio_stream, +	.disable_plane = dcn10_disable_plane, +	.pipe_control_lock = dcn10_pipe_control_lock, +	.prepare_bandwidth = dcn10_prepare_bandwidth, +	.optimize_bandwidth = dcn10_optimize_bandwidth, +	.set_drr = dcn10_set_drr, +	.get_position = dcn10_get_position, +	.set_static_screen_control = dcn10_set_static_screen_control, +	.setup_stereo = dcn10_setup_stereo, +	.set_avmute = dce110_set_avmute, +	.log_hw_state = dcn10_log_hw_state, +	.get_hw_state = dcn10_get_hw_state, +	.clear_status_bits = dcn10_clear_status_bits, +	.wait_for_mpcc_disconnect 
= dcn10_wait_for_mpcc_disconnect, +	.edp_power_control = dce110_edp_power_control, +	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, +	.set_cursor_position = dcn10_set_cursor_position, +	.set_cursor_attribute = dcn10_set_cursor_attribute, +	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, +	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt, +	.set_clock = dcn10_set_clock, +	.get_clock = dcn10_get_clock, +	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn10_private_funcs = { +	.init_pipes = dcn10_init_pipes, +	.update_plane_addr = dcn10_update_plane_addr, +	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect, +	.program_pipe = dcn10_program_pipe, +	.update_mpcc = dcn10_update_mpcc, +	.set_input_transfer_func = dcn10_set_input_transfer_func, +	.set_output_transfer_func = dcn10_set_output_transfer_func, +	.power_down = dce110_power_down, +	.enable_display_power_gating = dcn10_dummy_display_power_gating, +	.blank_pixel_data = dcn10_blank_pixel_data, +	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, +	.enable_stream_timing = dcn10_enable_stream_timing, +	.edp_backlight_control = dce110_edp_backlight_control, +	.disable_stream_gating = NULL, +	.enable_stream_gating = NULL, +	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, +	.did_underflow_occur = dcn10_did_underflow_occur, +	.init_blank = NULL, +	.disable_vga = dcn10_disable_vga, +	.bios_golden_init = dcn10_bios_golden_init, +	.plane_atomic_disable = dcn10_plane_atomic_disable, +	.plane_atomic_power_down = dcn10_plane_atomic_power_down, +	.enable_power_gating_plane = dcn10_enable_power_gating_plane, +	.dpp_pg_control = dcn10_dpp_pg_control, +	.hubp_pg_control = dcn10_hubp_pg_control, +	.dsc_pg_control = NULL, +	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, +	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, +	.set_hdr_multiplier = dcn10_set_hdr_multiplier, +	
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +}; + +void dcn10_hw_sequencer_construct(struct dc *dc) +{ +	dc->hwss = dcn10_funcs; +	dc->hwseq->funcs = dcn10_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h new file mode 100644 index 000000000000..8c6fd7b844a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN10_INIT_H__ +#define __DC_DCN10_INIT_H__ + +struct dc; + +void dcn10_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c index 0fb9e440cb9d..f05371c1fc36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c @@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = {  	.ipp_destroy			= dcn10_ipp_destroy  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  static const struct ipp_funcs dcn20_ipp_funcs = {  	.ipp_destroy			= dcn10_ipp_destroy  }; -#endif  void dcn10_ipp_construct(  	struct dcn10_ipp *ippn10, @@ -76,7 +74,6 @@ void dcn10_ipp_construct(  	ippn10->ipp_mask = ipp_mask;  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  void dcn20_ipp_construct(  	struct dcn10_ipp *ippn10,  	struct dc_context *ctx, @@ -93,4 +90,3 @@ void dcn20_ipp_construct(  	ippn10->ipp_shift = ipp_shift;  	ippn10->ipp_mask = ipp_mask;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h index cfa24459242b..f0e0d07b0311 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h @@ -49,7 +49,6 @@  	SRI(CURSOR_HOT_SPOT, CURSOR, id), \  	SRI(CURSOR_DST_OFFSET, CURSOR, id) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define IPP_REG_LIST_DCN20(id) \  	IPP_REG_LIST_DCN(id), \  	SRI(CURSOR_SETTINGS, HUBPREQ, id), \ @@ -60,7 +59,6 @@  	SRI(CURSOR_POSITION, CURSOR0_, id), \  	SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \  	SRI(CURSOR_DST_OFFSET, CURSOR0_, id) -#endif  #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT	0x4  #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK		0x00000010L @@ -105,7 +103,6 @@  	IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \  	IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh) -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define IPP_MASK_SH_LIST_DCN20(mask_sh) \  	IPP_MASK_SH_LIST_DCN(mask_sh), \  	IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \ @@ -124,7 +121,6 @@  	IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \  	IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \  	IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) -#endif  #define IPP_DCN10_REG_FIELD_LIST(type) \  	type CNVC_SURFACE_PIXEL_FORMAT; \ @@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10,  	const struct dcn10_ipp_shift *ipp_shift,  	const struct dcn10_ipp_mask *ipp_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  void dcn20_ipp_construct(struct dcn10_ipp *ippn10,  	struct dc_context *ctx,  	int inst,  	const struct dcn10_ipp_registers *regs,  	const struct dcn10_ipp_shift *ipp_shift,  	const struct dcn10_ipp_mask *ipp_mask); -#endif  #endif /* _DCN10_IPP_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 88fcc395adf5..eb13589b9a81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -72,9 +72,7 @@  struct dcn10_link_enc_aux_registers {  	uint32_t AUX_CONTROL;  	uint32_t AUX_DPHY_RX_CONTROL0; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	uint32_t AUX_DPHY_TX_CONTROL; -#endif  };  struct dcn10_link_enc_hpd_registers { @@ -106,7 +104,6 @@ struct dcn10_link_enc_registers {  	uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;  	uint32_t DP_SEC_CNTL1;  	uint32_t TMDS_CTL_BITS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* DCCG  */  	uint32_t CLOCK_ENABLE;  	/* DIG */ @@ -127,6 +124,26 @@ struct dcn10_link_enc_registers {  	uint32_t RDPCSTX_PHY_CNTL13;  	uint32_t RDPCSTX_PHY_CNTL14;  	uint32_t RDPCSTX_PHY_CNTL15; +	uint32_t RDPCSTX_CNTL; +	uint32_t RDPCSTX_CLOCK_CNTL; +	uint32_t RDPCSTX_PHY_CNTL0; +	uint32_t RDPCSTX_PHY_CNTL2; +	uint32_t 
RDPCSTX_PLL_UPDATE_DATA; +	uint32_t RDPCS_TX_CR_ADDR; +	uint32_t RDPCS_TX_CR_DATA; +	uint32_t DPCSTX_TX_CLOCK_CNTL; +	uint32_t DPCSTX_TX_CNTL; +	uint32_t RDPCSTX_INTERRUPT_CONTROL; +	uint32_t RDPCSTX_PHY_FUSE0; +	uint32_t RDPCSTX_PHY_FUSE1; +	uint32_t RDPCSTX_PHY_FUSE2; +	uint32_t RDPCSTX_PHY_FUSE3; +	uint32_t RDPCSTX_PHY_RX_LD_VAL; +	uint32_t DPCSTX_DEBUG_CONFIG; +	uint32_t RDPCSTX_DEBUG_CONFIG; +	uint32_t RDPCSTX0_RDPCSTX_SCRATCH; +	uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG; +	uint32_t DCIO_SOFT_RESET;  	/* indirect registers */  	uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;  	uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -136,7 +153,6 @@ struct dcn10_link_enc_registers {  	uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;  	uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;  	uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3; -#endif  };  #define LE_SF(reg_name, field_name, post_fix)\ @@ -242,7 +258,6 @@ struct dcn10_link_enc_registers {  	type AUX_LS_READ_EN;\  	type AUX_RX_RECEIVE_WINDOW -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \  		type RDPCS_PHY_DP_TX0_DATA_EN;\ @@ -423,20 +438,15 @@ struct dcn10_link_enc_registers {  	type AUX_TX_PRECHARGE_SYMBOLS; \  	type AUX_MODE_DET_CHECK_DELAY;\  	type DPCS_DBG_CBUS_DIS -#endif  struct dcn10_link_enc_shift {  	DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#endif  };  struct dcn10_link_enc_mask {  	DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#endif  };  struct dcn10_link_encoder { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8b2f29f6dabd..04f863499cfb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc,  		int mpcc_id)  {  	struct 
dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); +	struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); +	uint32_t bg_r_cr, bg_g_y, bg_b_cb; + +	/* find bottommost mpcc. */ +	while (bottommost_mpcc->mpcc_bot) { +		bottommost_mpcc = bottommost_mpcc->mpcc_bot; +	}  	/* mpc color is 12 bit.  tg_color is 10 bit */  	/* todo: might want to use 16 bit to represent color and have each  	 * hw block translate to correct color depth.  	 */ -	uint32_t bg_r_cr = bg_color->color_r_cr << 2; -	uint32_t bg_g_y = bg_color->color_g_y << 2; -	uint32_t bg_b_cb = bg_color->color_b_cb << 2; +	bg_r_cr = bg_color->color_r_cr << 2; +	bg_g_y = bg_color->color_g_y << 2; +	bg_b_cb = bg_color->color_b_cb << 2; -	REG_SET(MPCC_BG_R_CR[mpcc_id], 0, +	REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,  			MPCC_BG_R_CR, bg_r_cr); -	REG_SET(MPCC_BG_G_Y[mpcc_id], 0, +	REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,  			MPCC_BG_G_Y, bg_g_y); -	REG_SET(MPCC_BG_B_CB[mpcc_id], 0, +	REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,  			MPCC_BG_B_CB, bg_b_cb);  } @@ -457,12 +464,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = {  	.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,  	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,  	.update_blending = mpc1_update_blending, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	.set_denorm = NULL,  	.set_denorm_clamp = NULL,  	.set_output_csc = NULL,  	.set_output_gamma = NULL, -#endif  };  void dcn10_mpc_construct(struct dcn10_mpc *mpc10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0a9ad692f541..d79718fde5a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -373,11 +373,9 @@ void opp1_program_oppbuf(  	 */  	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* Controls the number of padded pixels at the end of a segment */  	
if (REG(OPPBUF_CONTROL1))  		REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -#endif  }  void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) @@ -404,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = {  		.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,  		.opp_program_stereo = opp1_program_stereo,  		.opp_pipe_clock_control = opp1_pipe_clock_control, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		.opp_set_disp_pattern_generator = NULL, -#endif +		.dpg_is_blanked = NULL,  		.opp_destroy = opp1_destroy  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index dabccbd49ad4..a9a43b397db9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -457,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc)  	REG_UPDATE(CONTROL,  			VTG0_ENABLE, 1); +	REG_SEQ_START(); +  	/* Enable CRTC */  	REG_UPDATE_2(OTG_CONTROL,  			OTG_DISABLE_POINT_CNTL, 3,  			OTG_MASTER_EN, 1); +	REG_SEQ_SUBMIT(); +	REG_SEQ_WAIT_DONE(); +  	return true;  } @@ -784,21 +789,26 @@ void optc1_set_early_control(  void optc1_set_static_screen_control(  	struct timing_generator *optc, -	uint32_t value) +	uint32_t event_triggers, +	uint32_t num_frames)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); +	// By register spec, it only takes 8 bit value +	if (num_frames > 0xFF) +		num_frames = 0xFF; +  	/* Bit 8 is no longer applicable in RV for PSR case,  	 * set bit 8 to 0 if given  	 */ -	if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) +	if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)  			!= 0) -		value = value & +		event_triggers = event_triggers &  		~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;  	REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0, -			OTG_STATIC_SCREEN_EVENT_MASK, value, -		
	OTG_STATIC_SCREEN_FRAME_COUNT, 2); +			OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, +			OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);  }  void optc1_setup_manual_trigger(struct timing_generator *optc) @@ -1497,7 +1507,6 @@ void dcn10_timing_generator_init(struct optc *optc1)  	optc1->min_v_sync_width = 1;  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  /* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this:   *   * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as @@ -1510,15 +1519,12 @@ void dcn10_timing_generator_init(struct optc *optc1)   *   to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well.   *   */ -#endif  bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)  {  	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422  			&& !timing->dsc_cfg.ycbcr422_simple); -#endif  	return two_pix;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index c8d795b335ba..f277656d5464 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -165,13 +165,11 @@ struct dcn_optc_registers {  	uint32_t OTG_CRC0_WINDOWB_X_CONTROL;  	uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;  	uint32_t GSL_SOURCE_SELECT; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	uint32_t DWB_SOURCE_SELECT;  	uint32_t OTG_DSC_START_POSITION;  	uint32_t OPTC_DATA_FORMAT_CONTROL;  	uint32_t OPTC_BYTES_PER_PIXEL;  	uint32_t OPTC_WIDTH_CONTROL; -#endif  };  #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -456,7 +454,6 @@ struct dcn_optc_registers {  	type MANUAL_FLOW_CONTROL;\  	type MANUAL_FLOW_CONTROL_SEL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #define 
TG_REG_FIELD_LIST(type) \  	TG_REG_FIELD_LIST_DCN1_0(type)\ @@ -479,12 +476,6 @@ struct dcn_optc_registers {  	type OPTC_DWB0_SOURCE_SELECT;\  	type OPTC_DWB1_SOURCE_SELECT; -#else - -#define TG_REG_FIELD_LIST(type) \ -	TG_REG_FIELD_LIST_DCN1_0(type) - -#endif  struct dcn_optc_shift { @@ -542,6 +533,7 @@ struct dcn_otg_state {  	uint32_t h_total;  	uint32_t underflow_occurred_status;  	uint32_t otg_enabled; +	uint32_t blank_enabled;  };  void optc1_read_otg_state(struct optc *optc1, @@ -633,7 +625,8 @@ void optc1_set_drr(  void optc1_set_static_screen_control(  	struct timing_generator *optc, -	uint32_t value); +	uint32_t event_triggers, +	uint32_t num_frames);  void optc1_program_stereo(struct timing_generator *optc,  	const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 15640aedd664..3b71898e859e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -28,6 +28,8 @@  #include "dm_services.h"  #include "dc.h" +#include "dcn10_init.h" +  #include "resource.h"  #include "include/irq_service_interface.h"  #include "dcn10_resource.h" @@ -919,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)  	return pp_smu;  } -static void destruct(struct dcn10_resource_pool *pool) +static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)  {  	unsigned int i; @@ -1166,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)  {  	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); -	destruct(dcn10_pool); +	dcn10_resource_destruct(dcn10_pool);  	kfree(dcn10_pool);  	*pool = NULL;  } @@ -1305,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)  	return value;  } -static bool construct( +static bool dcn10_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc, 
 	struct dcn10_resource_pool *pool) @@ -1592,7 +1594,7 @@ static bool construct(  fail: -	destruct(pool); +	dcn10_resource_destruct(pool);  	return false;  } @@ -1607,7 +1609,7 @@ struct resource_pool *dcn10_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(init_data->num_virtual_links, dc, pool)) +	if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))  		return &pool->base;  	kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 06e5bbb4545c..376c4264d295 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting)  {  	uint32_t h_active_start; @@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(  	 * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,  	 * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").  	 
*/ -	if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) || -			(output_color_space == COLOR_SPACE_2020_YCBCR) || -			(output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || -			(output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) +	if (use_vsc_sdp_for_colorimetry)  		misc1 = misc1 | 0x40;  	else  		misc1 = misc1 & ~0x40; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c9cbc21d121e..f9b9e221c698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers {  	uint32_t DP_MSA_TIMING_PARAM3;  	uint32_t DP_MSA_TIMING_PARAM4;  	uint32_t HDMI_DB_CONTROL; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint32_t DP_DSC_CNTL;  	uint32_t DP_DSC_BYTES_PER_PIXEL;  	uint32_t DME_CONTROL;  	uint32_t DP_SEC_METADATA_TRANSMISSION;  	uint32_t HDMI_METADATA_PACKET_CONTROL;  	uint32_t DP_SEC_FRAMING4; -#endif  	uint32_t DIG_CLOCK_PATTERN;  }; @@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers {  	type DIG_SOURCE_SELECT;\  	type DIG_CLOCK_PATTERN -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define SE_REG_FIELD_LIST_DCN2_0(type) \  	type DP_DSC_MODE;\  	type DP_DSC_SLICE_WIDTH;\ @@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers {  	type DOLBY_VISION_EN;\  	type DP_PIXEL_COMBINE;\  	type DP_SST_SDP_SPLITTING -#endif  struct dcn10_stream_encoder_shift {  	SE_REG_FIELD_LIST_DCN1_0(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	SE_REG_FIELD_LIST_DCN2_0(uint8_t); -#endif  };  struct dcn10_stream_encoder_mask {  	SE_REG_FIELD_LIST_DCN1_0(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	SE_REG_FIELD_LIST_DCN2_0(uint32_t); -#endif  };  struct dcn10_stream_encoder { @@ -526,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	
bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting);  void enc1_stream_encoder_hdmi_set_stream_attribute( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 63f3bddba7da..5fcaf78334ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,16 +1,21 @@ +# SPDX-License-Identifier: MIT  #  # Makefile for DCN. -DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \  		dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \  		dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \  		dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  DCN20 += dcn20_dsc.o -endif +ifdef CONFIG_X86  CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec +endif  ifdef CONFIG_CC_IS_GCC  ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -18,6 +23,7 @@ IS_OLD_GCC = 1  endif  endif +ifdef CONFIG_X86  ifdef IS_OLD_GCC  # Stack alignment mismatch, proceed with caution.  
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -26,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4  else  CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2  endif +endif  AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 1e1151356e60..50bffbfdd394 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)  	if (dccg->ref_dppclk && req_dppclk) {  		int ref_dppclk = dccg->ref_dppclk; +		int modulo, phase; -		ASSERT(req_dppclk <= ref_dppclk); -		/* need to clamp to 8 bits */ -		if (ref_dppclk > 0xff) { -			int divider = (ref_dppclk + 0xfe) / 0xff; +		// phase / modulo = dpp pipe clk / dpp global clk +		modulo = 0xff;   // use FF at the end +		phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; -			ref_dppclk /= divider; -			req_dppclk = (req_dppclk + divider - 1) / divider; -			if (req_dppclk > ref_dppclk) -				req_dppclk = ref_dppclk; +		if (phase > 0xff) { +			ASSERT(false); +			phase = 0xff;  		} +  		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, -				DPPCLK0_DTO_PHASE, req_dppclk, -				DPPCLK0_DTO_MODULO, ref_dppclk); +				DPPCLK0_DTO_PHASE, phase, +				DPPCLK0_DTO_MODULO, modulo);  		REG_UPDATE(DPPCLK_DTO_CTRL,  				DPPCLK_DTO_ENABLE[dpp_inst], 1);  	} else { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 4d7e45892f08..13e057d7ee93 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -104,7 +104,7 @@ static void dpp2_cnv_setup (  	uint32_t pixel_format = 0;  	uint32_t alpha_en = 1;  	enum dc_color_space color_space = COLOR_SPACE_SRGB; -	enum 
dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; +	enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;  	bool force_disable_cursor = false;  	struct out_csc_color_matrix tbl_entry;  	uint32_t is_2bit = 0; @@ -145,25 +145,25 @@ static void dpp2_cnv_setup (  		force_disable_cursor = false;  		pixel_format = 65;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		break;  	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:  		force_disable_cursor = true;  		pixel_format = 64;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		break;  	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:  		force_disable_cursor = true;  		pixel_format = 67;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		break;  	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:  		force_disable_cursor = true;  		pixel_format = 66;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		break;  	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:  		pixel_format = 22; @@ -177,7 +177,7 @@ static void dpp2_cnv_setup (  	case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:  		pixel_format = 12;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		break;  	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:  		pixel_format = 112; @@ -188,13 +188,13 @@ static void dpp2_cnv_setup (  	case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:  		pixel_format = 114;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		is_2bit = 1;  		break;  	case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:  		pixel_format = 115;  		color_space = COLOR_SPACE_YCBCR709; -		select = INPUT_CSC_SELECT_ICSC; +		select = DCN2_ICSC_SELECT_ICSC_A;  		is_2bit = 1;  		break;  	case 
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: @@ -227,13 +227,13 @@ static void dpp2_cnv_setup (  		tbl_entry.color_space = input_color_space;  		if (color_space >= COLOR_SPACE_YCBCR601) -			select = INPUT_CSC_SELECT_ICSC; +			select = DCN2_ICSC_SELECT_ICSC_A;  		else -			select = INPUT_CSC_SELECT_BYPASS; +			select = DCN2_ICSC_SELECT_BYPASS; -		dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); +		dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);  	} else -	dpp1_program_input_csc(dpp_base, color_space, select, NULL); +	dpp2_program_input_csc(dpp_base, color_space, select, NULL);  	if (force_disable_cursor) {  		REG_UPDATE(CURSOR_CONTROL, @@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {  	.dpp_reset = dpp_reset,  	.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,  	.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, -	.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, +	.dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,  	.dpp_set_csc_adjustment = NULL,  	.dpp_set_csc_default = NULL,  	.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 5b03b737b1d6..27610251c57f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -150,6 +150,16 @@  	SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \  	SRI(CM_SHAPER_LUT_INDEX, CM, id) +#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ +	SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ +	SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ +	SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ +	SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ +	SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ +	SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ +	SRI(CM_ICSC_B_C11_C12, CM, id), \ +	SRI(CM_ICSC_B_C33_C34, CM, id) +  #define TF_REG_LIST_DCN20(id) \  	TF_REG_LIST_DCN(id), \  	TF_REG_LIST_DCN20_COMMON(id), \ @@ -572,10 +582,29 @@  	
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\  	TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) +/* DPP CM debug status register: + * + *		Status index including current ICSC, Gamut Remap Mode is 9 + *			ICSC Mode: [4..3] + *			Gamut Remap Mode: [10..9] + */ +#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 + +#define TF_DEBUG_REG_LIST_SH_DCN20 \ +	TF_DEBUG_REG_LIST_SH_DCN10, \ +	.CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ +	.CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 + +#define TF_DEBUG_REG_LIST_MASK_DCN20 \ +	TF_DEBUG_REG_LIST_MASK_DCN10, \ +	.CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ +	.CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600  #define TF_REG_FIELD_LIST_DCN2_0(type) \  	TF_REG_FIELD_LIST(type) \  	type CM_BLNDGAM_LUT_DATA; \ +	type CM_TEST_DEBUG_DATA_ICSC_MODE; \ +	type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \  	type FORMAT_CNV16; \  	type CNVC_BYPASS_MSB_ALIGN; \  	type CLAMP_POSITIVE; \ @@ -630,11 +659,22 @@ struct dcn2_dpp_mask {  	uint32_t COLOR_KEYER_RED; \  	uint32_t COLOR_KEYER_GREEN; \  	uint32_t COLOR_KEYER_BLUE; \ -	uint32_t OBUF_MEM_PWR_CTRL;\ +	uint32_t OBUF_MEM_PWR_CTRL; \  	uint32_t DSCL_MEM_PWR_CTRL +#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ +	uint32_t CM_GAMUT_REMAP_B_C11_C12; \ +	uint32_t CM_GAMUT_REMAP_B_C13_C14; \ +	uint32_t CM_GAMUT_REMAP_B_C21_C22; \ +	uint32_t CM_GAMUT_REMAP_B_C23_C24; \ +	uint32_t CM_GAMUT_REMAP_B_C31_C32; \ +	uint32_t CM_GAMUT_REMAP_B_C33_C34; \ +	uint32_t CM_ICSC_B_C11_C12; \ +	uint32_t CM_ICSC_B_C33_C34 +  struct dcn2_dpp_registers {  	DPP_DCN2_REG_VARIABLE_LIST; +	DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;  };  struct dcn20_dpp { @@ -656,6 +696,18 @@ struct dcn20_dpp {  	struct pwl_params pwl_data;  }; +enum dcn20_input_csc_select { +	DCN2_ICSC_SELECT_BYPASS = 0, +	DCN2_ICSC_SELECT_ICSC_A = 1, +	DCN2_ICSC_SELECT_ICSC_B = 2 +}; + +enum dcn20_gamut_remap_select { +	DCN2_GAMUT_REMAP_BYPASS = 0, +	DCN2_GAMUT_REMAP_COEF_A = 1, +	DCN2_GAMUT_REMAP_COEF_B = 2 +}; +  void dpp20_read_state(struct dpp *dpp_base,  		struct 
dcn_dpp_state *s); @@ -667,6 +719,16 @@ void dpp2_set_degamma(  		struct dpp *dpp_base,  		enum ipp_degamma_mode mode); +void dpp2_cm_set_gamut_remap( +	struct dpp *dpp_base, +	const struct dpp_grph_csc_adjustment *adjust); + +void dpp2_program_input_csc( +		struct dpp *dpp_base, +		enum dc_color_space color_space, +		enum dcn20_input_csc_select input_select, +		const struct out_csc_color_matrix *tbl_entry); +  bool dpp20_program_blnd_lut(  	struct dpp *dpp_base, const struct pwl_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 2d112c316424..8dc3d1f73984 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -36,6 +36,9 @@  #define REG(reg)\  	dpp->tf_regs->reg +#define IND_REG(index) \ +	(index) +  #define CTX \  	dpp->base.ctx @@ -44,9 +47,6 @@  	dpp->tf_shift->field_name, dpp->tf_mask->field_name - - -  static void dpp2_enable_cm_block(  		struct dpp *dpp_base)  { @@ -149,12 +149,164 @@ void dpp2_set_degamma(  	case IPP_DEGAMMA_MODE_HW_xvYCC:  		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);  			break; +	case IPP_DEGAMMA_MODE_USER_PWL: +		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); +		break;  	default:  		BREAK_TO_DEBUGGER();  		break;  	}  } +static void program_gamut_remap( +		struct dcn20_dpp *dpp, +		const uint16_t *regval, +		enum dcn20_gamut_remap_select select) +{ +	uint32_t cur_select = 0; +	struct color_matrices_reg gam_regs; + +	if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { +		REG_SET(CM_GAMUT_REMAP_CONTROL, 0, +				CM_GAMUT_REMAP_MODE, 0); +		return; +	} + +	/* determine which gamut_remap coefficients (A or B) we are using +	 * currently. 
select the alternate set to double buffer +	 * the update so gamut_remap is updated on frame boundary +	 */ +	IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, +					CM_TEST_DEBUG_DATA_STATUS_IDX, +					CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); + +	/* value stored in dbg reg will be 1 greater than mode we want */ +	if (cur_select != DCN2_GAMUT_REMAP_COEF_A) +		select = DCN2_GAMUT_REMAP_COEF_A; +	else +		select = DCN2_GAMUT_REMAP_COEF_B; + +	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; +	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11; +	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; +	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + +	if (select == DCN2_GAMUT_REMAP_COEF_A) { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); +	} else { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); +	} + +	cm_helper_program_color_matrices( +				dpp->base.ctx, +				regval, +				&gam_regs); + +	REG_SET( +			CM_GAMUT_REMAP_CONTROL, 0, +			CM_GAMUT_REMAP_MODE, select); + +} + +void dpp2_cm_set_gamut_remap( +	struct dpp *dpp_base, +	const struct dpp_grph_csc_adjustment *adjust) +{ +	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); +	int i = 0; + +	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) +		/* Bypass if type is bypass or hw */ +		program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); +	else { +		struct fixed31_32 arr_matrix[12]; +		uint16_t arr_reg_val[12]; + +		for (i = 0; i < 12; i++) +			arr_matrix[i] = adjust->temperature_matrix[i]; + +		convert_float_matrix( +			arr_reg_val, arr_matrix, 12); + +		program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); +	} +} + +void dpp2_program_input_csc( +		struct dpp *dpp_base, +		enum dc_color_space color_space, +		enum dcn20_input_csc_select input_select, +		const struct out_csc_color_matrix *tbl_entry) +{ +	struct dcn20_dpp 
*dpp = TO_DCN20_DPP(dpp_base); +	int i; +	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); +	const uint16_t *regval = NULL; +	uint32_t cur_select = 0; +	enum dcn20_input_csc_select select; +	struct color_matrices_reg icsc_regs; + +	if (input_select == DCN2_ICSC_SELECT_BYPASS) { +		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); +		return; +	} + +	if (tbl_entry == NULL) { +		for (i = 0; i < arr_size; i++) +			if (dpp_input_csc_matrix[i].color_space == color_space) { +				regval = dpp_input_csc_matrix[i].regval; +				break; +			} + +		if (regval == NULL) { +			BREAK_TO_DEBUGGER(); +			return; +		} +	} else { +		regval = tbl_entry->regval; +	} + +	/* determine which CSC coefficients (A or B) we are using +	 * currently.  select the alternate set to double buffer +	 * the CSC update so CSC is updated on frame boundary +	 */ +	IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, +					CM_TEST_DEBUG_DATA_STATUS_IDX, +					CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); + +	if (cur_select != DCN2_ICSC_SELECT_ICSC_A) +		select = DCN2_ICSC_SELECT_ICSC_A; +	else +		select = DCN2_ICSC_SELECT_ICSC_B; + +	icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; +	icsc_regs.masks.csc_c11  = dpp->tf_mask->CM_ICSC_C11; +	icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; +	icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + +	if (select == DCN2_ICSC_SELECT_ICSC_A) { + +		icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); +		icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + +	} else { + +		icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); +		icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); + +	} + +	cm_helper_program_color_matrices( +			dpp->base.ctx, +			regval, +			&icsc_regs); + +	REG_SET(CM_ICSC_CONTROL, 0, +				CM_ICSC_MODE, select); +} +  static void dpp20_power_on_blnd_lut(  	struct dpp *dpp_base,  	bool power_on) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 
63eb377ed9c0..6bdfee20b6a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -23,7 +23,6 @@   *   */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "reg_helper.h"  #include "dcn20_dsc.h"  #include "dsc/dscc_types.h" @@ -207,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str  	struct dsc_reg_values dsc_reg_vals;  	struct dsc_optc_config dsc_optc_cfg; +	memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); +	memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); +  	DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");  	dsc_config_log(dsc, dsc_cfg);  	DC_LOG_DSC("DSC Picture Parameter Set (PPS):"); @@ -222,9 +224,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str  static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)  {  	struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); +	int dsc_clock_en; +	int dsc_fw_config; +	int enabled_opp_pipe; + +	DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); -	/* TODO Check if DSC alreay in use? 
*/ -	DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe); +	REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); +	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); +	if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { +		DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); +		ASSERT(0); +	}  	REG_UPDATE(DSC_TOP_CONTROL,  		DSC_CLOCK_EN, 1); @@ -238,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)  static void dsc2_disable(struct display_stream_compressor *dsc)  {  	struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); +	int dsc_clock_en; +	int dsc_fw_config; +	int enabled_opp_pipe; -	DC_LOG_DSC("disable DSC"); +	DC_LOG_DSC("disable DSC %d", dsc->inst); + +	REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); +	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); +	if (!dsc_clock_en || !dsc_fw_config) { +		DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); +		ASSERT(0); +	}  	REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,  		DSCRM_DSC_FORWARD_EN, 0); @@ -715,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const  	}  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 4e2fb38390a4..9855a7ed0387 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -21,7 +21,6 @@   * Authors: AMD   *   */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #ifndef __DCN20_DSC_H__  #define __DCN20_DSC_H__ @@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,  #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 8b8438566101..9235f7d29454 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,  		output->grph.rgb.max_compressed_blk_size = 64;  		output->grph.rgb.independent_64b_blks = true;  		break; +	default: +		ASSERT(false); +		break;  	}  	output->capable = true;  	output->const_color_support = true; @@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {  	.wm_read_state = hubbub2_wm_read_state,  	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,  	.program_watermarks = hubbub2_program_watermarks, -	.allow_self_refresh_control = hubbub1_allow_self_refresh_control +	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, +	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,  };  void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 69e2aae42394..84d7ac5dd206 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -30,6 +30,8 @@  #include "reg_helper.h"  #include "basics/conversion.h" +#define DC_LOGGER_INIT(logger) +  #define REG(reg)\  	hubp2->hubp_regs->reg @@ -483,7 +485,6 @@ void hubp2_program_pixel_format(  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 12);  		break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 112); @@ -504,7 +505,6 @@ void hubp2_program_pixel_format(  		REG_UPDATE(DCSURF_SURFACE_CONFIG,  				SURFACE_PIXEL_FORMAT, 119);  		break; -#endif  	default:  		BREAK_TO_DEBUGGER();  		break; @@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp)  			HUBP_TTU_DISABLE, &s->ttu_disable,  			HUBP_UNDERFLOW_STATUS, &s->underflow_status); +	REG_GET(HUBP_CLK_CNTL, +			HUBP_CLOCK_ENABLE, 
&s->clock_en); +  	REG_GET(DCN_GLOBAL_TTU_CNTL,  			MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp)  } +void hubp2_validate_dml_output(struct hubp *hubp, +		struct dc_context *ctx, +		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, +		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, +		struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ +	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); +	struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; +	struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; +	struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; +	DC_LOGGER_INIT(ctx->logger); +	DC_LOG_DEBUG("DML Validation | Running Validation"); + +	/* Requestor Regs */ +	REG_GET(HUBPRET_CONTROL, +		DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); +	REG_GET_4(DCN_EXPANSION_MODE, +		DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, +		PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, +		MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, +		CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); +	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, +		CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, +		MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, +		META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, +		MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, +		DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, +		MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, +		SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, +		PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); +	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, +		CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, +		MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, +		META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, +		MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, +		DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, +		MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size, +		SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, +		PTE_ROW_HEIGHT_LINEAR_C, 
&rq_regs.rq_regs_c.pte_row_height_linear); + +	if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) +		DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n", +				dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); +	if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); +	if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); +	if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n", +				dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); +	if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + +	if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); +	if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); +	if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				
dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); +	if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); +	if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); +	if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); +	if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); +	if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + +	if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); +	if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); +	if 
(rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); +	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); +	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); +	if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size); +	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); +	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + +	/* DLG - Per hubp */ +	REG_GET_2(BLANK_OFFSET_0, +		REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, +		DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); +	REG_GET(BLANK_OFFSET_1, +		MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); +	REG_GET(DST_DIMENSIONS, +		REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); +	REG_GET_2(DST_AFTER_SCALER, +		
REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, +		DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); +	REG_GET(REF_FREQ_TO_PIX_FREQ, +		REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + +	if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); +	if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); +	if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u  Actual: %u\n", +				dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); +	if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) +		DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); +	if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) +		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); +	if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) +		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); +	if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) +		DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u  Actual: %u\n", +				dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + +	/* DLG - Per luma/chroma */ +	REG_GET(VBLANK_PARAMETERS_1, +		REFCYC_PER_PTE_GROUP_VBLANK_L, 
&dlg_attr.refcyc_per_pte_group_vblank_l); +	if (REG(NOM_PARAMETERS_0)) +		REG_GET(NOM_PARAMETERS_0, +			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); +	if (REG(NOM_PARAMETERS_1)) +		REG_GET(NOM_PARAMETERS_1, +			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); +	REG_GET(NOM_PARAMETERS_4, +		DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); +	REG_GET(NOM_PARAMETERS_5, +		REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); +	REG_GET_2(PER_LINE_DELIVERY, +		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, +		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); +	REG_GET_2(PER_LINE_DELIVERY_PRE, +		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, +		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); +	REG_GET(VBLANK_PARAMETERS_2, +		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); +	if (REG(NOM_PARAMETERS_2)) +		REG_GET(NOM_PARAMETERS_2, +			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); +	if (REG(NOM_PARAMETERS_3)) +		REG_GET(NOM_PARAMETERS_3, +			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); +	REG_GET(NOM_PARAMETERS_6, +		DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); +	REG_GET(NOM_PARAMETERS_7, +		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); +	REG_GET(VBLANK_PARAMETERS_3, +			REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); +	REG_GET(VBLANK_PARAMETERS_4, +			REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + +	if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); +	if (dlg_attr.dst_y_per_pte_row_nom_l != 
dml_dlg_attr->dst_y_per_pte_row_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); +	if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); +	if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); +	if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); +	if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); +	if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); +	if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); +	if (dlg_attr.dst_y_per_pte_row_nom_c != 
dml_dlg_attr->dst_y_per_pte_row_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); +	if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); +	if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); +	if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); +	if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); +	if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); +	if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); +	if 
(dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + +	/* TTU - per hubp */ +	REG_GET_2(DCN_TTU_QOS_WM, +		QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, +		QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + +	if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) +		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); +	if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) +		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + +	/* TTU - per luma/chroma */ +	/* Assumed surf0 is luma and 1 is chroma */ +	REG_GET_3(DCN_SURF0_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); +	REG_GET_3(DCN_SURF1_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); +	REG_GET_3(DCN_CUR0_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); +	REG_GET(FLIP_PARAMETERS_1, +		REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); +	REG_GET(DCN_CUR0_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); +	REG_GET(DCN_CUR1_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); +	REG_GET(DCN_SURF0_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, 
&ttu_attr.refcyc_per_req_delivery_pre_l); +	REG_GET(DCN_SURF1_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + +	if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); +	if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); +	if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); +	if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); +	if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); +	if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); +	if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); +	if (ttu_attr.qos_level_fixed_cur0 != 
dml_ttu_attr->qos_level_fixed_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); +	if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); +	if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); +	if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); +	if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) +		DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); +	if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); +	if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); +} +  static struct hubp_funcs 
dcn20_hubp_funcs = {  	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,  	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {  	.hubp_clear_underflow = hubp2_clear_underflow,  	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,  	.hubp_init = hubp1_init, +	.validate_dml_output = hubp2_validate_dml_output,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index d5c8615af45e..8c04a3606a54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -148,7 +148,6 @@  	uint32_t VMID_SETTINGS_0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \  	DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \  	uint32_t FLIP_PARAMETERS_3;\ @@ -157,7 +156,6 @@  	uint32_t FLIP_PARAMETERS_6;\  	uint32_t VBLANK_PARAMETERS_5;\  	uint32_t VBLANK_PARAMETERS_6 -#endif  #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \  	DCN_HUBP_REG_FIELD_BASE_LIST(type); \ @@ -184,7 +182,6 @@  	type SURFACE_TRIPLE_BUFFER_ENABLE;\  	type VMID -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \  	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\  	type REFCYC_PER_VM_GROUP_FLIP;\ @@ -194,31 +191,18 @@  	type REFCYC_PER_PTE_GROUP_FLIP_C; \  	type REFCYC_PER_META_CHUNK_FLIP_C; \  	type VM_GROUP_SIZE -#endif  struct dcn_hubp2_registers { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	DCN21_HUBP_REG_COMMON_VARIABLE_LIST; -#else -	DCN2_HUBP_REG_COMMON_VARIABLE_LIST; -#endif  };  struct dcn_hubp2_shift { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#else -	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#endif  };  struct dcn_hubp2_mask { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#else -	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#endif  };  struct 
dcn20_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index ac8c18fadefc..cfbbaffa8654 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -25,17 +25,15 @@  #include <linux/delay.h>  #include "dm_services.h" +#include "basics/dc_common.h"  #include "dm_helpers.h"  #include "core_types.h"  #include "resource.h" -#include "dcn20/dcn20_resource.h" -#include "dce110/dce110_hw_sequencer.h" -#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_resource.h"  #include "dcn20_hwseq.h"  #include "dce/dce_hwseq.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dcn20/dcn20_dsc.h" -#endif +#include "dcn20_dsc.h" +#include "dcn20_optc.h"  #include "abm.h"  #include "clk_mgr.h"  #include "dmcu.h" @@ -45,10 +43,9 @@  #include "ipp.h"  #include "mpc.h"  #include "mcif_wb.h" +#include "dchubbub.h"  #include "reg_helper.h"  #include "dcn10/dcn10_cm_common.h" -#include "dcn10/dcn10_hubbub.h" -#include "dcn10/dcn10_optc.h"  #include "dc_link_dp.h"  #include "vm_helper.h"  #include "dccg.h" @@ -64,14 +61,132 @@  #define FN(reg_name, field_name) \  	hws->shifts->field_name, hws->masks->field_name -static void dcn20_enable_power_gating_plane( +static int find_free_gsl_group(const struct dc *dc) +{ +	if (dc->res_pool->gsl_groups.gsl_0 == 0) +		return 1; +	if (dc->res_pool->gsl_groups.gsl_1 == 0) +		return 2; +	if (dc->res_pool->gsl_groups.gsl_2 == 0) +		return 3; + +	return 0; +} + +/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) + * This is only used to lock pipes in pipe splitting case with immediate flip + * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, + * so we get tearing with freesync since we cannot flip multiple pipes + * atomically. 
+ * We use GSL for this: + * - immediate flip: find first available GSL group if not already assigned + *                   program gsl with that group, set current OTG as master + *                   and always us 0x4 = AND of flip_ready from all pipes + * - vsync flip: disable GSL if used + * + * Groups in stream_res are stored as +1 from HW registers, i.e. + * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 + * Using a magic value like -1 would require tracking all inits/resets + */ +static void dcn20_setup_gsl_group_as_lock( +		const struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool enable) +{ +	struct gsl_params gsl; +	int group_idx; + +	memset(&gsl, 0, sizeof(struct gsl_params)); + +	if (enable) { +		/* return if group already assigned since GSL was set up +		 * for vsync flip, we would unassign so it can't be "left over" +		 */ +		if (pipe_ctx->stream_res.gsl_group > 0) +			return; + +		group_idx = find_free_gsl_group(dc); +		ASSERT(group_idx != 0); +		pipe_ctx->stream_res.gsl_group = group_idx; + +		/* set gsl group reg field and mark resource used */ +		switch (group_idx) { +		case 1: +			gsl.gsl0_en = 1; +			dc->res_pool->gsl_groups.gsl_0 = 1; +			break; +		case 2: +			gsl.gsl1_en = 1; +			dc->res_pool->gsl_groups.gsl_1 = 1; +			break; +		case 3: +			gsl.gsl2_en = 1; +			dc->res_pool->gsl_groups.gsl_2 = 1; +			break; +		default: +			BREAK_TO_DEBUGGER(); +			return; // invalid case +		} +		gsl.gsl_master_en = 1; +	} else { +		group_idx = pipe_ctx->stream_res.gsl_group; +		if (group_idx == 0) +			return; // if not in use, just return + +		pipe_ctx->stream_res.gsl_group = 0; + +		/* unset gsl group reg field and mark resource free */ +		switch (group_idx) { +		case 1: +			gsl.gsl0_en = 0; +			dc->res_pool->gsl_groups.gsl_0 = 0; +			break; +		case 2: +			gsl.gsl1_en = 0; +			dc->res_pool->gsl_groups.gsl_1 = 0; +			break; +		case 3: +			gsl.gsl2_en = 0; +			dc->res_pool->gsl_groups.gsl_2 = 0; +			break; +		default: +			BREAK_TO_DEBUGGER(); +			return; +		} +		
gsl.gsl_master_en = 0; +	} + +	/* at this point we want to program whether it's to enable or disable */ +	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && +		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { +		pipe_ctx->stream_res.tg->funcs->set_gsl( +			pipe_ctx->stream_res.tg, +			&gsl); + +		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( +			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0); +	} else +		BREAK_TO_DEBUGGER(); +} + +void dcn20_set_flip_control_gsl( +		struct pipe_ctx *pipe_ctx, +		bool flip_immediate) +{ +	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) +		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( +				pipe_ctx->plane_res.hubp, flip_immediate); + +} + +void dcn20_enable_power_gating_plane(  	struct dce_hwseq *hws,  	bool enable)  { -	bool force_on = 1; /* disable power gating */ +	bool force_on = true; /* disable power gating */  	if (enable) -		force_on = 0; +		force_on = false;  	/* DCHUBP0/1/2/3/4/5 */  	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws)  	/* This value is dependent on the hardware pipeline delay so set once per SOC */  	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);  } -void dcn20_display_init(struct dc *dc) -{ -	struct dce_hwseq *hws = dc->hwseq; - -	/* RBBMIF -	 * disable RBBMIF timeout detection for all clients -	 * Ensure RBBMIF does not drop register accesses due to the per-client timeout -	 */ -	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); -	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - -	/* DCCG */ -	dcn20_dccg_init(hws); - -	REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0); - -	/* DCHUB/MMHUBBUB -	 * set global timer refclk divider -	 * 100Mhz refclk -> 2 -	 * 27Mhz refclk ->  1 -	 * 48Mhz refclk ->  1 -	 */ -	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); -	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, 
DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); -	REG_WRITE(REFCLK_CNTL, 0); - -	/* OPTC -	 * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc -	 */ - -	/* AZ -	 * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, -	 * if not, it should be programmed according to the ref clock -	 */ -	REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); -	/* Enable controller clock gating */ -	REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); -}  void dcn20_disable_vga(  	struct dce_hwseq *hws) @@ -178,15 +255,15 @@ void dcn20_disable_vga(  	REG_WRITE(D6VGA_CONTROL, 0);  } -void dcn20_program_tripleBuffer( +void dcn20_program_triple_buffer(  	const struct dc *dc,  	struct pipe_ctx *pipe_ctx, -	bool enableTripleBuffer) +	bool enable_triple_buffer)  {  	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {  		pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(  			pipe_ctx->plane_res.hubp, -			enableTripleBuffer); +			enable_triple_buffer);  	}  } @@ -195,6 +272,7 @@ void dcn20_init_blank(  		struct dc *dc,  		struct timing_generator *tg)  { +	struct dce_hwseq *hws = dc->hwseq;  	enum dc_color_space color_space;  	struct tg_color black_color = {0};  	struct output_pixel_processor *opp = NULL; @@ -225,6 +303,7 @@ void dcn20_init_blank(  	opp->funcs->opp_set_disp_pattern_generator(  			opp,  			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, +			CONTROLLER_DP_COLOR_SPACE_UDEFINED,  			COLOR_DEPTH_UNDEFINED,  			&black_color,  			otg_active_width, @@ -234,17 +313,17 @@ void dcn20_init_blank(  		bottom_opp->funcs->opp_set_disp_pattern_generator(  				bottom_opp,  				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, +				CONTROLLER_DP_COLOR_SPACE_UDEFINED,  				COLOR_DEPTH_UNDEFINED,  				&black_color,  				otg_active_width,  				otg_active_height);  	} -	dcn20_hwss_wait_for_blank_complete(opp); +	hws->funcs.wait_for_blank_complete(opp);  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static void 
dcn20_dsc_pg_control( +void dcn20_dsc_pg_control(  		struct dce_hwseq *hws,  		unsigned int dsc_inst,  		bool power_on) @@ -320,9 +399,8 @@ static void dcn20_dsc_pg_control(  	if (org_ip_request_cntl == 0)  		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);  } -#endif -static void dcn20_dpp_pg_control( +void dcn20_dpp_pg_control(  		struct dce_hwseq *hws,  		unsigned int dpp_inst,  		bool power_on) @@ -396,7 +474,7 @@ static void dcn20_dpp_pg_control(  } -static void dcn20_hubp_pg_control( +void dcn20_hubp_pg_control(  		struct dce_hwseq *hws,  		unsigned int hubp_inst,  		bool power_on) @@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control(  /* disable HW used by plane.   * note:  cannot disable until disconnect is complete   */ -static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)  	hubp->power_gated = true;  	dc->optimized_required = false; /* We're powering off, no need to optimize */ -	dc->hwss.plane_atomic_power_down(dc, +	hws->funcs.plane_atomic_power_down(dc,  			pipe_ctx->plane_res.dpp,  			pipe_ctx->plane_res.hubp); @@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing(  		struct dc_state *context,  		struct dc *dc)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct drr_params params = {0};  	unsigned int event_triggers = 0; @@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing(  			pipe_ctx->stream_res.opp,  			true); -	dc->hwss.blank_pixel_data(dc, pipe_ctx, true); +	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);  	/* VTG is  within DCHUB command block. 
DCFCLK is always on */  	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { @@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing(  		return DC_ERROR_UNEXPECTED;  	} -	dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp); +	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);  	params.vertical_total_min = stream->adjust.v_total_min;  	params.vertical_total_max = stream->adjust.v_total_max; @@ -606,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing(  	// DRR should set trigger event to monitor surface update event  	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)  		event_triggers = 0x80; +	/* Event triggers and num frames initialized for DRR, but can be +	 * later updated for PSR use. Note DRR trigger events are generated +	 * regardless of whether num frames met. +	 */  	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)  		pipe_ctx->stream_res.tg->funcs->set_static_screen_control( -				pipe_ctx->stream_res.tg, event_triggers); +				pipe_ctx->stream_res.tg, event_triggers, 2);  	/* TODO program crtc source select for non-virtual signal*/  	/* TODO program FMT */ @@ -649,7 +733,7 @@ void dcn20_program_output_csc(struct dc *dc,  	}  } -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,  				const struct dc_stream_state *stream)  {  	int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -736,20 +820,14 @@ bool dcn20_set_shaper_3dlut(  	else  		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); -	if (plane_state->lut3d_func && -		plane_state->lut3d_func->state.bits.initialized == 1 && -		plane_state->lut3d_func->hdr_multiplier != 0) -		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, -				plane_state->lut3d_func->hdr_multiplier); -	else -		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000); -  	return result;  } -bool dcn20_set_input_transfer_func(struct pipe_ctx 
*pipe_ctx, -					  const struct dc_plane_state *plane_state) +bool dcn20_set_input_transfer_func(struct dc *dc, +				struct pipe_ctx *pipe_ctx, +				const struct dc_plane_state *plane_state)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;  	const struct dc_transfer_func *tf = NULL;  	bool result = true; @@ -758,8 +836,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,  	if (dpp_base == NULL || plane_state == NULL)  		return false; -	dcn20_set_shaper_3dlut(pipe_ctx, plane_state); -	dcn20_set_blend_lut(pipe_ctx, plane_state); +	hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); +	hws->funcs.set_blend_lut(pipe_ctx, plane_state);  	if (plane_state->in_transfer_func)  		tf = plane_state->in_transfer_func; @@ -804,6 +882,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,  					IPP_DEGAMMA_MODE_BYPASS);  			break;  		case TRANSFER_FUNCTION_PQ: +			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); +			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); +			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); +			result = true; +			break;  		default:  			result = false;  			break; @@ -824,7 +907,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,  	return result;  } -static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)  {  	struct pipe_ctx *odm_pipe;  	int opp_cnt = 1; @@ -855,12 +938,16 @@ void dcn20_blank_pixel_data(  	struct dc_stream_state *stream = pipe_ctx->stream;  	enum dc_color_space color_space = stream->output_color_space;  	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; +	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;  	struct pipe_ctx *odm_pipe;  	int odm_cnt = 1;  	int width = 
stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;  	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; +	if (stream->link->test_pattern_enabled) +		return; +  	/* get opp dpg blank color */  	color_space_to_black_color(dc, color_space, &black_color); @@ -873,8 +960,10 @@ void dcn20_blank_pixel_data(  		if (stream_res->abm)  			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); -		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) +		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {  			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; +			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; +		}  	} else {  		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;  	} @@ -882,6 +971,7 @@ void dcn20_blank_pixel_data(  	stream_res->opp->funcs->opp_set_disp_pattern_generator(  			stream_res->opp,  			test_pattern, +			test_pattern_color_space,  			stream->timing.display_color_depth,  			&black_color,  			width, @@ -892,6 +982,7 @@ void dcn20_blank_pixel_data(  				odm_pipe->stream_res.opp,  				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?  						
CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, +				test_pattern_color_space,  				stream->timing.display_color_depth,  				&black_color,  				width, @@ -1217,9 +1308,11 @@ static void dcn20_update_dchubp_dpp(  	struct pipe_ctx *pipe_ctx,  	struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct dpp *dpp = pipe_ctx->plane_res.dpp;  	struct dc_plane_state *plane_state = pipe_ctx->plane_state; +	bool viewport_changed = false;  	if (pipe_ctx->update_flags.bits.dppclk)  		dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1261,7 +1354,7 @@ static void dcn20_update_dchubp_dpp(  		if (dpp->funcs->dpp_program_bias_and_scale) {  			//TODO :for CNVC set scale and bias registers if necessary -			dcn10_build_prescale_params(&bns_params, plane_state); +			build_prescale_params(&bns_params, plane_state);  			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);  		}  	} @@ -1269,19 +1362,19 @@ static void dcn20_update_dchubp_dpp(  	if (pipe_ctx->update_flags.bits.mpcc  			|| plane_state->update_flags.bits.global_alpha_change  			|| plane_state->update_flags.bits.per_pixel_alpha_change) { -		/* Need mpcc to be idle if changing opp */ -		if (pipe_ctx->update_flags.bits.opp_changed) { -			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; -			int mpcc_inst; - -			for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { -				if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) -					continue; +		// MPCC inst is equal to pipe index in practice +		int mpcc_inst = hubp->inst; +		int opp_inst; +		int opp_count = dc->res_pool->pipe_count; + +		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { +			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {  				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); -				old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; +				
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; +				break;  			}  		} -		dc->hwss.update_mpcc(dc, pipe_ctx); +		hws->funcs.update_mpcc(dc, pipe_ctx);  	}  	if (pipe_ctx->update_flags.bits.scaler || @@ -1298,14 +1391,18 @@ static void dcn20_update_dchubp_dpp(  	if (pipe_ctx->update_flags.bits.viewport ||  			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) || -			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) +			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { +  		hubp->funcs->mem_program_viewport(  			hubp,  			&pipe_ctx->plane_res.scl_data.viewport,  			&pipe_ctx->plane_res.scl_data.viewport_c); +		viewport_changed = true; +	}  	/* Any updates are handled in dc interface, just need to apply existing for plane enable */ -	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) +	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || +			pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)  			&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {  		dc->hwss.set_cursor_position(pipe_ctx);  		dc->hwss.set_cursor_attribute(pipe_ctx); @@ -1355,8 +1452,13 @@ static void dcn20_update_dchubp_dpp(  		hubp->power_gated = false;  	} +	if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed) +		hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address); +  	if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) -		dc->hwss.update_plane_addr(dc, pipe_ctx); +		hws->funcs.update_plane_addr(dc, pipe_ctx); + +  	if (pipe_ctx->update_flags.bits.enable)  		hubp->funcs->set_blank(hubp, false); @@ -1368,10 +1470,11 @@ static void dcn20_program_pipe(  		struct pipe_ctx *pipe_ctx,  		struct dc_state *context)  { +	struct dce_hwseq *hws = dc->hwseq;  	/* Only need to unblank on top pipe */  	if ((pipe_ctx->update_flags.bits.enable || 
pipe_ctx->stream->update_flags.bits.abm_level)  			&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) -		dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); +		hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);  	if (pipe_ctx->update_flags.bits.global_sync) {  		pipe_ctx->stream_res.tg->funcs->program_global_sync( @@ -1384,12 +1487,12 @@ static void dcn20_program_pipe(  		pipe_ctx->stream_res.tg->funcs->set_vtg_params(  				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); -		if (dc->hwss.setup_vupdate_interrupt) -			dc->hwss.setup_vupdate_interrupt(pipe_ctx); +		if (hws->funcs.setup_vupdate_interrupt) +			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);  	}  	if (pipe_ctx->update_flags.bits.odm) -		dc->hwss.update_odm(dc, context, pipe_ctx); +		hws->funcs.update_odm(dc, context, pipe_ctx);  	if (pipe_ctx->update_flags.bits.enable)  		dcn20_enable_plane(dc, pipe_ctx, context); @@ -1398,20 +1501,20 @@ static void dcn20_program_pipe(  		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);  	if (pipe_ctx->update_flags.bits.enable -			|| pipe_ctx->plane_state->update_flags.bits.sdr_white_level) -		set_hdr_multiplier(pipe_ctx); +			|| pipe_ctx->plane_state->update_flags.bits.hdr_mult) +		hws->funcs.set_hdr_multiplier(pipe_ctx);  	if (pipe_ctx->update_flags.bits.enable ||  			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||  			pipe_ctx->plane_state->update_flags.bits.gamma_change) -		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); +		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);  	/* dcn10_translate_regamma_to_hw_format takes 750us to finish  	 * only do gamma programming for powering on, internal memcmp to avoid  	 * updating on slave planes  	 */  	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) -		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); +		hws->funcs.set_output_transfer_func(dc, pipe_ctx, 
pipe_ctx->stream);  	/* If the pipe has been enabled or has a different opp, we  	 * should reprogram the fmt. This deals with cases where @@ -1445,12 +1548,13 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe)  	return false;  } -static void dcn20_program_front_end_for_ctx( +void dcn20_program_front_end_for_ctx(  		struct dc *dc,  		struct dc_state *context)  {  	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;  	int i; +	struct dce_hwseq *hws = dc->hwseq;  	bool pipe_locked[MAX_PIPES] = {false};  	DC_LOGGER_INIT(dc->ctx->logger); @@ -1482,13 +1586,13 @@ static void dcn20_program_front_end_for_ctx(  				&& !context->res_ctx.pipe_ctx[i].top_pipe  				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe  				&& context->res_ctx.pipe_ctx[i].stream) -			dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); +			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);  	/* Disconnect mpcc */  	for (i = 0; i < dc->res_pool->pipe_count; i++)  		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable  				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { -			dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); +			hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);  			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);  		} @@ -1508,8 +1612,8 @@ static void dcn20_program_front_end_for_ctx(  			pipe = &context->res_ctx.pipe_ctx[i];  			if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0  					&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) -					&& dc->hwss.program_all_writeback_pipes_in_tree) -				dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); +					&& hws->funcs.program_all_writeback_pipes_in_tree) +				hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);  		}  	} @@ -1541,9 +1645,9 @@ static void 
dcn20_program_front_end_for_ctx(  			struct hubp *hubp = pipe->plane_res.hubp;  			int j = 0; -			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS +			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000  					&& hubp->funcs->hubp_is_flip_pending(hubp); j++) -				msleep(1); +				mdelay(1);  		}  	} @@ -1594,6 +1698,7 @@ bool dcn20_update_bandwidth(  		struct dc_state *context)  {  	int i; +	struct dce_hwseq *hws = dc->hwseq;  	/* recalculate DML parameters */  	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) @@ -1623,10 +1728,10 @@ bool dcn20_update_bandwidth(  					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);  			if (pipe_ctx->prev_odm_pipe == NULL) -				dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); +				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); -			if (dc->hwss.setup_vupdate_interrupt) -				dc->hwss.setup_vupdate_interrupt(pipe_ctx); +			if (hws->funcs.setup_vupdate_interrupt) +				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);  		}  		pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1640,9 +1745,8 @@ bool dcn20_update_bandwidth(  	return true;  } -static void dcn20_enable_writeback( +void dcn20_enable_writeback(  		struct dc *dc, -		const struct dc_stream_status *stream_status,  		struct dc_writeback_info *wb_info,  		struct dc_state *context)  { @@ -1656,8 +1760,7 @@ static void dcn20_enable_writeback(  	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];  	/* set the OPTC source mux */ -	ASSERT(stream_status->primary_otg_inst < MAX_PIPES); -	optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst]; +	optc = dc->res_pool->timing_generators[dwb->otg_inst];  	optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);  	/* set MCIF_WB buffer and arbitration configuration */  	mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); @@ -1684,7 +1787,7 @@ void dcn20_disable_writeback(  	mcif_wb->funcs->disable_mcif(mcif_wb);  } -bool dcn20_hwss_wait_for_blank_complete( +bool 
dcn20_wait_for_blank_complete(  		struct output_pixel_processor *opp)  {  	int counter; @@ -1713,9 +1816,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)  	return hubp->funcs->dmdata_status_done(hubp);  } -static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)  { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct dce_hwseq *hws = dc->hwseq;  	if (pipe_ctx->stream_res.dsc) { @@ -1727,12 +1829,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx  			odm_pipe = odm_pipe->next_odm_pipe;  		}  	} -#endif  } -static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)  { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct dce_hwseq *hws = dc->hwseq;  	if (pipe_ctx->stream_res.dsc) { @@ -1744,7 +1844,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)  			odm_pipe = odm_pipe->next_odm_pipe;  		}  	} -#endif  }  void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) @@ -1767,12 +1866,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)  	hubp->funcs->dmdata_set_attributes(hubp, &attr);  } -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx) -{ -	dce110_disable_stream(pipe_ctx); -} - -static void dcn20_init_vm_ctx( +void dcn20_init_vm_ctx(  		struct dce_hwseq *hws,  		struct dc *dc,  		struct dc_virtual_addr_space_config *va_config, @@ -1794,7 +1888,7 @@ static void dcn20_init_vm_ctx(  	dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);  } -static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)  {  	struct dcn_hubbub_phys_addr_config config; @@ -1838,8 +1932,7 @@ static bool 
patch_address_for_sbs_tb_stereo(  	return false;  } - -static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)  {  	bool addr_patched = false;  	PHYSICAL_ADDRESS_LOC addr; @@ -1873,6 +1966,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,  	struct encoder_unblank_param params = { { 0 } };  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->link; +	struct dce_hwseq *hws = link->dc->hwseq;  	struct pipe_ctx *odm_pipe;  	params.opp_cnt = 1; @@ -1885,7 +1979,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,  	params.link_settings.link_rate = link_settings->link_rate;  	if (dc_is_dp_signal(pipe_ctx->stream->signal)) { -		if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) +		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)  			params.timing.pix_clk_100hz /= 2;  		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(  				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); @@ -1893,14 +1987,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,  	}  	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { -		link->dc->hwss.edp_backlight_control(link, true); +		hws->funcs.edp_backlight_control(link, true);  	}  } -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)  {  	struct timing_generator *tg = pipe_ctx->stream_res.tg; -	int start_line = get_vupdate_offset_from_vsync(pipe_ctx); +	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);  	if (start_line < 0)  		start_line = 0; @@ -1915,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe(  		struct dc_state *context)  {  	int i; +	struct dc_link *link;  	DC_LOGGER_INIT(dc->ctx->logger);  	if (pipe_ctx->stream_res.stream_enc == NULL) {  		pipe_ctx->stream = NULL; @@ -1922,8 
+2017,14 @@ static void dcn20_reset_back_end_for_pipe(  	}  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -		/* DPMS may already disable */ -		if (!pipe_ctx->stream->dpms_off) +		link = pipe_ctx->stream->link; +		/* DPMS may already disable or */ +		/* dpms_off status is incorrect due to fastboot +		 * feature. When system resume from S4 with second +		 * screen only, the dpms_off would be true but +		 * VBIOS lit up eDP, so check link status too. +		 */ +		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)  			core_link_disable_stream(pipe_ctx);  		else if (pipe_ctx->stream_res.audio)  			dc->hwss.disable_audio_stream(pipe_ctx); @@ -1943,11 +2044,9 @@ static void dcn20_reset_back_end_for_pipe(  			}  		}  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	else if (pipe_ctx->stream_res.dsc) {  		dp_set_dsc_enable(pipe_ctx, false);  	} -#endif  	/* by upper caller loop, parent pipe: pipe0, will be reset last.  	 * back end share by all pipes and will be disable only when disable @@ -1978,11 +2077,12 @@ static void dcn20_reset_back_end_for_pipe(  					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);  } -static void dcn20_reset_hw_ctx_wrap( +void dcn20_reset_hw_ctx_wrap(  		struct dc *dc,  		struct dc_state *context)  {  	int i; +	struct dce_hwseq *hws = dc->hwseq;  	/* Reset Back End*/  	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -2001,8 +2101,8 @@ static void dcn20_reset_hw_ctx_wrap(  			struct clock_source *old_clk = pipe_ctx_old->clock_source;  			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); -			if (dc->hwss.enable_stream_gating) -				dc->hwss.enable_stream_gating(dc, pipe_ctx); +			if (hws->funcs.enable_stream_gating) +				hws->funcs.enable_stream_gating(dc, pipe_ctx);  			if (old_clk)  				old_clk->funcs->cs_power_down(old_clk);  		} @@ -2031,8 +2131,9 @@ void dcn20_get_mpctree_visual_confirm_color(  	*color = pipe_colors[top_pipe->pipe_idx];  } -static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx 
*pipe_ctx) +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  { +	struct dce_hwseq *hws = dc->hwseq;  	struct hubp *hubp = pipe_ctx->plane_res.hubp;  	struct mpcc_blnd_cfg blnd_cfg = { {0} };  	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; @@ -2043,10 +2144,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  	// input to MPCC is always RGB, by default leave black_color at 0  	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { -		dcn10_get_hdr_visual_confirm_color( +		hws->funcs.get_hdr_visual_confirm_color(  				pipe_ctx, &blnd_cfg.black_color);  	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { -		dcn10_get_surface_visual_confirm_color( +		hws->funcs.get_surface_visual_confirm_color(  				pipe_ctx, &blnd_cfg.black_color);  	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {  		dcn20_get_mpctree_visual_confirm_color( @@ -2083,12 +2184,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  	 */  	mpcc_id = hubp->inst; -	/* If there is no full update, don't need to touch MPC tree*/ -	if (!pipe_ctx->plane_state->update_flags.bits.full_update) { -		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); -		return; -	} -  	/* check if this MPCC is already being used */  	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);  	/* remove MPCC if being used */ @@ -2113,125 +2208,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)  	hubp->mpcc_id = mpcc_id;  } -static int find_free_gsl_group(const struct dc *dc) -{ -	if (dc->res_pool->gsl_groups.gsl_0 == 0) -		return 1; -	if (dc->res_pool->gsl_groups.gsl_1 == 0) -		return 2; -	if (dc->res_pool->gsl_groups.gsl_2 == 0) -		return 3; - -	return 0; -} - -/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) - * This is only used to lock pipes in pipe splitting case with immediate flip - * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with 
immediate, - * so we get tearing with freesync since we cannot flip multiple pipes - * atomically. - * We use GSL for this: - * - immediate flip: find first available GSL group if not already assigned - *                   program gsl with that group, set current OTG as master - *                   and always us 0x4 = AND of flip_ready from all pipes - * - vsync flip: disable GSL if used - * - * Groups in stream_res are stored as +1 from HW registers, i.e. - * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 - * Using a magic value like -1 would require tracking all inits/resets - */ -void dcn20_setup_gsl_group_as_lock( -		const struct dc *dc, -		struct pipe_ctx *pipe_ctx, -		bool enable) -{ -	struct gsl_params gsl; -	int group_idx; - -	memset(&gsl, 0, sizeof(struct gsl_params)); - -	if (enable) { -		/* return if group already assigned since GSL was set up -		 * for vsync flip, we would unassign so it can't be "left over" -		 */ -		if (pipe_ctx->stream_res.gsl_group > 0) -			return; - -		group_idx = find_free_gsl_group(dc); -		ASSERT(group_idx != 0); -		pipe_ctx->stream_res.gsl_group = group_idx; - -		/* set gsl group reg field and mark resource used */ -		switch (group_idx) { -		case 1: -			gsl.gsl0_en = 1; -			dc->res_pool->gsl_groups.gsl_0 = 1; -			break; -		case 2: -			gsl.gsl1_en = 1; -			dc->res_pool->gsl_groups.gsl_1 = 1; -			break; -		case 3: -			gsl.gsl2_en = 1; -			dc->res_pool->gsl_groups.gsl_2 = 1; -			break; -		default: -			BREAK_TO_DEBUGGER(); -			return; // invalid case -		} -		gsl.gsl_master_en = 1; -	} else { -		group_idx = pipe_ctx->stream_res.gsl_group; -		if (group_idx == 0) -			return; // if not in use, just return - -		pipe_ctx->stream_res.gsl_group = 0; - -		/* unset gsl group reg field and mark resource free */ -		switch (group_idx) { -		case 1: -			gsl.gsl0_en = 0; -			dc->res_pool->gsl_groups.gsl_0 = 0; -			break; -		case 2: -			gsl.gsl1_en = 0; -			dc->res_pool->gsl_groups.gsl_1 = 0; -			break; -		case 3: -			gsl.gsl2_en = 0; -			
dc->res_pool->gsl_groups.gsl_2 = 0; -			break; -		default: -			BREAK_TO_DEBUGGER(); -			return; -		} -		gsl.gsl_master_en = 0; -	} - -	/* at this point we want to program whether it's to enable or disable */ -	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && -		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { -		pipe_ctx->stream_res.tg->funcs->set_gsl( -			pipe_ctx->stream_res.tg, -			&gsl); - -		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( -			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0); -	} else -		BREAK_TO_DEBUGGER(); -} - -static void dcn20_set_flip_control_gsl( -		struct pipe_ctx *pipe_ctx, -		bool flip_immediate) -{ -	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) -		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( -				pipe_ctx->plane_res.hubp, flip_immediate); - -} - -static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  {  	enum dc_lane_count lane_count =  		pipe_ctx->stream->link->cur_link_settings.lane_count; @@ -2279,7 +2256,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  	}  } -static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)  {  	struct dc_stream_state    *stream     = pipe_ctx->stream;  	struct hubp               *hubp       = pipe_ctx->plane_res.hubp; @@ -2305,7 +2282,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)  						hubp->inst, mode);  } -static void dcn20_fpga_init_hw(struct dc *dc) +void dcn20_fpga_init_hw(struct dc *dc)  {  	int i, j;  	struct dce_hwseq *hws = dc->hwseq; @@ -2320,13 +2297,13 @@ static void dcn20_fpga_init_hw(struct dc *dc)  		res_pool->dccg->funcs->dccg_init(res_pool->dccg);  	//Enable ability to power gate / don't force power on permanently -	dc->hwss.enable_power_gating_plane(hws, true); +	hws->funcs.enable_power_gating_plane(hws, true);  	// Specific 
to FPGA dccg and registers  	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);  	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); -	dcn20_dccg_init(hws); +	hws->funcs.dccg_init(hws);  	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);  	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); @@ -2390,7 +2367,7 @@ static void dcn20_fpga_init_hw(struct dc *dc)  		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;  		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];  		/*to do*/ -		hwss1_plane_atomic_disconnect(dc, pipe_ctx); +		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);  	}  	/* initialize DWB pointer to MCIF_WB */ @@ -2419,57 +2396,3 @@ static void dcn20_fpga_init_hw(struct dc *dc)  		tg->funcs->tg_init(tg);  	}  } - -void dcn20_hw_sequencer_construct(struct dc *dc) -{ -	dcn10_hw_sequencer_construct(dc); -	dc->hwss.unblank_stream = dcn20_unblank_stream; -	dc->hwss.update_plane_addr = dcn20_update_plane_addr; -	dc->hwss.enable_stream_timing = dcn20_enable_stream_timing; -	dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; -	dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; -	dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; -	dc->hwss.apply_ctx_for_surface = NULL; -	dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; -	dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; -	dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; -	dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; -	dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth; -	dc->hwss.update_bandwidth = dcn20_update_bandwidth; -	dc->hwss.enable_writeback = dcn20_enable_writeback; -	dc->hwss.disable_writeback = dcn20_disable_writeback; -	dc->hwss.program_output_csc = dcn20_program_output_csc; -	dc->hwss.update_odm = dcn20_update_odm; -	dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; -	dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; -	
dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; -	dc->hwss.enable_stream = dcn20_enable_stream; -	dc->hwss.disable_stream = dcn20_disable_stream; -	dc->hwss.init_sys_ctx = dcn20_init_sys_ctx; -	dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; -	dc->hwss.disable_stream_gating = dcn20_disable_stream_gating; -	dc->hwss.enable_stream_gating = dcn20_enable_stream_gating; -	dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt; -	dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap; -	dc->hwss.update_mpcc = dcn20_update_mpcc; -	dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl; -	dc->hwss.init_blank = dcn20_init_blank; -	dc->hwss.disable_plane = dcn20_disable_plane; -	dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable; -	dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; -	dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; -	dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -	dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; -#else -	dc->hwss.dsc_pg_control = NULL; -#endif -	dc->hwss.disable_vga = dcn20_disable_vga; - -	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -		dc->hwss.init_hw = dcn20_fpga_init_hw; -		dc->hwss.init_pipes = NULL; -	} - - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 3098f1049ed7..02c9be5ebd47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -26,90 +26,112 @@  #ifndef __DC_HWSS_DCN20_H__  #define __DC_HWSS_DCN20_H__ -struct dc; +#include "hw_sequencer_private.h" -void dcn20_hw_sequencer_construct(struct dc *dc); - -enum dc_status dcn20_enable_stream_timing( -		struct pipe_ctx *pipe_ctx, -		struct dc_state *context, -		struct dc *dc); - -void dcn20_blank_pixel_data( +bool dcn20_set_blend_lut( +	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( +	struct 
pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_program_front_end_for_ctx(  		struct dc *dc, -		struct pipe_ctx *pipe_ctx, -		bool blank); - +		struct dc_state *context); +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +			const struct dc_plane_state *plane_state); +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, +			const struct dc_stream_state *stream);  void dcn20_program_output_csc(struct dc *dc,  		struct pipe_ctx *pipe_ctx,  		enum dc_color_space colorspace,  		uint16_t *matrix,  		int opp_id); - +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); +void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, +		struct dc_link_settings *link_settings); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_blank_pixel_data( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool blank); +void dcn20_pipe_control_lock( +	struct dc *dc, +	struct pipe_ctx *pipe, +	bool lock); +void dcn20_pipe_control_lock_global( +		struct dc *dc, +		struct pipe_ctx *pipe, +		bool lock);  void dcn20_prepare_bandwidth(  		struct dc *dc,  		struct dc_state *context); -  void dcn20_optimize_bandwidth(  		struct dc *dc,  		struct dc_state *context); -  bool dcn20_update_bandwidth(  		struct dc *dc,  		struct dc_state *context); - +void dcn20_reset_hw_ctx_wrap( +		struct dc *dc, +		struct dc_state *context); +enum dc_status dcn20_enable_stream_timing( +		struct pipe_ctx *pipe_ctx, +		struct dc_state *context, +		struct dc *dc); +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_init_blank( +		struct dc *dc, +		struct timing_generator *tg); +void 
dcn20_disable_vga( +	struct dce_hwseq *hws); +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_power_gating_plane( +	struct dce_hwseq *hws, +	bool enable); +void dcn20_dpp_pg_control( +		struct dce_hwseq *hws, +		unsigned int dpp_inst, +		bool power_on); +void dcn20_hubp_pg_control( +		struct dce_hwseq *hws, +		unsigned int hubp_inst, +		bool power_on); +void dcn20_program_triple_buffer( +	const struct dc *dc, +	struct pipe_ctx *pipe_ctx, +	bool enable_triple_buffer); +void dcn20_enable_writeback( +		struct dc *dc, +		struct dc_writeback_info *wb_info, +		struct dc_state *context);  void dcn20_disable_writeback(  		struct dc *dc,  		unsigned int dwb_pipe_inst); - -bool dcn20_hwss_wait_for_blank_complete( -		struct output_pixel_processor *opp); - -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, -			const struct dc_stream_state *stream); - -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, -			const struct dc_plane_state *plane_state); - +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);  bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx); - +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);  void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); - -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx); - -void dcn20_program_tripleBuffer( -		const struct dc *dc, -		struct pipe_ctx *pipe_ctx, -		bool enableTripleBuffer); - -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx); - -void dcn20_pipe_control_lock_global( +void dcn20_init_vm_ctx( +		struct dce_hwseq *hws,  		struct dc *dc, -		struct pipe_ctx *pipe, -		bool lock); -void dcn20_setup_gsl_group_as_lock(const struct dc *dc, -				struct pipe_ctx *pipe_ctx, -				bool enable); -void dcn20_dccg_init(struct dce_hwseq *hws); -void dcn20_init_blank( -	   struct dc *dc, -	   struct timing_generator *tg); -void dcn20_display_init(struct dc *dc); -void dcn20_pipe_control_lock( -	
struct dc *dc, -	struct pipe_ctx *pipe, -	bool lock); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn20_enable_plane( -	struct dc *dc, -	struct pipe_ctx *pipe_ctx, -	struct dc_state *context); -bool dcn20_set_blend_lut( -	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -bool dcn20_set_shaper_3dlut( -	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -void dcn20_get_mpctree_visual_confirm_color( +		struct dc_virtual_addr_space_config *va_config, +		int vmid); +void dcn20_set_flip_control_gsl(  		struct pipe_ctx *pipe_ctx, -		struct tg_color *color); +		bool flip_immediate); +void dcn20_dsc_pg_control( +		struct dce_hwseq *hws, +		unsigned int dsc_inst, +		bool power_on); +void dcn20_fpga_init_hw(struct dc *dc); +bool dcn20_wait_for_blank_complete( +		struct output_pixel_processor *opp); +void dcn20_dccg_init(struct dce_hwseq *hws); +int dcn20_init_sys_ctx(struct dce_hwseq *hws, +		struct dc *dc, +		struct dc_phy_addr_space_config *pa_config); +  #endif /* __DC_HWSS_DCN20_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c new file mode 100644 index 000000000000..d51e02fdab4d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_hwseq.h" + +static const struct hw_sequencer_funcs dcn20_funcs = { +	.program_gamut_remap = dcn10_program_gamut_remap, +	.init_hw = dcn10_init_hw, +	.apply_ctx_to_hw = dce110_apply_ctx_to_hw, +	.apply_ctx_for_surface = NULL, +	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx, +	.update_plane_addr = dcn20_update_plane_addr, +	.update_dchub = dcn10_update_dchub, +	.update_pending_status = dcn10_update_pending_status, +	.program_output_csc = dcn20_program_output_csc, +	.enable_accelerated_mode = dce110_enable_accelerated_mode, +	.enable_timing_synchronization = dcn10_enable_timing_synchronization, +	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, +	.update_info_frame = dce110_update_info_frame, +	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message, +	.enable_stream = dcn20_enable_stream, +	.disable_stream = dce110_disable_stream, +	.unblank_stream = dcn20_unblank_stream, +	.blank_stream = dce110_blank_stream, +	.enable_audio_stream = dce110_enable_audio_stream, +	.disable_audio_stream = dce110_disable_audio_stream, +	.disable_plane = dcn20_disable_plane, +	.pipe_control_lock = dcn20_pipe_control_lock, +	.pipe_control_lock_global = dcn20_pipe_control_lock_global, +	.prepare_bandwidth = dcn20_prepare_bandwidth, +	.optimize_bandwidth = dcn20_optimize_bandwidth, +	.update_bandwidth = 
dcn20_update_bandwidth, +	.set_drr = dcn10_set_drr, +	.get_position = dcn10_get_position, +	.set_static_screen_control = dcn10_set_static_screen_control, +	.setup_stereo = dcn10_setup_stereo, +	.set_avmute = dce110_set_avmute, +	.log_hw_state = dcn10_log_hw_state, +	.get_hw_state = dcn10_get_hw_state, +	.clear_status_bits = dcn10_clear_status_bits, +	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, +	.edp_power_control = dce110_edp_power_control, +	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, +	.set_cursor_position = dcn10_set_cursor_position, +	.set_cursor_attribute = dcn10_set_cursor_attribute, +	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, +	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt, +	.set_clock = dcn10_set_clock, +	.get_clock = dcn10_get_clock, +	.program_triplebuffer = dcn20_program_triple_buffer, +	.enable_writeback = dcn20_enable_writeback, +	.disable_writeback = dcn20_disable_writeback, +	.dmdata_status_done = dcn20_dmdata_status_done, +	.program_dmdata_engine = dcn20_program_dmdata_engine, +	.set_dmdata_attributes = dcn20_set_dmdata_attributes, +	.init_sys_ctx = dcn20_init_sys_ctx, +	.init_vm_ctx = dcn20_init_vm_ctx, +	.set_flip_control_gsl = dcn20_set_flip_control_gsl, +	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn20_private_funcs = { +	.init_pipes = dcn10_init_pipes, +	.update_plane_addr = dcn20_update_plane_addr, +	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect, +	.update_mpcc = dcn20_update_mpcc, +	.set_input_transfer_func = dcn20_set_input_transfer_func, +	.set_output_transfer_func = dcn20_set_output_transfer_func, +	.power_down = dce110_power_down, +	.enable_display_power_gating = dcn10_dummy_display_power_gating, +	.blank_pixel_data = dcn20_blank_pixel_data, +	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, +	.enable_stream_timing = dcn20_enable_stream_timing, +	.edp_backlight_control = 
dce110_edp_backlight_control, +	.disable_stream_gating = dcn20_disable_stream_gating, +	.enable_stream_gating = dcn20_enable_stream_gating, +	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, +	.did_underflow_occur = dcn10_did_underflow_occur, +	.init_blank = dcn20_init_blank, +	.disable_vga = dcn20_disable_vga, +	.bios_golden_init = dcn10_bios_golden_init, +	.plane_atomic_disable = dcn20_plane_atomic_disable, +	.plane_atomic_power_down = dcn10_plane_atomic_power_down, +	.enable_power_gating_plane = dcn20_enable_power_gating_plane, +	.dpp_pg_control = dcn20_dpp_pg_control, +	.hubp_pg_control = dcn20_hubp_pg_control, +	.dsc_pg_control = NULL, +	.update_odm = dcn20_update_odm, +	.dsc_pg_control = dcn20_dsc_pg_control, +	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, +	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, +	.set_hdr_multiplier = dcn10_set_hdr_multiplier, +	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +	.wait_for_blank_complete = dcn20_wait_for_blank_complete, +	.dccg_init = dcn20_dccg_init, +	.set_blend_lut = dcn20_set_blend_lut, +	.set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn20_hw_sequencer_construct(struct dc *dc) +{ +	dc->hwss = dcn20_funcs; +	dc->hwseq->funcs = dcn20_private_funcs; + +	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { +		dc->hwss.init_hw = dcn20_fpga_init_hw; +		dc->hwseq->funcs.init_pipes = NULL; +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h new file mode 100644 index 000000000000..12277797cd71 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN20_INIT_H__ +#define __DC_DCN20_INIT_H__ + +struct dc; + +void dcn20_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index e476f27aa3a9..e4ac73035c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {  void enc2_fec_set_enable(struct link_encoder *enc, bool enable)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	DC_LOG_DSC("%s FEC at link encoder inst %d",  			enable ? 
"Enabling" : "Disabling", enc->id.enum_id); -#endif  	REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);  } @@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)  	return (active != 0);  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  /* this function reads dsc related register fields to be logged later in dcn10_log_hw_state   * into a dcn_dsc_state struct.   */ @@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)  	REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);  	REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);  	REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status); +	REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);  } -#endif  static bool update_cfg_data(  		struct dcn10_link_encoder *enc10, @@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc)  }  static const struct link_encoder_funcs dcn20_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.read_state = link_enc2_read_state, -#endif  	.validate_output_with_stream =  		dcn10_link_encoder_validate_output_with_stream,  	.hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 0c98a0bbbd14..8cab8107fd94 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -33,7 +33,142 @@  	SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)  #define UNIPHY_MASK_SH_LIST(mask_sh)\ -	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh) +	LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\ +	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\ +	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ +	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ +	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ +	
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh) + +#define DPCS_MASK_SH_LIST(mask_sh)\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, 
RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, 
RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\ +	
LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ +	LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh) + +#define 
DPCS_DCN2_MASK_SH_LIST(mask_sh)\ +	DPCS_MASK_SH_LIST(mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)  #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\  	LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\ @@ -63,6 +198,49 @@  	SRI(CLOCK_ENABLE, SYMCLK, id), \  	SRI(CHANNEL_XBAR_CNTL, UNIPHY, id) +#define DPCS_DCN2_CMN_REG_LIST(id) \ +	SRI(DIG_LANE_ENABLE, DIG, id), \ +	SRI(TMDS_CTL_BITS, DIG, id), \ +	SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, 
id), \ +	SRI(RDPCSTX_CNTL, RDPCSTX, id), \ +	SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \ +	SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \ +	SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \ +	SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \ +	SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ +	SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ +	SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ +	SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ +	SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ +	SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ +	SR(RDPCSTX0_RDPCSTX_SCRATCH) + + +#define DPCS_DCN2_REG_LIST(id) \ +	DPCS_DCN2_CMN_REG_LIST(id), \ +	SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ +	SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) + +#define LE_DCN2_REG_LIST(id) \ +		LE_DCN10_REG_LIST(id), \ +		SR(DCIO_SOFT_RESET) +  struct mpll_cfg {  	uint32_t mpllb_ana_v2i;  	uint32_t mpllb_ana_freq_vco; @@ -158,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);  bool enc2_fec_is_active(struct link_encoder *enc);  void enc2_hw_init(struct link_encoder *enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s); -#endif  void dcn20_link_encoder_enable_dp_output(  	struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5a188b2bc033..de9c857ab3e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -33,6 +33,9 @@  #define REG(reg)\  	mpc20->mpc_regs->reg +#define IND_REG(index) \ +	(index) +  #define CTX \  	mpc20->base.ctx @@ -132,19 +135,33 @@ void mpc2_set_output_csc(  		const uint16_t *regval,  		enum mpc_output_csc_mode ocsc_mode)  { +	uint32_t cur_mode;  	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);  	struct 
color_matrices_reg ocsc_regs; -	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - -	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) +	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { +		REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);  		return; +	}  	if (regval == NULL) {  		BREAK_TO_DEBUGGER();  		return;  	} +	/* determine which CSC coefficients (A or B) we are using +	 * currently.  select the alternate set to double buffer +	 * the CSC update so CSC is updated on frame boundary +	 */ +	IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, +						MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, +						MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + +	if (cur_mode != MPC_OUTPUT_CSC_COEF_A) +		ocsc_mode = MPC_OUTPUT_CSC_COEF_A; +	else +		ocsc_mode = MPC_OUTPUT_CSC_COEF_B; +  	ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;  	ocsc_regs.masks.csc_c11  = mpc20->mpc_mask->MPC_OCSC_C11_A;  	ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -157,10 +174,13 @@ void mpc2_set_output_csc(  		ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);  		ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);  	} +  	cm_helper_program_color_matrices(  			mpc20->base.ctx,  			regval,  			&ocsc_regs); + +	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);  }  void mpc2_set_ocsc_default( @@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(  		enum dc_color_space color_space,  		enum mpc_output_csc_mode ocsc_mode)  { +	uint32_t cur_mode;  	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);  	uint32_t arr_size;  	struct color_matrices_reg ocsc_regs;  	const uint16_t *regval = NULL; -	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); -	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) +	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { +		REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);  		return; +	}  	regval = find_color_matrix(color_space, &arr_size); @@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(  		return;  	} +	/* determine which CSC coefficients (A or B) we are 
using +	 * currently.  select the alternate set to double buffer +	 * the CSC update so CSC is updated on frame boundary +	 */ +	IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, +						MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, +						MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + +	if (cur_mode != MPC_OUTPUT_CSC_COEF_A) +		ocsc_mode = MPC_OUTPUT_CSC_COEF_A; +	else +		ocsc_mode = MPC_OUTPUT_CSC_COEF_B; +  	ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;  	ocsc_regs.masks.csc_c11  = mpc20->mpc_mask->MPC_OCSC_C11_A;  	ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(  			mpc20->base.ctx,  			regval,  			&ocsc_regs); + +	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);  }  static void mpc2_ogam_get_reg_field( @@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl(  	uint32_t i;  	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); +	PERF_TRACE(); +	REG_SEQ_START(); +  	for (i = 0 ; i < num; i++) {  		REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);  		REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); @@ -463,6 +503,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)  		ASSERT(!mpc_disabled);  		ASSERT(!mpc_idle);  	} + +	REG_SEQ_SUBMIT(); +	PERF_TRACE(); +	REG_SEQ_WAIT_DONE(); +	PERF_TRACE();  }  static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h index 9f53192da2dc..c78fd5123497 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h @@ -80,6 +80,10 @@  	SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\  	SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst) +#define MPC_DBG_REG_LIST_DCN2_0() \ +	SR(MPC_OCSC_TEST_DEBUG_DATA),\ +	SR(MPC_OCSC_TEST_DEBUG_INDEX) +  #define MPC_REG_VARIABLE_LIST_DCN2_0 \  	MPC_COMMON_REG_VARIABLE_LIST \  	uint32_t 
MPCC_TOP_GAIN[MAX_MPCC]; \ @@ -118,6 +122,8 @@  	uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\  	uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\  	uint32_t MPCC_OGAM_MODE[MAX_MPCC];\ +	uint32_t MPC_OCSC_TEST_DEBUG_DATA;\ +	uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\  	uint32_t CSC_MODE[MAX_OPP]; \  	uint32_t CSC_C11_C12_A[MAX_OPP]; \  	uint32_t CSC_C33_C34_A[MAX_OPP]; \ @@ -134,6 +140,7 @@  	SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\  	SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\  	SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ +	SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\  	SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\  	SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\  	SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ @@ -174,6 +181,19 @@  	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\  	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) +/* + *	DCN2 MPC_OCSC debug status register: + * + *		Status index including current OCSC Mode is 1 + *			OCSC Mode: [1..0] + */ +#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1 + +#define MPC_DEBUG_REG_LIST_SH_DCN20 \ +	.MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0 + +#define MPC_DEBUG_REG_LIST_MASK_DCN20 \ +	.MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3  #define MPC_REG_FIELD_LIST_DCN2_0(type) \  	MPC_REG_FIELD_LIST(type)\ @@ -182,6 +202,8 @@  	type MPCC_TOP_GAIN;\  	type MPCC_BOT_GAIN_INSIDE;\  	type MPCC_BOT_GAIN_OUTSIDE;\ +	type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\ +	type MPC_OCSC_TEST_DEBUG_INDEX;\  	type MPC_OCSC_MODE;\  	type MPC_OCSC_C11_A;\  	type MPC_OCSC_C12_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 40164ed015ea..023cc71fad0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -41,6 +41,7 @@  void opp2_set_disp_pattern_generator(  		struct output_pixel_processor *opp,  		
enum controller_dp_test_pattern test_pattern, +		enum controller_dp_color_space color_space,  		enum dc_color_depth color_depth,  		const struct tg_color *solid_color,  		int width, @@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator(  				TEST_PATTERN_DYN_RANGE_CEA :  				TEST_PATTERN_DYN_RANGE_VESA); +		switch (color_space) { +		case CONTROLLER_DP_COLOR_SPACE_YCBCR601: +			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; +		break; +		case CONTROLLER_DP_COLOR_SPACE_YCBCR709: +			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; +		break; +		case CONTROLLER_DP_COLOR_SPACE_RGB: +		default: +			mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; +		break; +		} +  		REG_UPDATE_6(DPG_CONTROL,  			DPG_EN, 1, -			DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB, +			DPG_MODE, mode,  			DPG_DYNAMIC_RANGE, dyn_range,  			DPG_BIT_DEPTH, bit_depth,  			DPG_VRES, 6, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index abd8de9a78f8..4093bec172c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,  void opp2_set_disp_pattern_generator(  	struct output_pixel_processor *opp,  	enum controller_dp_test_pattern test_pattern, +	enum controller_dp_color_space color_space,  	enum dc_color_depth color_depth,  	const struct tg_color *solid_color,  	int width, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 3b613fb93ef8..d875b0c38fde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)  	REG_UPDATE(CONTROL,  			VTG0_ENABLE, 1); +	REG_SEQ_START(); +  	/* Enable CRTC */  	REG_UPDATE_2(OTG_CONTROL,  			OTG_DISABLE_POINT_CNTL, 3,  			OTG_MASTER_EN, 1); +	REG_SEQ_SUBMIT(); +	REG_SEQ_WAIT_DONE(); 
+  	return true;  } @@ -167,7 +172,6 @@ void optc2_set_gsl_source_select(  	}  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  /* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */  void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,  					int x_position, @@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc,  	REG_UPDATE(OPTC_WIDTH_CONTROL,  		OPTC_DSC_SLICE_WIDTH, dsc_slice_width);  } -#endif -/** - * PTI i think is already done somewhere else for 2ka - * (opp?, please double check. - * OPTC side only has 1 register to set for PTI_ENABLE) - */ +/*TEMP: Need to figure out inheritance model here.*/ +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ +	return optc1_is_two_pixels_per_containter(timing); +}  void optc2_set_odm_bypass(struct timing_generator *optc,  		const struct dc_crtc_timing *dc_crtc_timing) @@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc,  			OPTC_SEG1_SRC_SEL, 0xf);  	REG_WRITE(OTG_H_TIMING_CNTL, 0); -	h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing); +	h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);  	REG_UPDATE(OTG_H_TIMING_CNTL,  			OTG_H_TIMING_DIV_BY2, h_div_2);  	REG_SET(OPTC_MEMORY_CONFIG, 0, @@ -233,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c  		struct dc_crtc_timing *timing)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); -	/* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */  	int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)  			/ opp_cnt; -	int memory_mask = mpcc_hactive <= 2560 ? 
0x3 : 0xf; +	uint32_t memory_mask;  	uint32_t data_fmt = 0; +	ASSERT(opp_cnt == 2); +  	/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic  	 * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);  	 * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start @@ -246,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c  	 *		MASTER_UPDATE_LOCK_DB_X, 160,  	 *		MASTER_UPDATE_LOCK_DB_Y, 240);  	 */ + +	/* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, +	 * however, for ODM combine we can simplify by always using 4. +	 * To make sure there's no overlap, each instance "reserves" 2 memories and +	 * they are uniquely combined here. +	 */ +	memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); +  	if (REG(OPTC_MEMORY_CONFIG))  		REG_SET(OPTC_MEMORY_CONFIG, 0, -			OPTC_MEM_SEL, memory_mask << (optc->inst * 4)); +			OPTC_MEM_SEL, memory_mask);  	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)  		data_fmt = 1; @@ -257,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c  	REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); -	ASSERT(opp_cnt == 2);  	REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,  			OPTC_NUM_OF_INPUT_SEGMENT, 1,  			OPTC_SEG0_SRC_SEL, opp_id[0], @@ -379,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); -	REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, -			MANUAL_FLOW_CONTROL, 1); - -	REG_SET(OTG_GLOBAL_CONTROL2, 0, -			MANUAL_FLOW_CONTROL_SEL, optc->inst); -  	REG_SET_8(OTG_TRIGA_CNTL, 0, -			OTG_TRIGA_SOURCE_SELECT, 22, +			OTG_TRIGA_SOURCE_SELECT, 21,  			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,  			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,  			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, @@ -448,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {  		.setup_global_swap_lock = NULL,  		
.get_crc = optc1_get_crc,  		.configure_crc = optc1_configure_crc, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		.set_dsc_config = optc2_set_dsc_config, -#endif  		.set_dwb_source = optc2_set_dwb_source,  		.set_odm_bypass = optc2_set_odm_bypass,  		.set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 32a58431fd09..239cc40ae474 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,  		int group_idx,  		uint32_t gsl_ready_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  void optc2_set_dsc_config(struct timing_generator *optc,  					enum optc_dsc_mode dsc_mode,  					uint32_t dsc_bytes_per_pixel,  					uint32_t dsc_slice_width); -#endif  void optc2_set_odm_bypass(struct timing_generator *optc,  		const struct dc_crtc_timing *dc_crtc_timing); @@ -108,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc);  void optc2_triplebuffer_unlock(struct timing_generator *optc);  void optc2_lock_doublebuffer_disable(struct timing_generator *optc);  void optc2_lock_doublebuffer_enable(struct timing_generator *optc); +void optc2_setup_manual_trigger(struct timing_generator *optc);  void optc2_program_manual_trigger(struct timing_generator *optc); - +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);  #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 09793336d84f..85f90f3e24cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1,5 +1,6 @@  /*  * Copyright 2016 Advanced Micro Devices, Inc. 
+ * Copyright 2019 Raptor Engineering, LLC   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), @@ -28,6 +29,8 @@  #include "dm_services.h"  #include "dc.h" +#include "dcn20_init.h" +  #include "resource.h"  #include "include/irq_service_interface.h"  #include "dcn20/dcn20_resource.h" @@ -45,9 +48,7 @@  #include "dcn10/dcn10_resource.h"  #include "dcn20_opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dcn20_dsc.h" -#endif  #include "dcn20_link_encoder.h"  #include "dcn20_stream_encoder.h" @@ -59,11 +60,14 @@  #include "dml/display_mode_vba.h"  #include "dcn20_dccg.h"  #include "dcn20_vmid.h" +#include "dc_link_ddc.h"  #include "navi10_ip_offset.h"  #include "dcn/dcn_2_0_0_offset.h"  #include "dcn/dcn_2_0_0_sh_mask.h" +#include "dpcs/dpcs_2_0_0_offset.h" +#include "dpcs/dpcs_2_0_0_sh_mask.h"  #include "nbio/nbio_2_3_offset.h" @@ -82,8 +86,6 @@  #include "amdgpu_socbb.h" -/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. 
*/ -#define SOC_BOUNDING_BOX_VALID false  #define DC_LOGGER_INIT(logger)  struct _vcs_dpi_ip_params_st dcn2_0_ip = { @@ -94,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {  	.hostvm_max_page_table_levels = 4,  	.hostvm_cached_page_table_levels = 0,  	.pte_group_size_bytes = 2048, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.num_dsc = 6, -#else -	.num_dsc = 0, -#endif  	.rob_buffer_size_kbytes = 168,  	.det_buffer_size_kbytes = 164,  	.dpte_buffer_size_in_pte_reqs_luma = 84, @@ -553,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {  [id] = {\  	LE_DCN10_REG_LIST(id), \  	UNIPHY_DCN2_REG_LIST(phyid), \ +	DPCS_DCN2_REG_LIST(id), \  	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \  } @@ -566,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {  };  static const struct dcn10_link_enc_shift le_shift = { -	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ +	DPCS_DCN2_MASK_SH_LIST(__SHIFT)  };  static const struct dcn10_link_enc_mask le_mask = { -	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ +	DPCS_DCN2_MASK_SH_LIST(_MASK)  };  #define ipp_regs(id)\ @@ -637,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {  #define tf_regs(id)\  [id] = {\  	TF_REG_LIST_DCN20(id),\ +	TF_REG_LIST_DCN20_COMMON_APPEND(id),\  }  static const struct dcn2_dpp_registers tf_regs[] = { @@ -650,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = {  static const struct dcn2_dpp_shift tf_shift = {  		TF_REG_LIST_SH_MASK_DCN20(__SHIFT), -		TF_DEBUG_REG_LIST_SH_DCN10 +		TF_DEBUG_REG_LIST_SH_DCN20  };  static const struct dcn2_dpp_mask tf_mask = {  		TF_REG_LIST_SH_MASK_DCN20(_MASK), -		TF_DEBUG_REG_LIST_MASK_DCN10 +		TF_DEBUG_REG_LIST_MASK_DCN20  };  #define dwbc_regs_dcn2(id)\ @@ -705,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {  		MPC_OUT_MUX_REG_LIST_DCN2_0(3),  		MPC_OUT_MUX_REG_LIST_DCN2_0(4),  		
MPC_OUT_MUX_REG_LIST_DCN2_0(5), +		MPC_DBG_REG_LIST_DCN2_0()  };  static const struct dcn20_mpc_shift mpc_shift = { -	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), +	MPC_DEBUG_REG_LIST_SH_DCN20  };  static const struct dcn20_mpc_mask mpc_mask = { -	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), +	MPC_DEBUG_REG_LIST_MASK_DCN20  };  #define tg_regs(id)\ @@ -838,7 +843,6 @@ static int map_transmitter_id_to_phy_instance(  	}  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #define dsc_regsDCN20(id)\  [id] = {\  	DSC_REG_LIST_DCN20(id)\ @@ -860,7 +864,6 @@ static const struct dcn20_dsc_shift dsc_shift = {  static const struct dcn20_dsc_mask dsc_mask = {  	DSC_REG_LIST_SH_MASK_DCN20(_MASK)  }; -#endif  static const struct dccg_registers dccg_regs = {  		DCCG_REG_LIST_DCN2() @@ -884,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = {  		.num_dwb = 1,  		.num_ddc = 6,  		.num_vmid = 16, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		.num_dsc = 6, -#endif  };  static const struct dc_plane_cap plane_cap = { @@ -1282,7 +1283,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src)  	*clk_src = NULL;  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct display_stream_compressor *dcn20_dsc_create(  	struct dc_context *ctx, uint32_t inst) @@ -1305,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)  	*dsc = NULL;  } -#endif -static void destruct(struct dcn20_resource_pool *pool) +static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)  {  	unsigned int i; @@ -1318,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool)  		}  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {  		if (pool->base.dscs[i] != NULL)  			dcn20_dsc_destroy(&pool->base.dscs[i]);  	} -#endif  	if (pool->base.mpc != NULL) {  		kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -1416,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool)  	
if (pool->base.pp_smu != NULL)  		dcn20_pp_smu_destroy(&pool->base.pp_smu); +	if (pool->base.oem_device != NULL) +		dal_ddc_service_destroy(&pool->base.oem_device);  }  struct hubp *dcn20_hubp_create( @@ -1466,7 +1465,7 @@ static void get_pixel_clock_parameters(  	if (opp_cnt == 4)  		pixel_clk_params->requested_pix_clk_100hz /= 4; -	else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) +	else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)  		pixel_clk_params->requested_pix_clk_100hz /= 2;  	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) @@ -1532,17 +1531,23 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state  	return status;  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  static void acquire_dsc(struct resource_context *res_ctx,  			const struct resource_pool *pool, -			struct display_stream_compressor **dsc) +			struct display_stream_compressor **dsc, +			int pipe_idx)  {  	int i;  	ASSERT(*dsc == NULL);  	*dsc = NULL; +	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { +		*dsc = pool->dscs[pipe_idx]; +		res_ctx->is_dsc_acquired[pipe_idx] = true; +		return; +	} +  	/* Find first free DSC */  	for (i = 0; i < pool->res_cap->num_dsc; i++)  		if (!res_ctx->is_dsc_acquired[i]) { @@ -1566,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx,  		}  } -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static enum dc_status add_dsc_to_stream_resource(struct dc *dc, +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,  		struct dc_state *dc_ctx,  		struct dc_stream_state *dc_stream)  { @@ -1585,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,  		if (pipe_ctx->stream != dc_stream)  			continue; -		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); +		if (pipe_ctx->stream_res.dsc) +			continue; + +		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);  		/* The number of DSCs can be less 
than the number of pipes */  		if (!pipe_ctx->stream_res.dsc) { -			dm_output_to_console("No DSCs available\n");  			result = DC_NO_DSC_RESOURCE;  		} @@ -1621,7 +1626,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,  	else  		return DC_OK;  } -#endif  enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) @@ -1633,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,  	if (result == DC_OK)  		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	/* Get a DSC if required and available */  	if (result == DC_OK && dc_stream->timing.flags.DSC) -		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); -#endif +		result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);  	if (result == DC_OK)  		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); @@ -1650,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_  {  	enum dc_status result = DC_OK; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); -#endif  	return result;  } @@ -1735,9 +1735,7 @@ bool dcn20_split_stream_for_odm(  	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];  	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];  	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	next_odm_pipe->stream_res.dsc = NULL; -#endif  	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {  		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;  		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; @@ -1783,14 +1781,12 @@ bool dcn20_split_stream_for_odm(  		sd->recout.x = 0;  	}  	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; -#ifdef 
CONFIG_DRM_AMD_DC_DSC_SUPPORT  	if (next_odm_pipe->stream->timing.flags.DSC == 1) { -		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); +		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);  		ASSERT(next_odm_pipe->stream_res.dsc);  		if (next_odm_pipe->stream_res.dsc == NULL)  			return false;  	} -#endif  	return true;  } @@ -1814,9 +1810,7 @@ void dcn20_split_stream_for_mpc(  	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];  	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];  	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	secondary_pipe->stream_res.dsc = NULL; -#endif  	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {  		ASSERT(!secondary_pipe->bottom_pipe);  		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; @@ -1867,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context(  } +static int get_num_odm_heads(struct pipe_ctx *pipe) +{ +	int odm_head_count = 0; +	struct pipe_ctx *next_pipe = pipe->next_odm_pipe; +	while (next_pipe) { +		odm_head_count++; +		next_pipe = next_pipe->next_odm_pipe; +	} +	pipe = pipe->prev_odm_pipe; +	while (pipe) { +		odm_head_count++; +		pipe = pipe->prev_odm_pipe; +	} +	return odm_head_count ? 
odm_head_count + 1 : 0; +} +  int dcn20_populate_dml_pipes_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)  {  	int pipe_cnt, i;  	bool synchronized_vblank = true; +	struct resource_context *res_ctx = &context->res_ctx;  	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {  		if (!res_ctx->pipe_ctx[i].stream) @@ -1891,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context(  	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {  		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; +		unsigned int v_total; +		unsigned int front_porch;  		int output_bpc;  		if (!res_ctx->pipe_ctx[i].stream)  			continue; + +		v_total = timing->v_total; +		front_porch = timing->v_front_porch;  		/* todo:  		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;  		pipes[pipe_cnt].pipe.src.dcc = 0;  		pipes[pipe_cnt].pipe.src.vm = 0;*/ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; +  		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;  		/* todo: rotation?*/  		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; -#endif  		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {  			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;  			/* 1/2 vblank */  			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = -				(timing->v_total - timing->v_addressable +				(v_total - timing->v_addressable  					- timing->v_border_top - timing->v_border_bottom) / 2;  			/* 36 bytes dp, 32 hdmi */  			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = @@ -1923,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context(  				- timing->h_addressable  				- timing->h_border_left  				- timing->h_border_right; -		pipes[pipe_cnt].pipe.dest.vblank_start = 
timing->v_total - timing->v_front_porch; +		pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;  		pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start  				- timing->v_addressable  				- timing->v_border_top  				- timing->v_border_bottom;  		pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; -		pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; +		pipes[pipe_cnt].pipe.dest.vtotal = v_total;  		pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;  		pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;  		pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; @@ -1940,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context(  		pipes[pipe_cnt].dout.dp_lanes = 4;  		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;  		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; -		pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe -							|| res_ctx->pipe_ctx[i].next_odm_pipe; +		switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) { +		case 2: +			pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; +			break; +		default: +			pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled; +		}  		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;  		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state  				== res_ctx->pipe_ctx[i].plane_state) @@ -1992,14 +2013,12 @@ int dcn20_populate_dml_pipes_from_context(  		case COLOR_DEPTH_161616:  			output_bpc = 16;  			break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  		case COLOR_DEPTH_999:  			output_bpc = 9;  			break;  		case COLOR_DEPTH_111111:  			output_bpc = 11;  			break; -#endif  		default:  			output_bpc = 8;  			break; @@ -2027,10 +2046,8 @@ int dcn20_populate_dml_pipes_from_context(  			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;  		} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		if 
(res_ctx->pipe_ctx[i].stream->timing.flags.DSC)  			pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; -#endif  		/* todo: default max for now, until there is logic reflecting this in dc*/  		pipes[pipe_cnt].dout.output_bpc = 12; @@ -2054,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context(  			pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;  			if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)  				pipes[pipe_cnt].pipe.src.viewport_height = 1080; +			pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; +			pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; +			pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; +			pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;  			pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */  			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;  			pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ @@ -2068,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context(  			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;  			pipes[pipe_cnt].pipe.src.is_hsplit = 0;  			pipes[pipe_cnt].pipe.dest.odm_combine = 0; -			pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total; -			pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; +			pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; +			pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;  		} else {  			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;  			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; @@ -2087,6 +2108,10 @@ int dcn20_populate_dml_pipes_from_context(  			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;  			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;  			pipes[pipe_cnt].pipe.src.viewport_height_c 
= scl->viewport_c.height; +			pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; +			pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; +			pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width; +			pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;  			if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {  				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;  				pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; @@ -2252,7 +2277,6 @@ void dcn20_set_mcif_arb_params(  	}  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)  {  	int i; @@ -2286,7 +2310,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)  	}  	return true;  } -#endif  struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,  		struct resource_context *res_ctx, @@ -2389,10 +2412,8 @@ void dcn20_merge_pipes_for_validate(  			odm_pipe->bottom_pipe = NULL;  			odm_pipe->prev_odm_pipe = NULL;  			odm_pipe->next_odm_pipe = NULL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  			if (odm_pipe->stream_res.dsc)  				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); -#endif  			/* Clear plane_res and stream_res */  			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));  			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2504,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags(  			split[i] = true;  		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {  			split[i] = true; -			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; +			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;  		}  		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =  			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; @@ -2535,7 +2556,7 @@ bool dcn20_fast_validate_bw( 
 	dcn20_merge_pipes_for_validate(dc, context); -	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); +	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);  	*pipe_cnt_out = pipe_cnt; @@ -2612,14 +2633,12 @@ bool dcn20_fast_validate_bw(  			ASSERT(0);  		}  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	/* Actual dsc count per stream dsc validation*/  	if (!dcn20_validate_dsc(dc, context)) {  		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =  				DML_FAIL_DSC_VALIDATION_FAILURE;  		goto validate_fail;  	} -#endif  	*vlevel_out = vlevel; @@ -2683,10 +2702,10 @@ static void dcn20_calculate_wm(  	if (pipe_cnt != pipe_idx) {  		if (dc->res_pool->funcs->populate_dml_pipes)  			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, -				&context->res_ctx, pipes); +				context, pipes);  		else  			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, -				&context->res_ctx, pipes); +				context, pipes);  	}  	*out_pipe_cnt = pipe_cnt; @@ -2706,11 +2725,9 @@ static void dcn20_calculate_wm(  	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif  	if (vlevel < 2) {  		pipes[0].clks_cfg.voltage = 2; @@ -2722,10 
+2739,8 @@ static void dcn20_calculate_wm(  	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif  	if (vlevel < 3) {  		pipes[0].clks_cfg.voltage = 3; @@ -2737,10 +2752,8 @@ static void dcn20_calculate_wm(  	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif  	pipes[0].clks_cfg.voltage = vlevel;  	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2750,10 +2763,8 @@ static void dcn20_calculate_wm(  	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif  }  void dcn20_calculate_dlg_params( @@ -2919,11 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,  	bool voltage_supported = false;  	bool full_pstate_supported = false;  	bool dummy_pstate_supported = false; -	double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; +	double p_state_latency_us; + +	DC_FP_START(); +	p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; +	context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = +		dc->debug.disable_dram_clock_change_vactive_support; -	if (fast_validate) -		return dcn20_validate_bandwidth_internal(dc, context, true); +	if (fast_validate) { +		voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); +		DC_FP_END(); +		return voltage_supported; +	}  	// Best case, we support full UCLK switch latency  	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); @@ -2931,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,  	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||  		(voltage_supported && full_pstate_supported)) { -		context->bw_ctx.bw.dcn.clk.p_state_change_support = true; +		context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;  		goto restore_dml_state;  	} @@ -2952,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state 
*context,  restore_dml_state:  	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; +	DC_FP_END();  	return voltage_supported;  } @@ -2996,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)  {  	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); -	destruct(dcn20_pool); +	dcn20_resource_destruct(dcn20_pool);  	kfree(dcn20_pool);  	*pool = NULL;  } @@ -3243,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s  void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)  { -	kernel_fpu_begin();  	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns  			&& dc->bb_overrides.sr_exit_time_ns) {  		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; @@ -3267,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st  		bb->dram_clock_change_latency_us =  				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;  	} -	kernel_fpu_end();  }  static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( @@ -3309,12 +3327,13 @@ static bool init_soc_bounding_box(struct dc *dc,  	DC_LOGGER_INIT(dc->ctx->logger); -	if (!bb && !SOC_BOUNDING_BOX_VALID) { +	/* TODO: upstream NV12 bounding box when its launched */ +	if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {  		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);  		return false;  	} -	if (bb && !SOC_BOUNDING_BOX_VALID) { +	if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {  		int i;  		dcn2_0_nv12_soc.sr_exit_time_us = @@ -3456,7 +3475,7 @@ static bool init_soc_bounding_box(struct dc *dc,  	return true;  } -static bool construct( +static bool dcn20_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc,  	struct dcn20_resource_pool *pool) @@ -3464,6 +3483,7 @@ static bool construct(  	int i;  	struct dc_context *ctx = dc->ctx;  	struct irq_service_init_data init_data; +	
struct ddc_service_init_data ddc_init_data;  	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =  			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);  	struct _vcs_dpi_ip_params_st *loaded_ip = @@ -3471,6 +3491,8 @@ static bool construct(  	enum dml_project dml_project_version =  			get_dml_project_version(ctx->asic_id.hw_internal_rev); +	DC_FP_START(); +  	ctx->dc_bios->regs = &bios_regs;  	pool->base.funcs = &dcn20_res_pool_funcs; @@ -3723,7 +3745,6 @@ static bool construct(  		goto create_fail;  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {  		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);  		if (pool->base.dscs[i] == NULL) { @@ -3732,7 +3753,6 @@ static bool construct(  			goto create_fail;  		}  	} -#endif  	if (!dcn20_dwbc_create(ctx, &pool->base)) {  		BREAK_TO_DEBUGGER(); @@ -3759,11 +3779,24 @@ static bool construct(  	dc->cap_funcs = cap_funcs; +	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { +		ddc_init_data.ctx = dc->ctx; +		ddc_init_data.link = NULL; +		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; +		ddc_init_data.id.enum_id = 0; +		ddc_init_data.id.type = OBJECT_TYPE_GENERIC; +		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); +	} else { +		pool->base.oem_device = NULL; +	} + +	DC_FP_END();  	return true;  create_fail: -	destruct(pool); +	DC_FP_END(); +	dcn20_resource_destruct(pool);  	return false;  } @@ -3778,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(init_data->num_virtual_links, dc, pool)) +	if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))  		return &pool->base;  	BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index fef473d68a4a..f5893840b79b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -50,7 +50,7 @@ 
unsigned int dcn20_calc_max_scaled_time(  		enum mmhubbub_wbif_mode mode,  		unsigned int urgent_watermark);  int dcn20_populate_dml_pipes_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); +		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);  struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(  		struct dc_state *state,  		const struct resource_pool *pool, @@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags(  		struct dc_state *context,  		int vlevel,  		bool *split); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); -#endif  void dcn20_split_stream_for_mpc(  		struct resource_context *res_ctx,  		const struct resource_pool *pool, @@ -159,6 +157,7 @@ void dcn20_calculate_dlg_params(  enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);  enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);  enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);  enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 4b3401616434..9b70a1e7b962 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(  		HDMI_GENERIC7_LINE, 0);  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  /* Update GSP7 SDP 128 byte long */  static void enc2_update_gsp7_128_info_packet( @@ -360,7 +359,6 @@ static void enc2_read_state(struct 
stream_encoder *enc, struct enc_state *s)  		REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);  	}  } -#endif  /* Set Dynamic Metadata-configuration.   *   enable_dme:         TRUE: enables Dynamic Metadata Enfine, FALSE: disables DME @@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)  {  	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422  			&& !timing->dsc_cfg.ycbcr422_simple); -#endif  	return two_pix;  } @@ -492,15 +488,23 @@ void enc2_stream_encoder_dp_unblank(  				DP_VID_N_MUL, n_multiply);  	} -	/* set DIG_START to 0x1 to reset FIFO */ +	/* make sure stream is disabled before resetting steer fifo */ +	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); +	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); +	/* set DIG_START to 0x1 to reset FIFO */  	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); +	udelay(1);  	/* write 0 to take the FIFO out of reset */  	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); -	/* switch DP encoder to CRTC data */ +	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen +	 * that it overflows during mode transition, and sometimes doesn't recover. 
+	 */ +	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); +	udelay(10);  	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); @@ -533,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting)  {  	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); -	enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting); +	enc1_stream_encoder_dp_set_stream_attribute(enc, +			crtc_timing, +			output_color_space, +			use_vsc_sdp_for_colorimetry, +			enable_sdp_splitting);  	REG_UPDATE(DP_SEC_FRAMING4,  		DP_SST_SDP_SPLITTING, enable_sdp_splitting); @@ -560,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {  		enc2_stream_encoder_stop_hdmi_info_packets,  	.update_dp_info_packets =  		enc2_stream_encoder_update_dp_info_packets, +	.send_immediate_sdp_message = +		enc1_stream_encoder_send_immediate_sdp_message,  	.stop_dp_info_packets =  		enc1_stream_encoder_stop_dp_info_packets,  	.dp_blank = @@ -582,11 +593,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {  	.dp_get_pixel_format =  		enc1_stream_encoder_dp_get_pixel_format, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.enc_read_state = enc2_read_state,  	.dp_set_dsc_config = enc2_dp_set_dsc_config,  	.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, -#endif  	.set_dynamic_metadata = enc2_set_dynamic_metadata,  	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 3f94a9f13c4a..d2a805bd4573 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -98,6 +98,7 @@ void 
enc2_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting);  void enc2_stream_encoder_dp_unblank( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 14113ccf498d..07684d3e375a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,9 +1,17 @@ +# SPDX-License-Identifier: MIT  #  # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o +DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ +	 dcn21_hwseq.o dcn21_link_encoder.o +ifdef CONFIG_X86  CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec +endif  ifdef CONFIG_CC_IS_GCC  ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -11,6 +19,7 @@ IS_OLD_GCC = 1  endif  endif +ifdef CONFIG_X86  ifdef IS_OLD_GCC  # Stack alignment mismatch, proceed with caution.  
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -19,6 +28,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4  else  CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2  endif +endif  AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 2f5a5867e674..da63fc53cc4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -29,6 +29,10 @@  #include "dm_services.h"  #include "reg_helper.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER_INIT(logger) +  #define REG(reg)\  	hubp21->hubp_regs->reg @@ -164,6 +168,158 @@ static void hubp21_setup(  } +void hubp21_set_viewport( +	struct hubp *hubp, +	const struct rect *viewport, +	const struct rect *viewport_c) +{ +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + +	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, +		  PRI_VIEWPORT_WIDTH, viewport->width, +		  PRI_VIEWPORT_HEIGHT, viewport->height); + +	REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0, +		  PRI_VIEWPORT_X_START, viewport->x, +		  PRI_VIEWPORT_Y_START, viewport->y); + +	/*for stereo*/ +	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0, +		  SEC_VIEWPORT_WIDTH, viewport->width, +		  SEC_VIEWPORT_HEIGHT, viewport->height); + +	REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0, +		  SEC_VIEWPORT_X_START, viewport->x, +		  SEC_VIEWPORT_Y_START, viewport->y); + +	/* DC supports NV12 only at the moment */ +	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, +		  PRI_VIEWPORT_WIDTH_C, viewport_c->width, +		  PRI_VIEWPORT_HEIGHT_C, viewport_c->height); + +	REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, +		  PRI_VIEWPORT_X_START_C, viewport_c->x, +		  PRI_VIEWPORT_Y_START_C, viewport_c->y); + +	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, +		  SEC_VIEWPORT_WIDTH_C, viewport_c->width, +		  SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + +	
REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, +		  SEC_VIEWPORT_X_START_C, viewport_c->x, +		  SEC_VIEWPORT_Y_START_C, viewport_c->y); +} + +static void hubp21_apply_PLAT_54186_wa( +		struct hubp *hubp, +		const struct dc_plane_address *address) +{ +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); +	struct dc_debug_options *debug = &hubp->ctx->dc->debug; +	unsigned int chroma_bpe = 2; +	unsigned int luma_addr_high_part = 0; +	unsigned int row_height = 0; +	unsigned int chroma_pitch = 0; +	unsigned int viewport_c_height = 0; +	unsigned int viewport_c_width = 0; +	unsigned int patched_viewport_height = 0; +	unsigned int patched_viewport_width = 0; +	unsigned int rotation_angle = 0; +	unsigned int pix_format = 0; +	unsigned int h_mirror_en = 0; +	unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */ + + +	if (!debug->nv12_iflip_vm_wa) +		return; + +	REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, +		PTE_ROW_HEIGHT_LINEAR_C, &row_height); + +	REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, +			PRI_VIEWPORT_WIDTH_C, &viewport_c_width, +			PRI_VIEWPORT_HEIGHT_C, &viewport_c_height); + +	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, +			PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part); + +	REG_GET(DCSURF_SURFACE_PITCH_C, +			PITCH_C, &chroma_pitch); + +	chroma_pitch += 1; + +	REG_GET_3(DCSURF_SURFACE_CONFIG, +			SURFACE_PIXEL_FORMAT, &pix_format, +			ROTATION_ANGLE, &rotation_angle, +			H_MIRROR_EN, &h_mirror_en); + +	/* reset persistent cached data */ +	hubp21->PLAT_54186_wa_chroma_addr_offset = 0; +	/* apply wa only for NV12 surface with scatter gather enabled with viewport > 512 along +	 * the vertical direction*/ +	if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || +			address->video_progressive.luma_addr.high_part == 0xf4) +		return; + +	if ((rotation_angle == 0 || rotation_angle == 180) +			&& viewport_c_height <= 512) +		return; + +	if ((rotation_angle == 90 || rotation_angle == 270) +				&& viewport_c_width <= 512) +		return; + +	switch 
(rotation_angle) { +	case 0: /* 0 degree rotation */ +		row_height = 128; +		patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; +		patched_viewport_width = viewport_c_width; +		hubp21->PLAT_54186_wa_chroma_addr_offset = 0; +		break; +	case 2: /* 180 degree rotation */ +		row_height = 128; +		patched_viewport_height = viewport_c_height + row_height; +		patched_viewport_width = viewport_c_width; +		hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; +		break; +	case 1: /* 90 degree rotation */ +		row_height = 256; +		if (h_mirror_en) { +			patched_viewport_height = viewport_c_height; +			patched_viewport_width = viewport_c_width + row_height; +			hubp21->PLAT_54186_wa_chroma_addr_offset = 0; +		} else { +			patched_viewport_height = viewport_c_height; +			patched_viewport_width = viewport_c_width + row_height; +			hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; +		} +		break; +	case 3:	/* 270 degree rotation */ +		row_height = 256; +		if (h_mirror_en) { +			patched_viewport_height = viewport_c_height; +			patched_viewport_width = viewport_c_width + row_height; +			hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; +		} else { +			patched_viewport_height = viewport_c_height; +			patched_viewport_width = viewport_c_width + row_height; +			hubp21->PLAT_54186_wa_chroma_addr_offset = 0; +		} +		break; +	default: +		ASSERT(0); +		break; +	} + +	/* catch cases where viewport keep growing */ +	ASSERT(patched_viewport_height && patched_viewport_height < 5000); +	ASSERT(patched_viewport_width && patched_viewport_width < 5000); + +	REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, +			PRI_VIEWPORT_WIDTH_C, patched_viewport_width, +			PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); +} +  void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,  		struct vm_system_aperture_param *apt)  { @@ -191,6 +347,562 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,  			
SYSTEM_ACCESS_MODE, 0x3);  } +void hubp21_validate_dml_output(struct hubp *hubp, +		struct dc_context *ctx, +		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, +		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, +		struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); +	struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; +	struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; +	struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; +	DC_LOGGER_INIT(ctx->logger); +	DC_LOG_DEBUG("DML Validation | Running Validation"); + +	/* Requester - Per hubp */ +	REG_GET(HUBPRET_CONTROL, +		DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); +	REG_GET_4(DCN_EXPANSION_MODE, +		DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, +		PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, +		MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, +		CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); +	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, +		CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, +		MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, +		META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, +		MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, +		DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, +		VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, +		SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, +		PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); +	REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C, +		CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, +		MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, +		META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, +		MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, +		DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, +		SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, +		PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + +	if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) +		DC_LOG_DEBUG("DML Validation | 
HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n", +				dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); +	if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); +	if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); +	if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n", +				dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); +	if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) +		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n", +				dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + +	if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); +	if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); +	if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); +	if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) +		
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); +	if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); +	if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:VM_GROUP_SIZE - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); +	if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); +	if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + +	if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); +	if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); +	if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				
dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); +	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); +	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); +	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); +	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) +		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u  Actual: %u\n", +				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + +	/* DLG - Per hubp */ +	REG_GET_2(BLANK_OFFSET_0, +		REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, +		DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); +	REG_GET(BLANK_OFFSET_1, +		MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); +	REG_GET(DST_DIMENSIONS, +		REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); +	REG_GET_2(DST_AFTER_SCALER, +		REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, +		DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); +	REG_GET(REF_FREQ_TO_PIX_FREQ, +		REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + +	if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); +	if 
(dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); +	if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) +		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u  Actual: %u\n", +				dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); +	if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) +		DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); +	if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) +		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); +	if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) +		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); +	if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) +		DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u  Actual: %u\n", +				dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + +	/* DLG - Per luma/chroma */ +	REG_GET(VBLANK_PARAMETERS_1, +		REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); +	if (REG(NOM_PARAMETERS_0)) +		REG_GET(NOM_PARAMETERS_0, +			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); +	if (REG(NOM_PARAMETERS_1)) +		REG_GET(NOM_PARAMETERS_1, +			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); +	REG_GET(NOM_PARAMETERS_4, +		DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); +	REG_GET(NOM_PARAMETERS_5, +		REFCYC_PER_META_CHUNK_NOM_L, 
&dlg_attr.refcyc_per_meta_chunk_nom_l); +	REG_GET_2(PER_LINE_DELIVERY, +		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, +		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); +	REG_GET_2(PER_LINE_DELIVERY_PRE, +		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, +		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); +	REG_GET(VBLANK_PARAMETERS_2, +		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); +	if (REG(NOM_PARAMETERS_2)) +		REG_GET(NOM_PARAMETERS_2, +			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); +	if (REG(NOM_PARAMETERS_3)) +		REG_GET(NOM_PARAMETERS_3, +			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); +	REG_GET(NOM_PARAMETERS_6, +		DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); +	REG_GET(NOM_PARAMETERS_7, +		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); +	REG_GET(VBLANK_PARAMETERS_3, +			REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); +	REG_GET(VBLANK_PARAMETERS_4, +			REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + +	if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); +	if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); +	if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_nom_l, 
dlg_attr.refcyc_per_pte_group_nom_l); +	if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); +	if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); +	if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); +	if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); +	if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); +	if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); +	if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_nom_c, 
dlg_attr.refcyc_per_pte_group_nom_c); +	if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); +	if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) +		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); +	if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); +	if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) +		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); +	if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); +	if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + +	/* TTU - per hubp */ +	REG_GET_2(DCN_TTU_QOS_WM, +		QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, +		QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + +	if 
(ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) +		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); +	if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) +		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + +	/* TTU - per luma/chroma */ +	/* Assumed surf0 is luma and 1 is chroma */ +	REG_GET_3(DCN_SURF0_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); +	REG_GET_3(DCN_SURF1_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); +	REG_GET_3(DCN_CUR0_TTU_CNTL0, +		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, +		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, +		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); +	REG_GET(FLIP_PARAMETERS_1, +		REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); +	REG_GET(DCN_CUR0_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); +	REG_GET(DCN_CUR1_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); +	REG_GET(DCN_SURF0_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); +	REG_GET(DCN_SURF1_TTU_CNTL1, +			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + +	if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); +	if (ttu_attr.qos_level_fixed_l != 
dml_ttu_attr->qos_level_fixed_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); +	if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); +	if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); +	if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); +	if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); +	if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); +	if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); +	if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); +	if 
(dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); +	if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) +		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); +	if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) +		DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); +	if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) +		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); +	if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) +		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n", +				dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); + +	/* Host VM deadline regs */ +	REG_GET(VBLANK_PARAMETERS_5, +		REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank); +	REG_GET(VBLANK_PARAMETERS_6, +		REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank); +	REG_GET(FLIP_PARAMETERS_3, +		REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip); +	REG_GET(FLIP_PARAMETERS_4, +		REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip); +	REG_GET(FLIP_PARAMETERS_5, +		REFCYC_PER_PTE_GROUP_FLIP_C, 
&dlg_attr.refcyc_per_pte_group_flip_c); +	REG_GET(FLIP_PARAMETERS_6, +		REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c); +	REG_GET(FLIP_PARAMETERS_2, +		REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l); + +	if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank); +	if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank) +		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank); +	if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip); +	if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip); +	if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c); +	if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c); +	if (dlg_attr.refcyc_per_meta_chunk_flip_l != 
dml_dlg_attr->refcyc_per_meta_chunk_flip_l) +		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u  Actual: %u\n", +				dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l); +} + +static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + +	REG_UPDATE_3(DCSURF_FLIP_CONTROL, +					SURFACE_FLIP_TYPE, flip_regs->immediate, +					SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo, +					SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo); + +	REG_UPDATE(VMID_SETTINGS_0, +				VMID, flip_regs->vmid); + +	REG_UPDATE_8(DCSURF_SURFACE_CONTROL, +			PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface, +			PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface, +			PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface, +			PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface, +			SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface, +			SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface, +			SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface, +			SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface); + +	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0, +			PRIMARY_META_SURFACE_ADDRESS_HIGH_C, +			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + +	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0, +			PRIMARY_META_SURFACE_ADDRESS_C, +			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + +	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, +			PRIMARY_META_SURFACE_ADDRESS_HIGH, +			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + +	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, +			PRIMARY_META_SURFACE_ADDRESS, +			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS); + +	REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0, +			SECONDARY_META_SURFACE_ADDRESS_HIGH, +			flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + +	REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0, +			SECONDARY_META_SURFACE_ADDRESS, +			
flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS); + + +	REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0, +			SECONDARY_SURFACE_ADDRESS_HIGH, +			flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + +	REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0, +			SECONDARY_SURFACE_ADDRESS, +			flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS); + + +	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0, +			PRIMARY_SURFACE_ADDRESS_HIGH_C, +			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + +	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0, +			PRIMARY_SURFACE_ADDRESS_C, +			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C); + +	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, +			PRIMARY_SURFACE_ADDRESS_HIGH, +			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + +	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, +			PRIMARY_SURFACE_ADDRESS, +			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS); +} + +void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ +	struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); +	struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 }; + +	PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA; +	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS; +	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C; +	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; +	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; +	PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo; +	PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst; +	PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate; +	PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface; +	PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; + +	PERF_TRACE();  // TODO: remove after performance is stable. 
+	dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header); +	PERF_TRACE();  // TODO: remove after performance is stable. +	dc_dmub_srv_cmd_execute(dmcub); +	PERF_TRACE();  // TODO: remove after performance is stable. +	dc_dmub_srv_wait_idle(dmcub); +	PERF_TRACE();  // TODO: remove after performance is stable. +} + +bool hubp21_program_surface_flip_and_addr( +		struct hubp *hubp, +		const struct dc_plane_address *address, +		bool flip_immediate) +{ +	struct dc_debug_options *debug = &hubp->ctx->dc->debug; +	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); +	struct surface_flip_registers flip_regs = { 0 }; + +	flip_regs.vmid = address->vmid; + +	switch (address->type) { +	case PLN_ADDR_TYPE_GRAPHICS: +		if (address->grph.addr.quad_part == 0) { +			BREAK_TO_DEBUGGER(); +			break; +		} + +		if (address->grph.meta_addr.quad_part != 0) { +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = +					address->grph.meta_addr.low_part; +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = +					address->grph.meta_addr.high_part; +		} + +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = +				address->grph.addr.low_part; +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = +				address->grph.addr.high_part; +		break; +	case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: +		if (address->video_progressive.luma_addr.quad_part == 0 +				|| address->video_progressive.chroma_addr.quad_part == 0) +			break; + +		if (address->video_progressive.luma_meta_addr.quad_part != 0) { +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = +					address->video_progressive.luma_meta_addr.low_part; +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = +					address->video_progressive.luma_meta_addr.high_part; + +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C = +					address->video_progressive.chroma_meta_addr.low_part; +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C = +					address->video_progressive.chroma_meta_addr.high_part; +		} + +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = +				
address->video_progressive.luma_addr.low_part; +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = +				address->video_progressive.luma_addr.high_part; + +		if (debug->nv12_iflip_vm_wa) { +			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = +					address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset; +		} else +			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = +					address->video_progressive.chroma_addr.low_part; + +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = +				address->video_progressive.chroma_addr.high_part; + +		break; +	case PLN_ADDR_TYPE_GRPH_STEREO: +		if (address->grph_stereo.left_addr.quad_part == 0) +			break; +		if (address->grph_stereo.right_addr.quad_part == 0) +			break; + +		flip_regs.grph_stereo = true; + +		if (address->grph_stereo.right_meta_addr.quad_part != 0) { +			flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS = +					address->grph_stereo.right_meta_addr.low_part; +			flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH = +					address->grph_stereo.right_meta_addr.high_part; +		} + +		if (address->grph_stereo.left_meta_addr.quad_part != 0) { +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = +					address->grph_stereo.left_meta_addr.low_part; +			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = +					address->grph_stereo.left_meta_addr.high_part; +		} + +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = +				address->grph_stereo.left_addr.low_part; +		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = +				address->grph_stereo.left_addr.high_part; + +		flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS = +				address->grph_stereo.right_addr.low_part; +		flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH = +				address->grph_stereo.right_addr.high_part; + +		break; +	default: +		BREAK_TO_DEBUGGER(); +		break; +	} + +	flip_regs.tmz_surface = address->tmz_surface; +	flip_regs.immediate = flip_immediate; + +	if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) +		
dmcub_PLAT_54186_wa(hubp, &flip_regs); +	else +		program_surface_flip_and_addr(hubp, &flip_regs); + +	hubp->request_address = *address; + +	return true; +} +  void hubp21_init(struct hubp *hubp)  {  	// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta @@ -203,7 +915,7 @@ void hubp21_init(struct hubp *hubp)  static struct hubp_funcs dcn21_hubp_funcs = {  	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,  	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, -	.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, +	.hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,  	.hubp_program_surface_config = hubp1_program_surface_config,  	.hubp_is_flip_pending = hubp1_is_flip_pending,  	.hubp_setup = hubp21_setup, @@ -211,7 +923,8 @@ static struct hubp_funcs dcn21_hubp_funcs = {  	.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,  	.set_blank = hubp1_set_blank,  	.dcc_control = hubp1_dcc_control, -	.mem_program_viewport = min_set_viewport, +	.mem_program_viewport = hubp21_set_viewport, +	.apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,  	.set_cursor_attributes	= hubp2_cursor_set_attributes,  	.set_cursor_position	= hubp1_cursor_set_position,  	.hubp_clk_cntl = hubp1_clk_cntl, @@ -223,6 +936,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {  	.hubp_clear_underflow = hubp1_clear_underflow,  	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,  	.hubp_init = hubp21_init, +	.validate_dml_output = hubp21_validate_dml_output,  };  bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h index aeda719a2a13..9873b6cbc5ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h @@ -108,6 +108,7 @@ struct dcn21_hubp {  	const struct dcn_hubp2_registers *hubp_regs;  	const struct dcn_hubp2_shift *hubp_shift; 
 	const struct dcn_hubp2_mask *hubp_mask; +	int PLAT_54186_wa_chroma_addr_offset;  };  bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index b25215cadf85..081ad8e43d58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -28,7 +28,7 @@  #include "core_types.h"  #include "resource.h"  #include "dce/dce_hwseq.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21_hwseq.h"  #include "vmid.h"  #include "reg_helper.h"  #include "hw/clk_mgr.h" @@ -61,7 +61,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c  } -static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)  {  	struct dcn_hubbub_phys_addr_config config; @@ -82,7 +82,7 @@ static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph  // work around for Renoir s0i3, if register is programmed, bypass golden init. 
-static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +bool dcn21_s0i3_golden_init_wa(struct dc *dc)  {  	struct dce_hwseq *hws = dc->hwseq;  	uint32_t value = 0; @@ -112,11 +112,3 @@ void dcn21_optimize_pwr_state(  			true);  } -void dcn21_hw_sequencer_construct(struct dc *dc) -{ -	dcn20_hw_sequencer_construct(dc); -	dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; -	dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; -	dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; -	dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index be67b62e6fb1..182736096123 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -26,8 +26,22 @@  #ifndef __DC_HWSS_DCN21_H__  #define __DC_HWSS_DCN21_H__ +#include "hw_sequencer_private.h" +  struct dc; -void dcn21_hw_sequencer_construct(struct dc *dc); +int dcn21_init_sys_ctx(struct dce_hwseq *hws, +		struct dc *dc, +		struct dc_phy_addr_space_config *pa_config); + +bool dcn21_s0i3_golden_init_wa(struct dc *dc); + +void dcn21_exit_optimized_pwr_state( +		const struct dc *dc, +		struct dc_state *context); + +void dcn21_optimize_pwr_state( +		const struct dc *dc, +		struct dc_state *context);  #endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c new file mode 100644 index 000000000000..4861aa5c59ae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -0,0 +1,142 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21_hwseq.h" + +static const struct hw_sequencer_funcs dcn21_funcs = { +	.program_gamut_remap = dcn10_program_gamut_remap, +	.init_hw = dcn10_init_hw, +	.apply_ctx_to_hw = dce110_apply_ctx_to_hw, +	.apply_ctx_for_surface = NULL, +	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx, +	.update_plane_addr = dcn20_update_plane_addr, +	.update_dchub = dcn10_update_dchub, +	.update_pending_status = dcn10_update_pending_status, +	.program_output_csc = dcn20_program_output_csc, +	.enable_accelerated_mode = dce110_enable_accelerated_mode, +	.enable_timing_synchronization = dcn10_enable_timing_synchronization, +	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, +	.update_info_frame = dce110_update_info_frame, +	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message, +	.enable_stream = dcn20_enable_stream, +	.disable_stream = dce110_disable_stream, +	.unblank_stream = dcn20_unblank_stream, +	.blank_stream = dce110_blank_stream, +	.enable_audio_stream = dce110_enable_audio_stream, +	.disable_audio_stream = dce110_disable_audio_stream, +	.disable_plane = dcn20_disable_plane, +	.pipe_control_lock = dcn20_pipe_control_lock, +	.pipe_control_lock_global = dcn20_pipe_control_lock_global, +	.prepare_bandwidth = dcn20_prepare_bandwidth, +	.optimize_bandwidth = dcn20_optimize_bandwidth, +	.update_bandwidth = dcn20_update_bandwidth, +	.set_drr = dcn10_set_drr, +	.get_position = dcn10_get_position, +	.set_static_screen_control = dcn10_set_static_screen_control, +	.setup_stereo = dcn10_setup_stereo, +	.set_avmute = dce110_set_avmute, +	.log_hw_state = dcn10_log_hw_state, +	.get_hw_state = dcn10_get_hw_state, +	.clear_status_bits = dcn10_clear_status_bits, +	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, +	.edp_power_control = dce110_edp_power_control, +	
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, +	.set_cursor_position = dcn10_set_cursor_position, +	.set_cursor_attribute = dcn10_set_cursor_attribute, +	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, +	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt, +	.set_clock = dcn10_set_clock, +	.get_clock = dcn10_get_clock, +	.program_triplebuffer = dcn20_program_triple_buffer, +	.enable_writeback = dcn20_enable_writeback, +	.disable_writeback = dcn20_disable_writeback, +	.dmdata_status_done = dcn20_dmdata_status_done, +	.program_dmdata_engine = dcn20_program_dmdata_engine, +	.set_dmdata_attributes = dcn20_set_dmdata_attributes, +	.init_sys_ctx = dcn21_init_sys_ctx, +	.init_vm_ctx = dcn20_init_vm_ctx, +	.set_flip_control_gsl = dcn20_set_flip_control_gsl, +	.optimize_pwr_state = dcn21_optimize_pwr_state, +	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, +	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +	.set_cursor_position = dcn10_set_cursor_position, +	.set_cursor_attribute = dcn10_set_cursor_attribute, +	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, +	.optimize_pwr_state = dcn21_optimize_pwr_state, +	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, +}; + +static const struct hwseq_private_funcs dcn21_private_funcs = { +	.init_pipes = dcn10_init_pipes, +	.update_plane_addr = dcn20_update_plane_addr, +	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect, +	.update_mpcc = dcn20_update_mpcc, +	.set_input_transfer_func = dcn20_set_input_transfer_func, +	.set_output_transfer_func = dcn20_set_output_transfer_func, +	.power_down = dce110_power_down, +	.enable_display_power_gating = dcn10_dummy_display_power_gating, +	.blank_pixel_data = dcn20_blank_pixel_data, +	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, +	.enable_stream_timing = dcn20_enable_stream_timing, +	.edp_backlight_control = dce110_edp_backlight_control, +	.disable_stream_gating = dcn20_disable_stream_gating, +	
.enable_stream_gating = dcn20_enable_stream_gating, +	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, +	.did_underflow_occur = dcn10_did_underflow_occur, +	.init_blank = dcn20_init_blank, +	.disable_vga = dcn20_disable_vga, +	.bios_golden_init = dcn10_bios_golden_init, +	.plane_atomic_disable = dcn20_plane_atomic_disable, +	.plane_atomic_power_down = dcn10_plane_atomic_power_down, +	.enable_power_gating_plane = dcn20_enable_power_gating_plane, +	.dpp_pg_control = dcn20_dpp_pg_control, +	.hubp_pg_control = dcn20_hubp_pg_control, +	.dsc_pg_control = NULL, +	.update_odm = dcn20_update_odm, +	.dsc_pg_control = dcn20_dsc_pg_control, +	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, +	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, +	.set_hdr_multiplier = dcn10_set_hdr_multiplier, +	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +	.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, +	.wait_for_blank_complete = dcn20_wait_for_blank_complete, +	.dccg_init = dcn20_dccg_init, +	.set_blend_lut = dcn20_set_blend_lut, +	.set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ +	dc->hwss = dcn21_funcs; +	dc->hwseq->funcs = dcn21_private_funcs; + +	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { +		dc->hwss.init_hw = dcn20_fpga_init_hw; +		dc->hwseq->funcs.init_pipes = NULL; +	} +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h new file mode 100644 index 000000000000..3ed24292648a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN21_INIT_H__ +#define __DC_DCN21_INIT_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index e8a504ca5890..e45683ac871a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -323,9 +323,7 @@ void dcn21_link_encoder_disable_output(  static const struct link_encoder_funcs dcn21_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.read_state = link_enc2_read_state, -#endif  	.validate_output_with_stream =  		dcn10_link_encoder_validate_output_with_stream,  	.hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h index 1d7a1a51f13d..033d5d76f195 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -33,6 +33,45 @@ struct dcn21_link_encoder {  	struct dpcssys_phy_seq_cfg phy_seq_cfg;  }; +#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\ +	DPCS_DCN2_MASK_SH_LIST(mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\ +	
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ +	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ +	LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) + +#define DPCS_DCN21_REG_LIST(id) \ +	DPCS_DCN2_REG_LIST(id),\ +	SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\ +	SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) +  #define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\  	LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\  	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 459bd9a5caed..1d741bca2211 100644 
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1,5 +1,6 @@  /*  * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), @@ -23,9 +24,13 @@   *   */ +#include <linux/slab.h> +  #include "dm_services.h"  #include "dc.h" +#include "dcn21_init.h" +  #include "resource.h"  #include "include/irq_service_interface.h"  #include "dcn20/dcn20_resource.h" @@ -58,6 +63,8 @@  #include "dcn20/dcn20_dwb.h"  #include "dcn20/dcn20_mmhubbub.h" +#include "dpcs/dpcs_2_1_0_offset.h" +#include "dpcs/dpcs_2_1_0_sh_mask.h"  #include "renoir_ip_offset.h"  #include "dcn/dcn_2_1_0_offset.h" @@ -76,6 +83,7 @@  #include "dcn21_resource.h"  #include "vm_helper.h"  #include "dcn20/dcn20_vmid.h" +#include "../dce/dmub_psr.h"  #define SOC_BOUNDING_BOX_VALID false  #define DC_LOGGER_INIT(logger) @@ -88,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {  	.gpuvm_max_page_table_levels = 1,  	.hostvm_max_page_table_levels = 4,  	.hostvm_cached_page_table_levels = 2, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.num_dsc = 3, -#else -	.num_dsc = 0, -#endif  	.rob_buffer_size_kbytes = 168,  	.det_buffer_size_kbytes = 164,  	.dpte_buffer_size_in_pte_reqs_luma = 44, @@ -257,7 +261,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {  	.vmm_page_size_bytes = 4096,  	.dram_clock_change_latency_us = 23.84,  	.return_bus_width_bytes = 64, -	.dispclk_dppclk_vco_speed_mhz = 3550, +	.dispclk_dppclk_vco_speed_mhz = 3600,  	.xfc_bus_transport_time_us = 4,  	.xfc_xbuf_latency_tolerance_us = 4,  	.use_urgent_burst_bw = 1, @@ -350,7 +354,7 @@ static const struct bios_registers bios_regs = {  };  static const struct dce_dmcu_registers dmcu_regs = { -		DMCU_DCN10_REG_LIST() +		DMCU_DCN20_REG_LIST()  };  static const struct dce_dmcu_shift dmcu_shift = { 
@@ -373,20 +377,6 @@ static const struct dce_abm_mask abm_mask = {  		ABM_MASK_SH_LIST_DCN20(_MASK)  }; -#ifdef CONFIG_DRM_AMD_DC_DMUB -static const struct dcn21_dmcub_registers dmcub_regs = { -		DMCUB_REG_LIST_DCN() -}; - -static const struct dcn21_dmcub_shift dmcub_shift = { -		DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT) -}; - -static const struct dcn21_dmcub_mask dmcub_mask = { -		DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK) -}; -#endif -  #define audio_regs(id)\  [id] = {\  		AUD_COMMON_REG_LIST(id)\ @@ -476,15 +466,18 @@ static const struct dcn20_mpc_registers mpc_regs = {  		MPC_OUT_MUX_REG_LIST_DCN2_0(0),  		MPC_OUT_MUX_REG_LIST_DCN2_0(1),  		MPC_OUT_MUX_REG_LIST_DCN2_0(2), -		MPC_OUT_MUX_REG_LIST_DCN2_0(3) +		MPC_OUT_MUX_REG_LIST_DCN2_0(3), +		MPC_DBG_REG_LIST_DCN2_0()  };  static const struct dcn20_mpc_shift mpc_shift = { -	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), +	MPC_DEBUG_REG_LIST_SH_DCN20  };  static const struct dcn20_mpc_mask mpc_mask = { -	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), +	MPC_DEBUG_REG_LIST_MASK_DCN20  };  #define hubp_regs(id)\ @@ -552,7 +545,6 @@ static const struct dcn20_vmid_mask vmid_masks = {  		DCN20_VMID_MASK_SH_LIST(_MASK)  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #define dsc_regsDCN20(id)\  [id] = {\  	DSC_REG_LIST_DCN20(id)\ @@ -574,7 +566,6 @@ static const struct dcn20_dsc_shift dsc_shift = {  static const struct dcn20_dsc_mask dsc_mask = {  	DSC_REG_LIST_SH_MASK_DCN20(_MASK)  }; -#endif  #define ipp_regs(id)\  [id] = {\ @@ -621,6 +612,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {  #define tf_regs(id)\  [id] = {\  	TF_REG_LIST_DCN20(id),\ +	TF_REG_LIST_DCN20_COMMON_APPEND(id),\  }  static const struct dcn2_dpp_registers tf_regs[] = { @@ -631,11 +623,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {  };  static const struct dcn2_dpp_shift tf_shift = { -		TF_REG_LIST_SH_MASK_DCN20(__SHIFT) +		TF_REG_LIST_SH_MASK_DCN20(__SHIFT), +	
	TF_DEBUG_REG_LIST_SH_DCN20  };  static const struct dcn2_dpp_mask tf_mask = { -		TF_REG_LIST_SH_MASK_DCN20(_MASK) +		TF_REG_LIST_SH_MASK_DCN20(_MASK), +		TF_DEBUG_REG_LIST_MASK_DCN20  };  #define stream_enc_regs(id)\ @@ -670,7 +664,7 @@ static const struct dcn10_stream_encoder_mask se_mask = {  static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);  static int dcn21_populate_dml_pipes_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); +		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);  static struct input_pixel_processor *dcn21_ipp_create(  	struct dc_context *ctx, uint32_t inst) @@ -771,9 +765,7 @@ static const struct resource_caps res_cap_rn = {  		.num_dwb = 1,  		.num_ddc = 5,  		.num_vmid = 1, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		.num_dsc = 3, -#endif  };  #ifdef DIAGS_BUILD @@ -798,9 +790,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {  		.num_pll = 4,  		.num_dwb = 1,  		.num_ddc = 4, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  		.num_dsc = 2, -#endif  };  #endif @@ -845,6 +835,8 @@ static const struct dc_debug_options debug_defaults_drv = {  		.scl_reset_length10 = true,  		.sanity_checks = true,  		.disable_48mhz_pwrdwn = false, +		.nv12_iflip_vm_wa = true, +		.usbc_combo_phy_reset_wa = true  };  static const struct dc_debug_options debug_defaults_diags = { @@ -867,7 +859,7 @@ enum dcn20_clk_src_array_id {  	DCN20_CLK_SRC_TOTAL_DCN21  }; -static void destruct(struct dcn21_resource_pool *pool) +static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)  {  	unsigned int i; @@ -878,12 +870,10 @@ static void destruct(struct dcn21_resource_pool *pool)  		}  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {  		if (pool->base.dscs[i] != NULL)  			dcn20_dsc_destroy(&pool->base.dscs[i]);  	} -#endif  	if (pool->base.mpc != NULL) {  		kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -970,11 +960,6 @@ static 
void destruct(struct dcn21_resource_pool *pool)  	if (pool->base.dmcu != NULL)  		dce_dmcu_destroy(&pool->base.dmcu); -#ifdef CONFIG_DRM_AMD_DC_DMUB -	if (pool->base.dmcub != NULL) -		dcn21_dmcub_destroy(&pool->base.dmcub); -#endif -  	if (pool->base.dccg != NULL)  		dcn_dccg_destroy(&pool->base.dccg); @@ -1000,31 +985,39 @@ static void calculate_wm_set_for_vlevel(  	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;  	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; +	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; +	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;  	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;  	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;  	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;  	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;  	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;  	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;  	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; -#endif  	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;  }  static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)  { -	kernel_fpu_begin(); +	int i; + +	DC_FP_START(); +  	if (dc->bb_overrides.sr_exit_time_ns) { -		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; +		for (i = 0; i < WM_SET_COUNT; i++) { +			  dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = +					  dc->bb_overrides.sr_exit_time_ns / 1000.0; +		}  	}  	if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { -		
bb->sr_enter_plus_exit_time_us = -				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; +		for (i = 0; i < WM_SET_COUNT; i++) { +			  dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = +					  dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; +		}  	}  	if (dc->bb_overrides.urgent_latency_ns) { @@ -1032,10 +1025,13 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s  	}  	if (dc->bb_overrides.dram_clock_change_latency_ns) { -		bb->dram_clock_change_latency_us = +		for (i = 0; i < WM_SET_COUNT; i++) { +			dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =  				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; +		}  	} -	kernel_fpu_end(); + +	DC_FP_END();  }  void dcn21_calculate_wm( @@ -1085,10 +1081,10 @@ void dcn21_calculate_wm(  	if (pipe_cnt != pipe_idx) {  		if (dc->res_pool->funcs->populate_dml_pipes)  			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, -				&context->res_ctx, pipes); +				context, pipes);  		else  			pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, -				&context->res_ctx, pipes); +				context, pipes);  	}  	*out_pipe_cnt = pipe_cnt; @@ -1178,7 +1174,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool)  {  	struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); -	destruct(dcn21_pool); +	dcn21_resource_destruct(dcn21_pool);  	kfree(dcn21_pool);  	*pool = NULL;  } @@ -1317,7 +1313,6 @@ static void read_dce_straps(  } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct display_stream_compressor *dcn21_dsc_create(  	struct dc_context *ctx, uint32_t inst) @@ -1333,16 +1328,9 @@ struct display_stream_compressor *dcn21_dsc_create(  	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);  	return &dsc->base;  } -#endif  static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)  { -	/* -	TODO: Fix this function to calcualte correct values. 
-	There are known issues with this function currently -	that will need to be investigated. Use hardcoded known good values for now. - -  	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);  	struct clk_limit_table *clk_table = &bw_params->clk_table;  	int i; @@ -1357,11 +1345,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param  		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;  		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;  		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; -		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; +		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;  	} -	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; +	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];  	dcn2_1_soc.num_states = i; -	*/ + +	// diags does not retrieve proper values from SMU, do not update DML instance for diags +	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) +		dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);  }  /* Temporary Place holder until we can get them from fuse */ @@ -1515,8 +1506,9 @@ static const struct encoder_feature_support link_enc_feature = {  #define link_regs(id, phyid)\  [id] = {\ -	LE_DCN10_REG_LIST(id), \ +	LE_DCN2_REG_LIST(id), \  	UNIPHY_DCN2_REG_LIST(phyid), \ +	DPCS_DCN21_REG_LIST(id), \  	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \  } @@ -1555,11 +1547,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {  };  static const struct dcn10_link_enc_shift le_shift = { -	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ +	DPCS_DCN21_MASK_SH_LIST(__SHIFT)  };  static const struct dcn10_link_enc_mask le_mask = { -	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ +	
DPCS_DCN21_MASK_SH_LIST(_MASK)  };  static int map_transmitter_id_to_phy_instance( @@ -1625,10 +1619,11 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)  }  static int dcn21_populate_dml_pipes_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)  { -	uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); +	uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);  	int i; +	struct resource_context *res_ctx = &context->res_ctx;  	for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1657,7 +1652,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {  	.update_bw_bounding_box = update_bw_bounding_box  }; -static bool construct( +static bool dcn21_resource_construct(  	uint8_t num_virtual_links,  	struct dc *dc,  	struct dcn21_resource_pool *pool) @@ -1697,6 +1692,7 @@ static bool construct(  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true;  	dc->caps.extended_aux_timeout_support = true; +	dc->caps.dmcub_support = true;  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv; @@ -1746,7 +1742,7 @@ static bool construct(  		goto create_fail;  	} -	pool->base.dmcu = dcn20_dmcu_create(ctx, +	pool->base.dmcu = dcn21_dmcu_create(ctx,  			&dmcu_regs,  			&dmcu_shift,  			&dmcu_mask); @@ -1756,6 +1752,10 @@ static bool construct(  		goto create_fail;  	} +	// Leave as NULL to not affect current dmcu psr programming sequence +	// Will be uncommented when functionality is confirmed to be working +	pool->base.psr = NULL; +  	pool->base.abm = dce_abm_create(ctx,  			&abm_regs,  			&abm_shift, @@ -1766,18 +1766,6 @@ static bool construct(  		goto create_fail;  	} -#ifdef CONFIG_DRM_AMD_DC_DMUB -	pool->base.dmcub = dcn21_dmcub_create(ctx, -			&dmcub_regs, -			&dmcub_shift, -			&dmcub_mask); -	if (pool->base.dmcub == NULL) { 
-		dm_error("DC: failed to create dmcub!\n"); -		BREAK_TO_DEBUGGER(); -		goto create_fail; -	} -#endif -  	pool->base.pp_smu = dcn21_pp_smu_create(ctx);  	num_pipes = dcn2_1_ip.max_num_dpp; @@ -1804,41 +1792,41 @@ static bool construct(  		if ((pipe_fuses & (1 << i)) != 0)  			continue; -		pool->base.hubps[i] = dcn21_hubp_create(ctx, i); -		if (pool->base.hubps[i] == NULL) { +		pool->base.hubps[j] = dcn21_hubp_create(ctx, i); +		if (pool->base.hubps[j] == NULL) {  			BREAK_TO_DEBUGGER();  			dm_error(  				"DC: failed to create memory input!\n");  			goto create_fail;  		} -		pool->base.ipps[i] = dcn21_ipp_create(ctx, i); -		if (pool->base.ipps[i] == NULL) { +		pool->base.ipps[j] = dcn21_ipp_create(ctx, i); +		if (pool->base.ipps[j] == NULL) {  			BREAK_TO_DEBUGGER();  			dm_error(  				"DC: failed to create input pixel processor!\n");  			goto create_fail;  		} -		pool->base.dpps[i] = dcn21_dpp_create(ctx, i); -		if (pool->base.dpps[i] == NULL) { +		pool->base.dpps[j] = dcn21_dpp_create(ctx, i); +		if (pool->base.dpps[j] == NULL) {  			BREAK_TO_DEBUGGER();  			dm_error(  				"DC: failed to create dpps!\n");  			goto create_fail;  		} -		pool->base.opps[i] = dcn21_opp_create(ctx, i); -		if (pool->base.opps[i] == NULL) { +		pool->base.opps[j] = dcn21_opp_create(ctx, i); +		if (pool->base.opps[j] == NULL) {  			BREAK_TO_DEBUGGER();  			dm_error(  				"DC: failed to create output pixel processor!\n");  			goto create_fail;  		} -		pool->base.timing_generators[i] = dcn21_timing_generator_create( +		pool->base.timing_generators[j] = dcn21_timing_generator_create(  				ctx, i); -		if (pool->base.timing_generators[i] == NULL) { +		if (pool->base.timing_generators[j] == NULL) {  			BREAK_TO_DEBUGGER();  			dm_error("DC: failed to create tg!\n");  			goto create_fail; @@ -1882,7 +1870,6 @@ static bool construct(  		goto create_fail;  	} -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {  		pool->base.dscs[i] = dcn21_dsc_create(ctx, 
i);  		if (pool->base.dscs[i] == NULL) { @@ -1891,7 +1878,6 @@ static bool construct(  			goto create_fail;  		}  	} -#endif  	if (!dcn20_dwbc_create(ctx, &pool->base)) {  		BREAK_TO_DEBUGGER(); @@ -1922,7 +1908,7 @@ static bool construct(  create_fail: -	destruct(pool); +	dcn21_resource_destruct(pool);  	return false;  } @@ -1937,7 +1923,7 @@ struct resource_pool *dcn21_create_resource_pool(  	if (!pool)  		return NULL; -	if (construct(init_data->num_virtual_links, dc, pool)) +	if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool))  		return &pool->base;  	BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 94b75e942607..8bde1d688f2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c(  		const struct dc_link *link,  		struct i2c_command *cmd); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  bool dm_helpers_dp_write_dsc_enable(  		struct dc_context *ctx,  		const struct dc_stream_state *stream,  		bool enable  ); -#endif  bool dm_helpers_is_dp_sink_present(  		struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index ef7df9ef6d7e..ae608c329366 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -41,12 +41,8 @@ enum pp_smu_ver {  	 */  	PP_SMU_UNSUPPORTED,  	PP_SMU_VER_RV, -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0  	PP_SMU_VER_NV, -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	PP_SMU_VER_RN, -#endif  	PP_SMU_VER_MAX  }; @@ -143,7 +139,6 @@ struct pp_smu_funcs_rv {  	void (*set_pme_wa_enable)(struct pp_smu *pp);  }; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0  /* Used by pp_smu_funcs_nv.set_voltage_by_freq   *   */ @@ -247,7 +242,6 @@ struct pp_smu_funcs_nv {  	enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,  			BOOLEAN 
pstate_handshake_supported);  }; -#endif  #define PP_SMU_NUM_SOCCLK_DPM_LEVELS  8  #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8 @@ -291,12 +285,8 @@ struct pp_smu_funcs {  	struct pp_smu ctx;  	union {  		struct pp_smu_funcs_rv rv_funcs; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0  		struct pp_smu_funcs_nv nv_funcs; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  		struct pp_smu_funcs_rn rn_funcs; -#endif  	};  }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 1a0429744630..968ff1fef486 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,9 @@  #undef DEPRECATED +struct dmub_srv; +struct dc_dmub_srv; +  irq_handler_idx dm_register_interrupt(  	struct dc_context *ctx,  	struct dc_interrupt_params *int_params, @@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,  		uint32_t addr, int n,  		uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub); +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv); + +void reg_sequence_start_gather(const struct dc_context *ctx); +void reg_sequence_start_execute(const struct dc_context *ctx); +void reg_sequence_wait_done(const struct dc_context *ctx); +  #define FD(reg_field)	reg_field ## __SHIFT, \  						reg_field ## _MASK diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index a3d1be20dd9d..b52ba6ffabe1 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -220,6 +220,7 @@ struct dm_bl_data_point {  };  /* Total size of the structure should not exceed 256 bytes */ +#define BL_DATA_POINTS 99  struct dm_acpi_atif_backlight_caps {  	uint16_t size; /* Bytes 0-1 (2 bytes) */  	uint16_t flags; /* Byted 2-3 (2 bytes) */ @@ -229,7 +230,7 @@ struct 
dm_acpi_atif_backlight_caps {  	uint8_t  min_input_signal; /* Byte 7 */  	uint8_t  max_input_signal; /* Byte 8 */  	uint8_t  num_data_points; /* Byte 9 */ -	struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ +	struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/  };  enum dm_acpi_display_type { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 8df251626e22..7ee8b8460a9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -1,5 +1,6 @@  #  # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC  #  # Permission is hereby granted, free of charge, to any person obtaining a  # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@  # It provides the general basic services required by other DAL  # subcomponents. +ifdef CONFIG_X86  dml_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dml_ccflags := -mhard-float -maltivec +endif  ifdef CONFIG_CC_IS_GCC  ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1  endif  endif +ifdef CONFIG_X86  ifdef IS_OLD_GCC  # Stack alignment mismatch, proceed with caution.  
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,17 +48,16 @@ dml_ccflags += -mpreferred-stack-boundary=4  else  dml_ccflags += -msse2  endif +endif  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1  CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)  endif @@ -61,11 +68,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)  DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \  	dml_common_defs.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN  DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o  DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1  DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o  endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6c6c486b774a..e7a8ac7a1f22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -937,7 +937,7 @@ static unsigned int CalculateVMAndRowBytes(  		*MetaRowByte = 0;  	} -	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { +	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == 
dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {  		MacroTileSizeBytes = 256;  		MacroTileHeight = BlockHeight256Bytes;  	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1335,11 +1335,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer  		else  			mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; -		if (mode_lib->vba.ODMCombineEnabled[k] == true) +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  			MainPlaneDoesODMCombine = true;  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  				MainPlaneDoesODMCombine = true;  		if (MainPlaneDoesODMCombine == true) @@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer  			mode_lib->vba.MinActiveDRAMClockChangeMargin  					+ mode_lib->vba.DRAMClockChangeLatency; -	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { +	if (mode_lib->vba.DRAMClockChangeSupportsVActive && +			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {  		mode_lib->vba.DRAMClockChangeWatermark += 25;  		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;  	} else { @@ -2847,12 +2848,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)  			SwathWidth = mode_lib->vba.ViewportHeight[k];  		} -		if (mode_lib->vba.ODMCombineEnabled[k] == true) { +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  			MainPlaneDoesODMCombine = true;  		}  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) { +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  				MainPlaneDoesODMCombine = true;  			}  		} @@ 
-3347,7 +3348,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  										== dm_420_10))  				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl  						|| mode_lib->vba.SurfaceTiling[k] -								== dm_sw_gfx7_2d_thin_lvp) +								== dm_sw_gfx7_2d_thin_l_vp)  						&& !((mode_lib->vba.SourcePixelFormat[k]  								== dm_444_64  								|| mode_lib->vba.SourcePixelFormat[k] @@ -3445,10 +3446,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->FabricAndDRAMBandwidthPerState[i] * 1000)  				* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; -		locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; +		locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;  		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /  					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3459,7 +3460,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);  		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  				4 * locals->ReturnBWToDCNPerState *  				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3471,7 +3472,7 @@ void 
dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);  		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /  					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3482,7 +3483,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);  		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  				4 * locals->ReturnBWToDCNPerState *  				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3520,12 +3521,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {  		locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =  				(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] -				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; -		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] +				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; +		if ((mode_lib->vba.ROBBufferSizeInKByte - 
mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]  				> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { -			locals->ROBSupport[i] = true; +			locals->ROBSupport[i][0] = true;  		} else { -			locals->ROBSupport[i] = false; +			locals->ROBSupport[i][0] = false;  		}  	}  	/*Writeback Mode Support Check*/ @@ -3902,7 +3903,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity  						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] -						&& locals->ODMCombineEnablePerState[i][k] == false) { +						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->NoOfDPP[i][j][k] = 1;  					locals->RequiredDPPCLK[i][j][k] =  						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -3991,16 +3992,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	/*Viewport Size Check*/  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		locals->ViewportSizeSupport[i] = true; +		locals->ViewportSizeSupport[i][0] = true;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -			if (locals->ODMCombineEnablePerState[i][k] == true) { +			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))  						> locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			} else {  				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			}  		} @@ -4182,8 +4183,7 @@ void 
dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						mode_lib->vba.DSCFormatFactor = 1;  					}  					if (locals->RequiresDSC[i][k] == true) { -						if (locals->ODMCombineEnablePerState[i][k] -								== true) { +						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor  									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {  								locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4206,7 +4206,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		mode_lib->vba.TotalDSCUnitsRequired = 0.0;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  			if (locals->RequiresDSC[i][k] == true) { -				if (locals->ODMCombineEnablePerState[i][k] == true) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  					mode_lib->vba.TotalDSCUnitsRequired =  							mode_lib->vba.TotalDSCUnitsRequired + 2.0;  				} else { @@ -4248,7 +4248,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];  			}  			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { -				if (locals->ODMCombineEnablePerState[i][k] == false) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->DSCDelayPerState[i][k] =  							dscceComputeDelay(  									mode_lib->vba.DSCInputBitPerComponent[k], @@ -4291,7 +4291,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {  		for (j = 0; j < 2; j++) {  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -				if (locals->ODMCombineEnablePerState[i][k] == true) +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)  					
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));  				else  					locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4344,28 +4344,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma +  dml_min(  						locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * -						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], +						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],  						locals->EffectiveLBLatencyHidingSourceLinesLuma),  						locals->SwathHeightYPerState[i][j][k]);  				locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(  						locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * -						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], +						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],  						locals->EffectiveLBLatencyHidingSourceLinesChroma),  						locals->SwathHeightCPerState[i][j][k]);  				if (locals->BytePerPixelInDETC[k] == 0) {  					locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])  							/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * -								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); +								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);  				} else {  					locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(  						locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])  						/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * -						
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), +						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),  							locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -  							locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * -							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); +							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));  				}  			}  		} @@ -4405,14 +4405,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];  				locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];  				locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; -				mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( -						mode_lib->vba.ProjectedDCFCLKDeepSleep, +				mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  						mode_lib->vba.PixelClock[k] / 16.0);  				if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {  					if (mode_lib->vba.VRatio[k] <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4422,9 +4422,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  												* mode_lib->vba.PixelClock[k]  												/ mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  					
			dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4435,9 +4435,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					}  				} else {  					if (mode_lib->vba.VRatio[k] <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4447,9 +4447,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  												* mode_lib->vba.PixelClock[k]  												/ mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4459,9 +4459,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  												* mode_lib->vba.RequiredDPPCLK[i][j][k]);  					}  					if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETC[k], @@ -4472,9 +4472,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  												* mode_lib->vba.PixelClock[k]  												/ mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						
mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETC[k], @@ -4510,7 +4510,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],  						&mode_lib->vba.dpte_row_height[k],  						&mode_lib->vba.meta_row_height[k]); -				mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( +				mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(  						mode_lib,  						mode_lib->vba.VRatio[k],  						mode_lib->vba.vtaps[k], @@ -4549,7 +4549,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],  							&mode_lib->vba.dpte_row_height_chroma[k],  							&mode_lib->vba.meta_row_height_chroma[k]); -					mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( +					mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(  							mode_lib,  							mode_lib->vba.VRatio[k] / 2.0,  							mode_lib->vba.VTAPsChroma[k], @@ -4563,14 +4563,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;  					mode_lib->vba.MetaRowBytesC = 0.0;  					mode_lib->vba.DPTEBytesPerRowC = 0.0; -					locals->PrefetchLinesC[k] = 0.0; +					locals->PrefetchLinesC[0][0][k] = 0.0;  					locals->PTEBufferSizeNotExceededC[i][j][k] = true;  					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;  				} -				locals->PDEAndMetaPTEBytesPerFrame[k] = +				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =  						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + 
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; -				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; -				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; +				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; +				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;  				CalculateActiveRowBandwidth(  						mode_lib->vba.GPUVMEnable, @@ -4597,14 +4597,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  									+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]  											* mode_lib->vba.MetaChunkSize)  									* 1024.0 -									/ mode_lib->vba.ReturnBWPerState[i]; +									/ mode_lib->vba.ReturnBWPerState[i][0];  			if (mode_lib->vba.GPUVMEnable == true) {  				mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency  						+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]  								* mode_lib->vba.PTEGroupSize -								/ mode_lib->vba.ReturnBWPerState[i]; +								/ mode_lib->vba.ReturnBWPerState[i][0];  			} -			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; +			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  				if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4654,7 +4654,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			}  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] +				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]  					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));  			} @@ -4699,7 +4699,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  									
mode_lib->vba.RequiredDPPCLK[i][j][k],  									mode_lib->vba.RequiredDISPCLK[i][j],  									mode_lib->vba.PixelClock[k], -									mode_lib->vba.ProjectedDCFCLKDeepSleep, +									mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  									mode_lib->vba.DSCDelayPerState[i][k],  									mode_lib->vba.NoOfDPP[i][j][k],  									mode_lib->vba.ScalerEnabled[k], @@ -4717,7 +4717,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  											- mode_lib->vba.VActive[k],  									mode_lib->vba.HTotal[k],  									mode_lib->vba.MaxInterDCNTileRepeaters, -									mode_lib->vba.MaximumVStartup[k], +									mode_lib->vba.MaximumVStartup[0][0][k],  									mode_lib->vba.GPUVMMaxPageTableLevels,  									mode_lib->vba.GPUVMEnable,  									mode_lib->vba.DynamicMetadataEnable[k], @@ -4727,15 +4727,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  									mode_lib->vba.UrgentLatencyPixelDataOnly,  									mode_lib->vba.ExtraLatency,  									mode_lib->vba.TimeCalc, -									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], -									mode_lib->vba.MetaRowBytes[k], -									mode_lib->vba.DPTEBytesPerRow[k], -									mode_lib->vba.PrefetchLinesY[k], +									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], +									mode_lib->vba.MetaRowBytes[0][0][k], +									mode_lib->vba.DPTEBytesPerRow[0][0][k], +									mode_lib->vba.PrefetchLinesY[0][0][k],  									mode_lib->vba.SwathWidthYPerState[i][j][k],  									mode_lib->vba.BytePerPixelInDETY[k],  									mode_lib->vba.PrefillY[k],  									mode_lib->vba.MaxNumSwY[k], -									mode_lib->vba.PrefetchLinesC[k], +									mode_lib->vba.PrefetchLinesC[0][0][k],  									mode_lib->vba.BytePerPixelInDETC[k],  									mode_lib->vba.PrefillC[k],  									mode_lib->vba.MaxNumSwC[k], @@ -4766,19 +4766,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				locals->prefetch_vm_bw_valid = true;  				
locals->prefetch_row_bw_valid = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -					if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) +					if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)  						locals->prefetch_vm_bw[k] = 0;  					else if (locals->LinesForMetaPTE[k] > 0) -						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] +						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]  							/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);  					else {  						locals->prefetch_vm_bw[k] = 0;  						locals->prefetch_vm_bw_valid = false;  					} -					if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) +					if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)  						locals->prefetch_row_bw[k] = 0;  					else if (locals->LinesForMetaAndDPTERow[k] > 0) -						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) +						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])  							/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);  					else {  						locals->prefetch_row_bw[k] = 0; @@ -4797,13 +4797,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  											mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])  											+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);  				} -				locals->BandwidthWithoutPrefetchSupported[i] = true; -				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { -					locals->BandwidthWithoutPrefetchSupported[i] = false; +				locals->BandwidthWithoutPrefetchSupported[i][0] = true; +				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { +					locals->BandwidthWithoutPrefetchSupported[i][0] = false;  				}  				locals->PrefetchSupported[i][j] = true; -				if 
(mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { +				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {  					locals->PrefetchSupported[i][j] = false;  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4828,7 +4828,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			if (mode_lib->vba.PrefetchSupported[i][j] == true  					&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {  				mode_lib->vba.BandwidthAvailableForImmediateFlip = -						mode_lib->vba.ReturnBWPerState[i]; +						mode_lib->vba.ReturnBWPerState[i][0];  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  					mode_lib->vba.BandwidthAvailableForImmediateFlip =  							mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4842,9 +4842,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8  							&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {  						mode_lib->vba.ImmediateFlipBytes[k] = -								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] -										+ mode_lib->vba.MetaRowBytes[k] -										+ mode_lib->vba.DPTEBytesPerRow[k]; +								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] +										+ mode_lib->vba.MetaRowBytes[0][0][k] +										+ mode_lib->vba.DPTEBytesPerRow[0][0][k];  					}  				}  				mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4872,9 +4872,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  									/ mode_lib->vba.PixelClock[k],  							mode_lib->vba.VRatio[k],  							mode_lib->vba.Tno_bw[k], -							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], -							mode_lib->vba.MetaRowBytes[k], -							mode_lib->vba.DPTEBytesPerRow[k], +							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], +							mode_lib->vba.MetaRowBytes[0][0][k], +							mode_lib->vba.DPTEBytesPerRow[0][0][k],  							
mode_lib->vba.DCCEnable[k],  							mode_lib->vba.dpte_row_height[k],  							mode_lib->vba.meta_row_height[k], @@ -4899,7 +4899,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;  				if (mode_lib->vba.total_dcn_read_bw_with_flip -						> mode_lib->vba.ReturnBWPerState[i]) { +						> mode_lib->vba.ReturnBWPerState[i][0]) {  					mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4918,13 +4918,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)  		mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * +		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *  				mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *  				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; -		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) -			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; +		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) +			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;  		else -			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; +			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;  	}  	/*PTE Buffer Size Check*/ @@ -5012,7 +5012,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_SCALE_RATIO_TAP;  			} else if 
(mode_lib->vba.SourceFormatPixelAndScanSupport != true) {  				status = DML_FAIL_SOURCE_PIXEL_FORMAT; -			} else if (locals->ViewportSizeSupport[i] != true) { +			} else if (locals->ViewportSizeSupport[i][0] != true) {  				status = DML_FAIL_VIEWPORT_SIZE;  			} else if (locals->DIOSupport[i] != true) {  				status = DML_FAIL_DIO_SUPPORT; @@ -5022,7 +5022,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_DSC_CLK_REQUIRED;  			} else if (locals->UrgentLatencySupport[i][j] != true) {  				status = DML_FAIL_URGENT_LATENCY; -			} else if (locals->ROBSupport[i] != true) { +			} else if (locals->ROBSupport[i][0] != true) {  				status = DML_FAIL_REORDERING_BUFFER;  			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {  				status = DML_FAIL_DISPCLK_DPPCLK; @@ -5042,7 +5042,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_PITCH_SUPPORT;  			} else if (locals->PrefetchSupported[i][j] != true) {  				status = DML_FAIL_PREFETCH_SUPPORT; -			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { +			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {  				status = DML_FAIL_TOTAL_V_ACTIVE_BW;  			} else if (locals->VRatioInPrefetchSupported[i][j] != true) {  				status = DML_FAIL_V_RATIO_PREFETCH; @@ -5088,7 +5088,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; -	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; +	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];  	mode_lib->vba.FabricAndDRAMBandwidth = 
locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];  	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  		if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 3c70dd577292..22f3b5a4b3b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -997,7 +997,7 @@ static unsigned int CalculateVMAndRowBytes(  		*MetaRowByte = 0;  	} -	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { +	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {  		MacroTileSizeBytes = 256;  		MacroTileHeight = BlockHeight256Bytes;  	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1395,11 +1395,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP  		else  			mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; -		if (mode_lib->vba.ODMCombineEnabled[k] == true) +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  			MainPlaneDoesODMCombine = true;  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  				MainPlaneDoesODMCombine = true;  		if (MainPlaneDoesODMCombine == true) @@ -2611,9 +2611,13 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP  			mode_lib->vba.MinActiveDRAMClockChangeMargin  					+ mode_lib->vba.DRAMClockChangeLatency; -	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { +	if (mode_lib->vba.DRAMClockChangeSupportsVActive && +			
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {  		mode_lib->vba.DRAMClockChangeWatermark += 25;  		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; +	} else if (mode_lib->vba.DummyPStateCheck && +			mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { +		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;  	} else {  		if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {  			mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank; @@ -2881,12 +2885,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)  			SwathWidth = mode_lib->vba.ViewportHeight[k];  		} -		if (mode_lib->vba.ODMCombineEnabled[k] == true) { +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  			MainPlaneDoesODMCombine = true;  		}  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) { +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  				MainPlaneDoesODMCombine = true;  			}  		} @@ -3381,7 +3385,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  										== dm_420_10))  				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl  						|| mode_lib->vba.SurfaceTiling[k] -								== dm_sw_gfx7_2d_thin_lvp) +								== dm_sw_gfx7_2d_thin_l_vp)  						&& !((mode_lib->vba.SourcePixelFormat[k]  								== dm_444_64  								|| mode_lib->vba.SourcePixelFormat[k] @@ -3479,10 +3483,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->FabricAndDRAMBandwidthPerState[i] * 1000)  				* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; -		locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; +		locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;  		if (locals->DCCEnabledInAnyPlane 
== true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /  					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3493,7 +3497,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);  		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  				4 * locals->ReturnBWToDCNPerState *  				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3505,7 +3509,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);  		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /  					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3516,7 +3520,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);  		if (locals->DCCEnabledInAnyPlane && 
locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { -			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], +			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],  				4 * locals->ReturnBWToDCNPerState *  				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024  				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3554,12 +3558,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {  		locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =  				(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] -				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; -		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] +				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; +		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]  				> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { -			locals->ROBSupport[i] = true; +			locals->ROBSupport[i][0] = true;  		} else { -			locals->ROBSupport[i] = false; +			locals->ROBSupport[i][0] = false;  		}  	}  	/*Writeback Mode Support Check*/ @@ -3942,7 +3946,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				}  				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity  						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] -						&& locals->ODMCombineEnablePerState[i][k] == false) { +						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->NoOfDPP[i][j][k] = 1;  					
locals->RequiredDPPCLK[i][j][k] =  						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4031,16 +4035,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  	/*Viewport Size Check*/  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		locals->ViewportSizeSupport[i] = true; +		locals->ViewportSizeSupport[i][0] = true;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -			if (locals->ODMCombineEnablePerState[i][k] == true) { +			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))  						> locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			} else {  				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			}  		} @@ -4222,8 +4226,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  						mode_lib->vba.DSCFormatFactor = 1;  					}  					if (locals->RequiresDSC[i][k] == true) { -						if (locals->ODMCombineEnablePerState[i][k] -								== true) { +						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor  									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {  								locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4246,7 +4249,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  		mode_lib->vba.TotalDSCUnitsRequired = 0.0;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  			if (locals->RequiresDSC[i][k] == true) { -				if (locals->ODMCombineEnablePerState[i][k] == 
true) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  					mode_lib->vba.TotalDSCUnitsRequired =  							mode_lib->vba.TotalDSCUnitsRequired + 2.0;  				} else { @@ -4288,7 +4291,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];  			}  			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { -				if (locals->ODMCombineEnablePerState[i][k] == false) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->DSCDelayPerState[i][k] =  							dscceComputeDelay(  									mode_lib->vba.DSCInputBitPerComponent[k], @@ -4331,7 +4334,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {  		for (j = 0; j < 2; j++) {  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -				if (locals->ODMCombineEnablePerState[i][k] == true) +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)  					locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));  				else  					locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4384,28 +4387,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma +  dml_min(  						locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * -						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], +						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],  						locals->EffectiveLBLatencyHidingSourceLinesLuma),  						locals->SwathHeightYPerState[i][j][k]);  				locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(  						locals->LinesInDETChroma * 
locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * -						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], +						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],  						locals->EffectiveLBLatencyHidingSourceLinesChroma),  						locals->SwathHeightCPerState[i][j][k]);  				if (locals->BytePerPixelInDETC[k] == 0) {  					locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])  							/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * -								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); +								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);  				} else {  					locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(  						locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])  						/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * -						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), +						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),  							locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -  							locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * -							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); +							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));  				}  			}  		} @@ -4450,14 +4453,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];  				locals->SwathHeightCThisState[k] = 
locals->SwathHeightCPerState[i][j][k];  				locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; -				mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( -						mode_lib->vba.ProjectedDCFCLKDeepSleep, +				mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  						mode_lib->vba.PixelClock[k] / 16.0);  				if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {  					if (mode_lib->vba.VRatio[k] <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4467,9 +4470,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  												* mode_lib->vba.PixelClock[k]  												/ mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4480,9 +4483,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  					}  				} else {  					if (mode_lib->vba.VRatio[k] <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4492,9 +4495,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  												* mode_lib->vba.PixelClock[k]  												/ 
mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETY[k], @@ -4504,9 +4507,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  												* mode_lib->vba.RequiredDPPCLK[i][j][k]);  					}  					if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETC[k], @@ -4517,9 +4520,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  												* mode_lib->vba.PixelClock[k]  												/ mode_lib->vba.NoOfDPP[i][j][k]);  					} else { -						mode_lib->vba.ProjectedDCFCLKDeepSleep = +						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =  								dml_max( -										mode_lib->vba.ProjectedDCFCLKDeepSleep, +										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  										1.1  												* dml_ceil(  														mode_lib->vba.BytePerPixelInDETC[k], @@ -4555,7 +4558,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  						&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],  						&mode_lib->vba.dpte_row_height[k],  						&mode_lib->vba.meta_row_height[k]); -				mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( +				mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(  						mode_lib,  						mode_lib->vba.VRatio[k],  						mode_lib->vba.vtaps[k], @@ -4594,7 +4597,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib 
*mode  							&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],  							&mode_lib->vba.dpte_row_height_chroma[k],  							&mode_lib->vba.meta_row_height_chroma[k]); -					mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( +					mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(  							mode_lib,  							mode_lib->vba.VRatio[k] / 2.0,  							mode_lib->vba.VTAPsChroma[k], @@ -4608,14 +4611,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;  					mode_lib->vba.MetaRowBytesC = 0.0;  					mode_lib->vba.DPTEBytesPerRowC = 0.0; -					locals->PrefetchLinesC[k] = 0.0; +					locals->PrefetchLinesC[0][0][k] = 0.0;  					locals->PTEBufferSizeNotExceededC[i][j][k] = true;  					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;  				} -				locals->PDEAndMetaPTEBytesPerFrame[k] = +				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =  						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; -				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; -				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; +				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; +				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;  				CalculateActiveRowBandwidth(  						mode_lib->vba.GPUVMEnable, @@ -4642,14 +4645,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  									+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]  											* mode_lib->vba.MetaChunkSize)  									* 1024.0 -									/ mode_lib->vba.ReturnBWPerState[i]; +									/ mode_lib->vba.ReturnBWPerState[i][0];  			if (mode_lib->vba.GPUVMEnable == true) {  				mode_lib->vba.ExtraLatency = 
mode_lib->vba.ExtraLatency  						+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]  								* mode_lib->vba.PTEGroupSize -								/ mode_lib->vba.ReturnBWPerState[i]; +								/ mode_lib->vba.ReturnBWPerState[i][0];  			} -			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; +			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  				if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4699,7 +4702,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  			}  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] +				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]  					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));  			} @@ -4739,7 +4742,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  						mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;  					} -					CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, +					CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,  						mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],  						mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],  						mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, 
mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, @@ -4753,14 +4756,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  									mode_lib->vba.RequiredDPPCLK[i][j][k],  									mode_lib->vba.RequiredDISPCLK[i][j],  									mode_lib->vba.PixelClock[k], -									mode_lib->vba.ProjectedDCFCLKDeepSleep, +									mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  									mode_lib->vba.NoOfDPP[i][j][k],  									mode_lib->vba.NumberOfCursors[k],  									mode_lib->vba.VTotal[k]  											- mode_lib->vba.VActive[k],  									mode_lib->vba.HTotal[k],  									mode_lib->vba.MaxInterDCNTileRepeaters, -									mode_lib->vba.MaximumVStartup[k], +									mode_lib->vba.MaximumVStartup[0][0][k],  									mode_lib->vba.GPUVMMaxPageTableLevels,  									mode_lib->vba.GPUVMEnable,  									mode_lib->vba.DynamicMetadataEnable[k], @@ -4770,15 +4773,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  									mode_lib->vba.UrgentLatencyPixelDataOnly,  									mode_lib->vba.ExtraLatency,  									mode_lib->vba.TimeCalc, -									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], -									mode_lib->vba.MetaRowBytes[k], -									mode_lib->vba.DPTEBytesPerRow[k], -									mode_lib->vba.PrefetchLinesY[k], +									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], +									mode_lib->vba.MetaRowBytes[0][0][k], +									mode_lib->vba.DPTEBytesPerRow[0][0][k], +									mode_lib->vba.PrefetchLinesY[0][0][k],  									mode_lib->vba.SwathWidthYPerState[i][j][k],  									mode_lib->vba.BytePerPixelInDETY[k],  									mode_lib->vba.PrefillY[k],  									mode_lib->vba.MaxNumSwY[k], -									mode_lib->vba.PrefetchLinesC[k], +									mode_lib->vba.PrefetchLinesC[0][0][k],  									mode_lib->vba.BytePerPixelInDETC[k],  									mode_lib->vba.PrefillC[k],  									mode_lib->vba.MaxNumSwC[k], @@ -4808,19 +4811,19 @@ void 
dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				locals->prefetch_vm_bw_valid = true;  				locals->prefetch_row_bw_valid = true;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -					if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) +					if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)  						locals->prefetch_vm_bw[k] = 0;  					else if (locals->LinesForMetaPTE[k] > 0) -						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] +						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]  							/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);  					else {  						locals->prefetch_vm_bw[k] = 0;  						locals->prefetch_vm_bw_valid = false;  					} -					if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) +					if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)  						locals->prefetch_row_bw[k] = 0;  					else if (locals->LinesForMetaAndDPTERow[k] > 0) -						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) +						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])  							/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);  					else {  						locals->prefetch_row_bw[k] = 0; @@ -4839,13 +4842,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  											mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])  											+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);  				} -				locals->BandwidthWithoutPrefetchSupported[i] = true; -				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { -					locals->BandwidthWithoutPrefetchSupported[i] = false; +				locals->BandwidthWithoutPrefetchSupported[i][0] = true; +				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { +					
locals->BandwidthWithoutPrefetchSupported[i][0] = false;  				}  				locals->PrefetchSupported[i][j] = true; -				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { +				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {  					locals->PrefetchSupported[i][j] = false;  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4870,7 +4873,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  			if (mode_lib->vba.PrefetchSupported[i][j] == true  					&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {  				mode_lib->vba.BandwidthAvailableForImmediateFlip = -						mode_lib->vba.ReturnBWPerState[i]; +						mode_lib->vba.ReturnBWPerState[i][0];  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  					mode_lib->vba.BandwidthAvailableForImmediateFlip =  							mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4884,9 +4887,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  					if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8  							&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {  						mode_lib->vba.ImmediateFlipBytes[k] = -								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] -										+ mode_lib->vba.MetaRowBytes[k] -										+ mode_lib->vba.DPTEBytesPerRow[k]; +								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] +										+ mode_lib->vba.MetaRowBytes[0][0][k] +										+ mode_lib->vba.DPTEBytesPerRow[0][0][k];  					}  				}  				mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4914,9 +4917,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  									/ mode_lib->vba.PixelClock[k],  							mode_lib->vba.VRatio[k],  							mode_lib->vba.Tno_bw[k], -							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], -							mode_lib->vba.MetaRowBytes[k], -							mode_lib->vba.DPTEBytesPerRow[k], +							
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], +							mode_lib->vba.MetaRowBytes[0][0][k], +							mode_lib->vba.DPTEBytesPerRow[0][0][k],  							mode_lib->vba.DCCEnable[k],  							mode_lib->vba.dpte_row_height[k],  							mode_lib->vba.meta_row_height[k], @@ -4941,7 +4944,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				}  				mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;  				if (mode_lib->vba.total_dcn_read_bw_with_flip -						> mode_lib->vba.ReturnBWPerState[i]) { +						> mode_lib->vba.ReturnBWPerState[i][0]) {  					mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4957,13 +4960,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  	/*Vertical Active BW support*/  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * +		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *  				mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *  				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; -		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) -			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; +		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) +			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;  		else -			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; +			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;  	}  	/*PTE Buffer Size Check*/ @@ -5051,7 +5054,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				status = DML_FAIL_SCALE_RATIO_TAP;  			} 
else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {  				status = DML_FAIL_SOURCE_PIXEL_FORMAT; -			} else if (locals->ViewportSizeSupport[i] != true) { +			} else if (locals->ViewportSizeSupport[i][0] != true) {  				status = DML_FAIL_VIEWPORT_SIZE;  			} else if (locals->DIOSupport[i] != true) {  				status = DML_FAIL_DIO_SUPPORT; @@ -5061,7 +5064,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				status = DML_FAIL_DSC_CLK_REQUIRED;  			} else if (locals->UrgentLatencySupport[i][j] != true) {  				status = DML_FAIL_URGENT_LATENCY; -			} else if (locals->ROBSupport[i] != true) { +			} else if (locals->ROBSupport[i][0] != true) {  				status = DML_FAIL_REORDERING_BUFFER;  			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {  				status = DML_FAIL_DISPCLK_DPPCLK; @@ -5081,7 +5084,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  				status = DML_FAIL_PITCH_SUPPORT;  			} else if (locals->PrefetchSupported[i][j] != true) {  				status = DML_FAIL_PREFETCH_SUPPORT; -			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { +			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {  				status = DML_FAIL_TOTAL_V_ACTIVE_BW;  			} else if (locals->VRatioInPrefetchSupported[i][j] != true) {  				status = DML_FAIL_V_RATIO_PREFETCH; @@ -5127,7 +5130,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode  	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; -	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; +	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];  	mode_lib->vba.FabricAndDRAMBandwidth = 
locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];  	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  		if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 2c7455e22a65..ca807846032f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format  static bool is_dual_plane(enum source_format_class source_format)  { -	bool ret_val = 0; +	bool ret_val = false;  	if ((source_format == dm_420_8) || (source_format == dm_420_10)) -		ret_val = 1; +		ret_val = true;  	return ret_val;  } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  	unsigned int swath_bytes_c = 0;  	unsigned int full_swath_bytes_packed_l = 0;  	unsigned int full_swath_bytes_packed_c = 0; -	bool req128_l = 0; -	bool req128_c = 0; +	bool req128_l = false; +	bool req128_c = false;  	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);  	bool surf_vert = (pipe_src_param.source_scan == dm_vert);  	unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;  		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request -			req128_l = 0; -			req128_c = 0; +			req128_l = false; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l;  			swath_bytes_c = full_swath_bytes_packed_c;  		} else { //128b request (for luma only for yuv420 8bpc) -			req128_l = 1; -			req128_c = 0; +			req128_l = true; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l / 2;  			swath_bytes_c = full_swath_bytes_packed_c;  		} @@ 
-280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  		total_swath_bytes = 2 * full_swath_bytes_packed_l;  		if (total_swath_bytes <= detile_buf_size_in_bytes) -			req128_l = 0; +			req128_l = false;  		else -			req128_l = 1; +			req128_l = true;  		swath_bytes_l = total_swath_bytes;  		swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,  		const display_pipe_source_params_st pipe_src_param,  		bool is_chroma)  { -	bool mode_422 = 0; +	bool mode_422 = false;  	unsigned int vp_width = 0;  	unsigned int vp_height = 0;  	unsigned int data_pitch = 0; @@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,  	min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;  	dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start; -	disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start -			+ min_dst_y_ttu_vblank) * dml_pow(2, 2)); +	disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2));  	ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));  	dml_print("DML_DLG: %s: min_dcfclk_mhz                         = %3.2f\n", @@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,  	// Source  //             dcc_en              = src.dcc;  	dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); -	mode_422 = 0; // TODO +	mode_422 = false; // TODO  	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed  //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);  //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c 
index 1e6aeb1bd2bf..287b7a0ad108 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format  static bool is_dual_plane(enum source_format_class source_format)  { -	bool ret_val = 0; +	bool ret_val = false;  	if ((source_format == dm_420_8) || (source_format == dm_420_10)) -		ret_val = 1; +		ret_val = true;  	return ret_val;  } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  	unsigned int swath_bytes_c = 0;  	unsigned int full_swath_bytes_packed_l = 0;  	unsigned int full_swath_bytes_packed_c = 0; -	bool req128_l = 0; -	bool req128_c = 0; +	bool req128_l = false; +	bool req128_c = false;  	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);  	bool surf_vert = (pipe_src_param.source_scan == dm_vert);  	unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;  		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request -			req128_l = 0; -			req128_c = 0; +			req128_l = false; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l;  			swath_bytes_c = full_swath_bytes_packed_c;  		} else { //128b request (for luma only for yuv420 8bpc) -			req128_l = 1; -			req128_c = 0; +			req128_l = true; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l / 2;  			swath_bytes_c = full_swath_bytes_packed_c;  		} @@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,  		total_swath_bytes = 2 * full_swath_bytes_packed_l;  		if (total_swath_bytes <= detile_buf_size_in_bytes) -			req128_l = 0; +			req128_l = false;  		else -			req128_l = 1; +			req128_l = true;  		swath_bytes_l = total_swath_bytes;  		
swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,  		const display_pipe_source_params_st pipe_src_param,  		bool is_chroma)  { -	bool mode_422 = 0; +	bool mode_422 = false;  	unsigned int vp_width = 0;  	unsigned int vp_height = 0;  	unsigned int data_pitch = 0; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,  	// Source  //             dcc_en              = src.dcc;  	dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); -	mode_422 = 0; // TODO +	mode_422 = false; // TODO  	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed  //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);  //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index ba77957aefe3..af35b3bea909 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -23,7 +23,6 @@   *   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #include "../display_mode_lib.h"  #include "../dml_inline_defs.h" @@ -198,7 +197,7 @@ static unsigned int CalculateVMAndRowBytes(  		unsigned int *meta_row_width,  		unsigned int *meta_row_height,  		unsigned int *vm_group_bytes, -		long         *dpte_group_bytes, +		unsigned int *dpte_group_bytes,  		unsigned int *PixelPTEReqWidth,  		unsigned int *PixelPTEReqHeight,  		unsigned int *PTERequestSize, @@ -296,7 +295,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(  		double UrgentOutOfOrderReturn,  		double ReturnBW,  		bool GPUVMEnable, -		long dpte_group_bytes[], +		int dpte_group_bytes[],  		unsigned int MetaChunkSize,  		double UrgentLatency,  		double ExtraLatency, @@ -310,13 +309,13 @@ static 
void CalculateWatermarksAndDRAMSpeedChangeSupport(  		int DPPPerPlane[],  		bool DCCEnable[],  		double DPPCLK[], -		unsigned int SwathWidthSingleDPPY[], +		double SwathWidthSingleDPPY[],  		unsigned int SwathHeightY[],  		double ReadBandwidthPlaneLuma[],  		unsigned int SwathHeightC[],  		double ReadBandwidthPlaneChroma[],  		unsigned int LBBitPerPixel[], -		unsigned int SwathWidthY[], +		double SwathWidthY[],  		double HRatio[],  		unsigned int vtaps[],  		unsigned int VTAPsChroma[], @@ -345,7 +344,7 @@ static void CalculateDCFCLKDeepSleep(  		double BytePerPixelDETY[],  		double BytePerPixelDETC[],  		double VRatio[], -		unsigned int SwathWidthY[], +		double SwathWidthY[],  		int DPPPerPlane[],  		double HRatio[],  		double PixelClock[], @@ -436,7 +435,7 @@ static void CalculateMetaAndPTETimes(  		unsigned int           meta_row_height[],  		unsigned int           meta_req_width[],  		unsigned int           meta_req_height[], -		long                   dpte_group_bytes[], +		int                   dpte_group_bytes[],  		unsigned int           PTERequestSizeY[],  		unsigned int           PTERequestSizeC[],  		unsigned int           PixelPTEReqWidthY[], @@ -478,7 +477,7 @@ static double CalculateExtraLatency(  		bool HostVMEnable,  		int NumberOfActivePlanes,  		int NumberOfDPP[], -		long dpte_group_bytes[], +		int dpte_group_bytes[],  		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,  		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,  		int HostVMMaxPageTableLevels, @@ -1281,7 +1280,7 @@ static unsigned int CalculateVMAndRowBytes(  		unsigned int *meta_row_width,  		unsigned int *meta_row_height,  		unsigned int *vm_group_bytes, -		long         *dpte_group_bytes, +		unsigned int *dpte_group_bytes,  		unsigned int *PixelPTEReqWidth,  		unsigned int *PixelPTEReqHeight,  		unsigned int *PTERequestSize, @@ -1339,7 +1338,7 @@ static unsigned int CalculateVMAndRowBytes(  		*MetaRowByte = 0;  	} -	if 
(SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { +	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {  		MacroTileSizeBytes = 256;  		MacroTileHeight = BlockHeight256Bytes;  	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1684,11 +1683,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		else  			locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; -		if (mode_lib->vba.ODMCombineEnabled[k] == true) +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  			MainPlaneDoesODMCombine = true;  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)  				MainPlaneDoesODMCombine = true;  		if (MainPlaneDoesODMCombine == true) @@ -2941,12 +2940,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)  			SwathWidth = mode_lib->vba.ViewportHeight[k];  		} -		if (mode_lib->vba.ODMCombineEnabled[k] == true) { +		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  			MainPlaneDoesODMCombine = true;  		}  		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {  			if (mode_lib->vba.BlendingAndTiming[k] == j -					&& mode_lib->vba.ODMCombineEnabled[j] == true) { +					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {  				MainPlaneDoesODMCombine = true;  			}  		} @@ -3454,7 +3453,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  										== dm_420_10))  				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl  						|| mode_lib->vba.SurfaceTiling[k] -								== dm_sw_gfx7_2d_thin_lvp) +								== dm_sw_gfx7_2d_thin_l_vp)  						&& 
!((mode_lib->vba.SourcePixelFormat[k]  								== dm_444_64  								|| mode_lib->vba.SourcePixelFormat[k] @@ -3543,17 +3542,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	}  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		locals->IdealSDPPortBandwidthPerState[i] = dml_min3( +		locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(  				mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],  				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels  						* mode_lib->vba.DRAMChannelWidth,  				mode_lib->vba.FabricClockPerState[i]  						* mode_lib->vba.FabricDatapathToDCNDataReturn);  		if (mode_lib->vba.HostVMEnable == false) { -			locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] +			locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]  					* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;  		} else { -			locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] +			locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]  					* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;  		}  	} @@ -3590,12 +3589,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				+ dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,  						mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,  						mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly) -					* mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; -		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] +					* mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; +		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]  				> 
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { -			locals->ROBSupport[i] = true; +			locals->ROBSupport[i][0] = true;  		} else { -			locals->ROBSupport[i] = false; +			locals->ROBSupport[i][0] = false;  		}  	}  	/*Writeback Mode Support Check*/ @@ -3983,7 +3982,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity  						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] -						&& locals->ODMCombineEnablePerState[i][k] == false) { +						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->NoOfDPP[i][j][k] = 1;  					locals->RequiredDPPCLK[i][j][k] =  						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4072,16 +4071,16 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	/*Viewport Size Check*/  	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { -		locals->ViewportSizeSupport[i] = true; +		locals->ViewportSizeSupport[i][0] = true;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -			if (locals->ODMCombineEnablePerState[i][k] == true) { +			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))  						> locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			} else {  				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { -					locals->ViewportSizeSupport[i] = false; +					locals->ViewportSizeSupport[i][0] = false;  				}  			}  		} @@ -4122,11 +4121,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	}  	for (i = 
0; i <= mode_lib->vba.soc.num_states; i++) {  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -			locals->RequiresDSC[i][k] = 0; +			locals->RequiresDSC[i][k] = false;  			locals->RequiresFEC[i][k] = 0;  			if (mode_lib->vba.BlendingAndTiming[k] == k) {  				if (mode_lib->vba.Output[k] == dm_hdmi) { -					locals->RequiresDSC[i][k] = 0; +					locals->RequiresDSC[i][k] = false;  					locals->RequiresFEC[i][k] = 0;  					locals->OutputBppPerState[i][k] = TruncToValidBPP(  							dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, @@ -4270,8 +4269,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						mode_lib->vba.DSCFormatFactor = 1;  					}  					if (locals->RequiresDSC[i][k] == true) { -						if (locals->ODMCombineEnablePerState[i][k] -								== true) { +						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor  									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {  								locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4294,7 +4292,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		mode_lib->vba.TotalDSCUnitsRequired = 0.0;  		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  			if (locals->RequiresDSC[i][k] == true) { -				if (locals->ODMCombineEnablePerState[i][k] == true) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  					mode_lib->vba.TotalDSCUnitsRequired =  							mode_lib->vba.TotalDSCUnitsRequired + 2.0;  				} else { @@ -4336,7 +4334,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];  			}  			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { -				if (locals->ODMCombineEnablePerState[i][k] == false) { +				
if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {  					locals->DSCDelayPerState[i][k] =  							dscceComputeDelay(  									mode_lib->vba.DSCInputBitPerComponent[k], @@ -4400,7 +4398,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  				locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];  				locals->NoOfDPPThisState[k]        = locals->NoOfDPP[i][j][k]; -				if (locals->ODMCombineEnablePerState[i][k] == true) { +				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {  					locals->SwathWidthYThisState[k] =  						dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));  				} else { @@ -4452,7 +4450,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					locals->PSCL_FACTOR,  					locals->PSCL_FACTOR_CHROMA,  					locals->RequiredDPPCLKThisState, -					&mode_lib->vba.ProjectedDCFCLKDeepSleep); +					&mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  				if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 @@ -4497,7 +4495,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							locals->PTERequestSizeC,  							locals->dpde0_bytes_per_frame_ub_c,  							locals->meta_pte_bytes_per_frame_ub_c); -					locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines( +					locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(  							mode_lib,  							mode_lib->vba.VRatio[k]/2,  							mode_lib->vba.VTAPsChroma[k], @@ -4512,7 +4510,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;  					mode_lib->vba.MetaRowBytesC = 0.0;  					mode_lib->vba.DPTEBytesPerRowC = 0.0; -					locals->PrefetchLinesC[k] = 0.0; +					
locals->PrefetchLinesC[0][0][k] = 0.0;  					locals->PTEBufferSizeNotExceededC[i][j][k] = true;  					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;  				} @@ -4553,7 +4551,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						locals->PTERequestSizeY,  						locals->dpde0_bytes_per_frame_ub_l,  						locals->meta_pte_bytes_per_frame_ub_l); -				locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines( +				locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(  						mode_lib,  						mode_lib->vba.VRatio[k],  						mode_lib->vba.vtaps[k], @@ -4563,10 +4561,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						mode_lib->vba.ViewportYStartY[k],  						&locals->PrefillY[k],  						&locals->MaxNumSwY[k]); -				locals->PDEAndMetaPTEBytesPerFrame[k] = +				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =  						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; -				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; -				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; +				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; +				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;  				CalculateActiveRowBandwidth(  						mode_lib->vba.GPUVMEnable, @@ -4592,7 +4590,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.PixelChunkSizeInKByte,  					locals->TotalNumberOfDCCActiveDPP[i][j],  					mode_lib->vba.MetaChunkSize, -					locals->ReturnBWPerState[i], +					locals->ReturnBWPerState[i][0],  					mode_lib->vba.GPUVMEnable,  					mode_lib->vba.HostVMEnable,  					mode_lib->vba.NumberOfActivePlanes, @@ -4603,7 +4601,7 @@ void 
dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.HostVMMaxPageTableLevels,  					mode_lib->vba.HostVMCachedPageTableLevels); -			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; +			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  				if (mode_lib->vba.BlendingAndTiming[k] == k) {  					if (mode_lib->vba.WritebackEnable[k] == true) { @@ -4645,15 +4643,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					}  				}  			} -			mode_lib->vba.MaxMaxVStartup = 0; +			mode_lib->vba.MaxMaxVStartup[0][0] = 0;  			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] +				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]  					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); -				mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]); +				mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);  			}  			mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode; -			mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; +			mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];  			do {  				mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;  				mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup; @@ -4694,7 +4692,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];  					myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];  					myPipe.PixelClock = mode_lib->vba.PixelClock[k]; -					myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep; 
+					myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];  					myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];  					myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];  					myPipe.SourceScan = mode_lib->vba.SourceScan[k]; @@ -4728,8 +4726,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],  							mode_lib->vba.OutputFormat[k],  							mode_lib->vba.MaxInterDCNTileRepeaters, -							dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]), -							locals->MaximumVStartup[k], +							dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), +							locals->MaximumVStartup[0][0][k],  							mode_lib->vba.GPUVMMaxPageTableLevels,  							mode_lib->vba.GPUVMEnable,  							&myHostVM, @@ -4740,15 +4738,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							mode_lib->vba.UrgentLatency,  							mode_lib->vba.ExtraLatency,  							mode_lib->vba.TimeCalc, -							locals->PDEAndMetaPTEBytesPerFrame[k], -							locals->MetaRowBytes[k], -							locals->DPTEBytesPerRow[k], -							locals->PrefetchLinesY[k], +							locals->PDEAndMetaPTEBytesPerFrame[0][0][k], +							locals->MetaRowBytes[0][0][k], +							locals->DPTEBytesPerRow[0][0][k], +							locals->PrefetchLinesY[0][0][k],  							locals->SwathWidthYThisState[k],  							locals->BytePerPixelInDETY[k],  							locals->PrefillY[k],  							locals->MaxNumSwY[k], -							locals->PrefetchLinesC[k], +							locals->PrefetchLinesC[0][0][k],  							locals->BytePerPixelInDETC[k],  							locals->PrefillC[k],  							locals->MaxNumSwC[k], @@ -4837,14 +4835,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]  						+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);  				} -				
locals->BandwidthWithoutPrefetchSupported[i] = true; -				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i] +				locals->BandwidthWithoutPrefetchSupported[i][0] = true; +				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]  						|| locals->NotEnoughUrgentLatencyHiding == 1) { -					locals->BandwidthWithoutPrefetchSupported[i] = false; +					locals->BandwidthWithoutPrefetchSupported[i][0] = false;  				}  				locals->PrefetchSupported[i][j] = true; -				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i] +				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]  						|| locals->NotEnoughUrgentLatencyHiding == 1  						|| locals->NotEnoughUrgentLatencyHidingPre == 1) {  					locals->PrefetchSupported[i][j] = false; @@ -4873,17 +4871,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) { -					mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; +					mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];  					mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;  				} else {  					mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;  				}  			} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) -					&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup +					&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]  						|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));  			if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) { -				mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i]; +				mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];  				for (k = 
0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  					mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip  						- dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k] @@ -4896,7 +4894,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.TotImmediateFlipBytes = 0.0;  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  					mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes -						+ locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]; +						+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4911,9 +4909,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							mode_lib->vba.HostVMMaxPageTableLevels,  							mode_lib->vba.HostVMCachedPageTableLevels,  							mode_lib->vba.GPUVMEnable, -							locals->PDEAndMetaPTEBytesPerFrame[k], -							locals->MetaRowBytes[k], -							locals->DPTEBytesPerRow[k], +							locals->PDEAndMetaPTEBytesPerFrame[0][0][k], +							locals->MetaRowBytes[0][0][k], +							locals->DPTEBytesPerRow[0][0][k],  							mode_lib->vba.BandwidthAvailableForImmediateFlip,  							mode_lib->vba.TotImmediateFlipBytes,  							mode_lib->vba.SourcePixelFormat[k], @@ -4944,7 +4942,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				locals->ImmediateFlipSupportedForState[i][j] = true;  				if (mode_lib->vba.total_dcn_read_bw_with_flip -						> locals->ReturnBWPerState[i]) { +						> locals->ReturnBWPerState[i][0]) {  					locals->ImmediateFlipSupportedForState[i][j] = false;  				}  				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4971,7 +4969,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l  					mode_lib->vba.WritebackInterfaceChromaBufferSize,  					mode_lib->vba.DCFCLKPerState[i],  					mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels, -					locals->ReturnBWPerState[i], +					locals->ReturnBWPerState[i][0],  					mode_lib->vba.GPUVMEnable,  					locals->dpte_group_bytes,  					mode_lib->vba.MetaChunkSize, @@ -4983,7 +4981,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					mode_lib->vba.DRAMClockChangeLatency,  					mode_lib->vba.SRExitTime,  					mode_lib->vba.SREnterPlusExitTime, -					mode_lib->vba.ProjectedDCFCLKDeepSleep, +					mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],  					locals->NoOfDPPThisState,  					mode_lib->vba.DCCEnable,  					locals->RequiredDPPCLKThisState, @@ -5026,8 +5024,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];  		}  		for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) { -			locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min( -				locals->IdealSDPPortBandwidthPerState[i] * +			locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min( +				locals->IdealSDPPortBandwidthPerState[i][0] *  				mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation  				/ 100.0, mode_lib->vba.DRAMSpeedPerState[i] *  				mode_lib->vba.NumberOfChannels * @@ -5035,10 +5033,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation  				/ 100.0); -			if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) { -				locals->TotalVerticalActiveBandwidthSupport[i] = true; +			if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) { +				locals->TotalVerticalActiveBandwidthSupport[i][0] = true;  			} else { 
-				locals->TotalVerticalActiveBandwidthSupport[i] = false; +				locals->TotalVerticalActiveBandwidthSupport[i][0] = false;  			}  		}  	} @@ -5117,7 +5115,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_SCALE_RATIO_TAP;  			} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {  				status = DML_FAIL_SOURCE_PIXEL_FORMAT; -			} else if (locals->ViewportSizeSupport[i] != true) { +			} else if (locals->ViewportSizeSupport[i][0] != true) {  				status = DML_FAIL_VIEWPORT_SIZE;  			} else if (locals->DIOSupport[i] != true) {  				status = DML_FAIL_DIO_SUPPORT; @@ -5125,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_NOT_ENOUGH_DSC;  			} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {  				status = DML_FAIL_DSC_CLK_REQUIRED; -			} else if (locals->ROBSupport[i] != true) { +			} else if (locals->ROBSupport[i][0] != true) {  				status = DML_FAIL_REORDERING_BUFFER;  			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {  				status = DML_FAIL_DISPCLK_DPPCLK; @@ -5143,7 +5141,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				status = DML_FAIL_CURSOR_SUPPORT;  			} else if (mode_lib->vba.PitchSupport != true) {  				status = DML_FAIL_PITCH_SUPPORT; -			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { +			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {  				status = DML_FAIL_TOTAL_V_ACTIVE_BW;  			} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {  				status = DML_FAIL_PTE_BUFFER_SIZE; @@ -5199,13 +5197,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];  	mode_lib->vba.SOCCLK = 
mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; -	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; +	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];  	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {  		if (mode_lib->vba.BlendingAndTiming[k] == k) {  			mode_lib->vba.ODMCombineEnabled[k] =  					locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];  		} else { -			mode_lib->vba.ODMCombineEnabled[k] = 0; +			mode_lib->vba.ODMCombineEnabled[k] = false;  		}  		mode_lib->vba.DSCEnabled[k] =  				locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; @@ -5228,7 +5226,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(  		double UrgentOutOfOrderReturn,  		double ReturnBW,  		bool GPUVMEnable, -		long dpte_group_bytes[], +		int dpte_group_bytes[],  		unsigned int MetaChunkSize,  		double UrgentLatency,  		double ExtraLatency, @@ -5242,13 +5240,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(  		int DPPPerPlane[],  		bool DCCEnable[],  		double DPPCLK[], -		unsigned int SwathWidthSingleDPPY[], +		double SwathWidthSingleDPPY[],  		unsigned int SwathHeightY[],  		double ReadBandwidthPlaneLuma[],  		unsigned int SwathHeightC[],  		double ReadBandwidthPlaneChroma[],  		unsigned int LBBitPerPixel[], -		unsigned int SwathWidthY[], +		double SwathWidthY[],  		double HRatio[],  		unsigned int vtaps[],  		unsigned int VTAPsChroma[], @@ -5504,7 +5502,7 @@ static void CalculateDCFCLKDeepSleep(  		double BytePerPixelDETY[],  		double BytePerPixelDETC[],  		double VRatio[], -		unsigned int SwathWidthY[], +		double SwathWidthY[],  		int DPPPerPlane[],  		double HRatio[],  		double PixelClock[], @@ -5832,7 +5830,7 @@ static void CalculateMetaAndPTETimes(  		unsigned int meta_row_height[],  		unsigned int meta_req_width[],  		unsigned int meta_req_height[], -		long dpte_group_bytes[], +		int dpte_group_bytes[],  		unsigned int PTERequestSizeY[],  		unsigned int 
PTERequestSizeC[],  		unsigned int PixelPTEReqWidthY[], @@ -6088,7 +6086,7 @@ static double CalculateExtraLatency(  		bool HostVMEnable,  		int NumberOfActivePlanes,  		int NumberOfDPP[], -		long dpte_group_bytes[], +		int dpte_group_bytes[],  		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,  		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,  		int HostVMMaxPageTableLevels, @@ -6126,4 +6124,3 @@ static double CalculateExtraLatency(  	return CalculateExtraLatency;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a1f207cbb966..a38baa73d484 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -23,7 +23,6 @@   *   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #include "../display_mode_lib.h"  #include "../display_mode_vba.h" @@ -83,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format  static bool is_dual_plane(enum source_format_class source_format)  { -	bool ret_val = 0; +	bool ret_val = false;  	if ((source_format == dm_420_8) || (source_format == dm_420_10)) -		ret_val = 1; +		ret_val = true;  	return ret_val;  } @@ -223,8 +222,8 @@ static void handle_det_buf_split(  	unsigned int swath_bytes_c = 0;  	unsigned int full_swath_bytes_packed_l = 0;  	unsigned int full_swath_bytes_packed_c = 0; -	bool req128_l = 0; -	bool req128_c = 0; +	bool req128_l = false; +	bool req128_c = false;  	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);  	bool surf_vert = (pipe_src_param.source_scan == dm_vert);  	unsigned int log2_swath_height_l = 0; @@ -249,13 +248,13 @@ static void handle_det_buf_split(  		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;  		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request 
-			req128_l = 0; -			req128_c = 0; +			req128_l = false; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l;  			swath_bytes_c = full_swath_bytes_packed_c;  		} else { //128b request (for luma only for yuv420 8bpc) -			req128_l = 1; -			req128_c = 0; +			req128_l = true; +			req128_c = false;  			swath_bytes_l = full_swath_bytes_packed_l / 2;  			swath_bytes_c = full_swath_bytes_packed_c;  		} @@ -265,9 +264,9 @@ static void handle_det_buf_split(  		total_swath_bytes = 2 * full_swath_bytes_packed_l;  		if (total_swath_bytes <= detile_buf_size_in_bytes) -			req128_l = 0; +			req128_l = false;  		else -			req128_l = 1; +			req128_l = true;  		swath_bytes_l = total_swath_bytes;  		swath_bytes_c = 0; @@ -680,7 +679,7 @@ static void get_surf_rq_param(  		const display_pipe_params_st pipe_param,  		bool is_chroma)  { -	bool mode_422 = 0; +	bool mode_422 = false;  	unsigned int vp_width = 0;  	unsigned int vp_height = 0;  	unsigned int data_pitch = 0; @@ -1011,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params(  	// Source  	//             dcc_en              = src.dcc;  	dual_plane = is_dual_plane((enum source_format_class) (src->source_format)); -	mode_422 = 0; // FIXME +	mode_422 = false; // FIXME  	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed  						    //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);  						    //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1523,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(  	disp_dlg_regs->refcyc_per_vm_group_vblank   = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  	disp_dlg_regs->refcyc_per_vm_group_flip     = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; -	disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, 
e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; -	disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; +	disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; +	disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  	// Clamp to max for now  	if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23)) @@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor(  	}  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1c97083b8d0b..bfc2f39bd1ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -85,7 +85,7 @@ enum dm_swizzle_mode {  	dm_sw_var_s_x = 29,  	dm_sw_var_d_x = 30,  	dm_sw_64kb_r_x, -	dm_sw_gfx7_2d_thin_lvp, +	dm_sw_gfx7_2d_thin_l_vp,  	dm_sw_gfx7_2d_thin_gl,  };  enum lb_depth { @@ -119,6 +119,10 @@ enum mpc_combine_affinity {  	dm_mpc_never  }; +enum RequestType { +	REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA +}; +  enum self_refresh_affinity {  	dm_try_to_allow_self_refresh_and_mclk_switch,  	dm_allow_self_refresh_and_mclk_switch, @@ -135,9 +139,7 @@ enum dm_validation_status {  	DML_FAIL_DIO_SUPPORT,  	DML_FAIL_NOT_ENOUGH_DSC,  	DML_FAIL_DSC_CLK_REQUIRED, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	DML_FAIL_DSC_VALIDATION_FAILURE, -#endif  	DML_FAIL_URGENT_LATENCY,  	DML_FAIL_REORDERING_BUFFER,  	DML_FAIL_DISPCLK_DPPCLK, @@ -167,4 +169,16 @@ enum odm_combine_mode {  	dm_odm_combine_mode_4to1,  }; +enum odm_combine_policy { +	dm_odm_combine_policy_dal, +	dm_odm_combine_policy_none, +	dm_odm_combine_policy_2to1, +	dm_odm_combine_policy_4to1, +}; + +enum 
immediate_flip_requirement { +	dm_immediate_flip_not_required, +	dm_immediate_flip_required, +}; +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 704efefdcba8..2689401a03a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -25,18 +25,13 @@  #include "display_mode_lib.h"  #include "dc_features.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dcn20/display_mode_vba_20.h"  #include "dcn20/display_rq_dlg_calc_20.h"  #include "dcn20/display_mode_vba_20v2.h"  #include "dcn20/display_rq_dlg_calc_20v2.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  #include "dcn21/display_mode_vba_21.h"  #include "dcn21/display_rq_dlg_calc_21.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  const struct dml_funcs dml20_funcs = {  	.validate = dml20_ModeSupportAndSystemConfigurationFull,  	.recalculate = dml20_recalculate, @@ -50,16 +45,13 @@ const struct dml_funcs dml20v2_funcs = {  	.rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,  	.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg  }; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  const struct dml_funcs dml21_funcs = {          .validate = dml21_ModeSupportAndSystemConfigurationFull,          .recalculate = dml21_recalculate,          .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,          .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg  }; -#endif  void dml_init_instance(struct display_mode_lib *lib,  		const struct _vcs_dpi_soc_bounding_box_st *soc_bb, @@ -70,19 +62,15 @@ void dml_init_instance(struct display_mode_lib *lib,  	lib->ip = *ip_params;  	lib->project = project;  	switch (project) { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	case DML_PROJECT_NAVI10:  		lib->funcs = dml20_funcs;  		break;  	case DML_PROJECT_NAVI10v2:  		lib->funcs = dml20v2_funcs;  		break; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1          case DML_PROJECT_DCN21:                  lib->funcs = dml21_funcs;  
                break; -#endif  	default:  		break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d8c59aa356b6..cf2758ca5b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -27,20 +27,14 @@  #include "dml_common_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #include "display_mode_vba.h" -#endif  enum dml_project {  	DML_PROJECT_UNDEFINED,  	DML_PROJECT_RAVEN1, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	DML_PROJECT_NAVI10,  	DML_PROJECT_NAVI10v2, -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  	DML_PROJECT_DCN21, -#endif  };  struct display_mode_lib; @@ -70,9 +64,7 @@ struct display_mode_lib {  	struct _vcs_dpi_ip_params_st ip;  	struct _vcs_dpi_soc_bounding_box_st soc;  	enum dml_project project; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	struct vba_vars_st vba; -#endif  	struct dal_logger *logger;  	struct dml_funcs funcs;  }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cfacd6027467..658f81e757e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st {  	double dispclk_mhz;  	double phyclk_mhz;  	double dppclk_mhz; +	double dtbclk_mhz;  };  struct _vcs_dpi_soc_bounding_box_st { @@ -99,6 +100,7 @@ struct _vcs_dpi_soc_bounding_box_st {  	unsigned int num_chans;  	unsigned int vmm_page_size_bytes;  	unsigned int hostvm_min_page_size_bytes; +	unsigned int gpuvm_min_page_size_bytes;  	double dram_clock_change_latency_us;  	double dummy_pstate_latency_us;  	double writeback_dram_clock_change_latency_us; @@ -112,6 +114,7 @@ struct _vcs_dpi_soc_bounding_box_st {  	bool do_urgent_latency_adjustment;  	double urgent_latency_adjustment_fabric_clock_component_us;  	double 
urgent_latency_adjustment_fabric_clock_reference_mhz; +	bool disable_dram_clock_change_vactive_support;  };  struct _vcs_dpi_ip_params_st { @@ -145,7 +148,6 @@ struct _vcs_dpi_ip_params_st {  	unsigned int writeback_interface_buffer_size_kbytes;  	unsigned int writeback_line_buffer_buffer_size; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	unsigned int writeback_10bpc420_supported;  	double writeback_max_hscl_ratio;  	double writeback_max_vscl_ratio; @@ -155,7 +157,6 @@ struct _vcs_dpi_ip_params_st {  	unsigned int writeback_max_vscl_taps;  	unsigned int writeback_line_buffer_luma_buffer_size;  	unsigned int writeback_line_buffer_chroma_buffer_size; -#endif  	unsigned int max_page_table_levels;  	unsigned int max_num_dpp; @@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st {  	int source_format;  	unsigned char dcc;  	unsigned int dcc_rate; +	unsigned int dcc_rate_chroma;  	unsigned char dcc_use_global;  	unsigned char vm;  	bool gpuvm;    // gpuvm enabled @@ -225,6 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st {  	int source_scan;  	int sw_mode;  	int macro_tile_size; +	unsigned int surface_width_y; +	unsigned int surface_height_y; +	unsigned int surface_width_c; +	unsigned int surface_height_c;  	unsigned int viewport_width;  	unsigned int viewport_height;  	unsigned int viewport_y_y; @@ -277,6 +283,7 @@ struct _vcs_dpi_display_output_params_st {  	int output_type;  	int output_format;  	int dsc_slices; +	int max_audio_sample_rate;  	struct writeback_st wb;  }; @@ -322,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {  	double pixel_rate_mhz;  	unsigned char synchronized_vblank_all_planes;  	unsigned char otg_inst; -	unsigned char odm_combine; +	unsigned int odm_combine;  	unsigned char use_maximum_vstartup;  	unsigned int vtotal_max;  	unsigned int vtotal_min; @@ -401,6 +408,7 @@ struct _vcs_dpi_display_rq_misc_params_st {  struct _vcs_dpi_display_rq_params_st {  	unsigned char yuv420;  	unsigned char yuv420_10bpc; +	unsigned char 
rgbe_alpha;  	display_rq_misc_params_st misc;  	display_rq_sizing_params_st sizing;  	display_rq_dlg_params_st dlg; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 7f9a5621922f..b3c96d9b472f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -23,7 +23,6 @@   *   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #include "display_mode_lib.h"  #include "display_mode_vba.h" @@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)  	mode_lib->vba.SRExitTime = soc->sr_exit_time_us;  	mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;  	mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us; +	mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us; +	mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support || +			mode_lib->vba.DummyPStateCheck; +  	mode_lib->vba.Downspreading = soc->downspread_percent;  	mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes;   // new!  	mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!  	mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent;   // new  	mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz;   // new  	mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes; -	mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024; +	mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;  	mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024;  	// Set the voltage scaling clocks as the defaults. 
Most of these will  	// be set to different values by the test @@ -261,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)  		mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;  		//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;  		mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz; +		mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;  	} +	mode_lib->vba.MinVoltageLevel = 0; +	mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;  	mode_lib->vba.DoUrgentLatencyAdjustment =  		soc->do_urgent_latency_adjustment; @@ -303,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)  	mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;  	mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size; -	mode_lib->vba.MinVoltageLevel = 0; -	mode_lib->vba.MaxVoltageLevel = 5;  	mode_lib->vba.WritebackChromaLineBufferWidth =  			ip->writeback_chroma_line_buffer_width_pixels; @@ -420,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  						ip->dcc_supported : src->dcc && ip->dcc_supported;  		mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;  		/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */ -		mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0; -		mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0; +		mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate; +		mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;  		mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =  				(enum source_format_class) (src->source_format); @@ -433,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  				dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?  		
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =  				dst->odm_combine; -		mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = -				dst->odm_combine;  		mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =  				(enum output_format_class) (dout->output_format);  		mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = @@ -451,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  				dout->dp_lanes;  		/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */  		mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] = -			44.1 * 1000; +			dout->max_audio_sample_rate;  		mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =  			1;  		mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; @@ -587,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  			for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {  				display_pipe_source_params_st *src_k = &pipes[k].pipe.src;  				display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest; +				display_output_params_st *dout_k = &pipes[j].dout;  				if (src_k->is_hsplit && !visited[k]  						&& src->hsplit_grp == src_k->hsplit_grp) { @@ -597,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  							== dm_horz) {  						mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=  								src_k->viewport_width; +						mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] += +								src_k->viewport_width;  						mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=  								dst_k->recout_width;  					} else {  						mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=  								src_k->viewport_height; +						mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] += +								src_k->viewport_height;  					} +					
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] += +							dout_k->dsc_slices;  					visited[k] = true;  				} @@ -808,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)  	unsigned int total_pipes = 0;  	mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage; -	mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel]; +	mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]; +	if (mode_lib->vba.ReturnBW == 0) +		mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];  	mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];  	fetch_socbb_params(mode_lib); @@ -858,4 +869,3 @@ double CalculateWriteBackDISPCLK(  	return CalculateWriteBackDISPCLK;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 1540ffbe3979..e7a44df676ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -23,7 +23,6 @@   *   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #ifndef __DML2_DISPLAY_MODE_VBA_H__  #define __DML2_DISPLAY_MODE_VBA_H__ @@ -155,7 +154,10 @@ struct vba_vars_st {  	double UrgentLatencySupportUsChroma;  	unsigned int DSCFormatFactor; +	bool DummyPStateCheck; +	bool DRAMClockChangeSupportsVActive;  	bool PrefetchModeSupported; +	bool PrefetchAndImmediateFlipSupported;  	enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only  	double XFCRemoteSurfaceFlipDelay;  	double TInitXFill; @@ -317,8 +319,7 @@ struct vba_vars_st {  	unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];  	double DCCRate[DC__NUM_DPP__MAX];  	double AverageDCCCompressionRate; -	bool ODMCombineEnabled[DC__NUM_DPP__MAX]; -	enum odm_combine_mode 
ODMCombineTypeEnabled[DC__NUM_DPP__MAX]; +	enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX];  	double OutputBpp[DC__NUM_DPP__MAX];  	bool DSCEnabled[DC__NUM_DPP__MAX];  	unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX]; @@ -346,6 +347,7 @@ struct vba_vars_st {  	unsigned int EffectiveLBLatencyHidingSourceLinesChroma;  	double BandwidthAvailableForImmediateFlip;  	unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2]; +	unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];  	unsigned int MinPrefetchMode;  	unsigned int MaxPrefetchMode;  	bool AnyLinesForVMOrRowTooLarge; @@ -395,6 +397,7 @@ struct vba_vars_st {  	bool WritebackLumaAndChromaScalingSupported;  	bool Cursor64BppSupport;  	double DCFCLKPerState[DC__VOLTAGE_STATES + 1]; +	double DCFCLKState[DC__VOLTAGE_STATES + 1][2];  	double FabricClockPerState[DC__VOLTAGE_STATES + 1];  	double SOCCLKPerState[DC__VOLTAGE_STATES + 1];  	double PHYCLKPerState[DC__VOLTAGE_STATES + 1]; @@ -443,7 +446,7 @@ struct vba_vars_st {  	double OutputLinkDPLanes[DC__NUM_DPP__MAX];  	double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only  	double ImmediateFlipBW[DC__NUM_DPP__MAX]; -	double MaxMaxVStartup; +	double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];  	double WritebackLumaVExtra;  	double WritebackChromaVExtra; @@ -470,7 +473,7 @@ struct vba_vars_st {  	double RoundedUpMaxSwathSizeBytesC;  	double EffectiveDETLBLinesLuma;  	double EffectiveDETLBLinesChroma; -	double ProjectedDCFCLKDeepSleep; +	double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];  	double PDEAndMetaPTEBytesPerFrameY;  	double PDEAndMetaPTEBytesPerFrameC;  	unsigned int MetaRowBytesY; @@ -488,12 +491,11 @@ struct vba_vars_st {  	double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output  	/* ms locals */ -	double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1]; +	double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];  	unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	
int NoOfDPPThisState[DC__NUM_DPP__MAX]; -	bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; -	enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; -	unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX]; +	enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; +	double SwathWidthYThisState[DC__NUM_DPP__MAX];  	unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];  	unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX]; @@ -505,7 +507,7 @@ struct vba_vars_st {  	double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];  	bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; -	bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1]; +	bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];  	bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];  	bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];  	double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2]; @@ -514,22 +516,22 @@ struct vba_vars_st {  	unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];  	unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];  	bool ModeSupport[DC__VOLTAGE_STATES + 1][2]; -	double ReturnBWPerState[DC__VOLTAGE_STATES + 1]; +	double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];  	bool DIOSupport[DC__VOLTAGE_STATES + 1];  	bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];  	bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];  	bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];  	double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1]; -	bool ROBSupport[DC__VOLTAGE_STATES + 1]; +	bool ROBSupport[DC__VOLTAGE_STATES + 1][2];  	bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2]; -	bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1]; 
-	double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1]; +	bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2]; +	double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];  	double PrefetchBW[DC__NUM_DPP__MAX]; -	double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX]; -	double MetaRowBytes[DC__NUM_DPP__MAX]; -	double DPTEBytesPerRow[DC__NUM_DPP__MAX]; -	double PrefetchLinesY[DC__NUM_DPP__MAX]; -	double PrefetchLinesC[DC__NUM_DPP__MAX]; +	double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	unsigned int MaxNumSwY[DC__NUM_DPP__MAX];  	unsigned int MaxNumSwC[DC__NUM_DPP__MAX];  	double PrefillY[DC__NUM_DPP__MAX]; @@ -538,7 +540,7 @@ struct vba_vars_st {  	double LinesForMetaPTE[DC__NUM_DPP__MAX];  	double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];  	double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX]; -	unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX]; +	double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];  	double BytePerPixelInDETY[DC__NUM_DPP__MAX];  	double BytePerPixelInDETC[DC__NUM_DPP__MAX];  	bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; @@ -546,7 +548,7 @@ struct vba_vars_st {  	double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];  	double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];  	double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; -	bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1]; +	bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];  	unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];  	unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];  	unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX]; @@ -561,7 +563,7 @@ struct vba_vars_st {  	double 
WriteBandwidth[DC__NUM_DPP__MAX];  	double PSCL_FACTOR[DC__NUM_DPP__MAX];  	double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX]; -	double MaximumVStartup[DC__NUM_DPP__MAX]; +	double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];  	unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];  	double AlignedDCCMetaPitch[DC__NUM_DPP__MAX]; @@ -578,7 +580,7 @@ struct vba_vars_st {  	bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];  	double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];  	unsigned int vm_group_bytes[DC__NUM_DPP__MAX]; -	long dpte_group_bytes[DC__NUM_DPP__MAX]; +	unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];  	unsigned int dpte_row_height[DC__NUM_DPP__MAX];  	unsigned int meta_req_height[DC__NUM_DPP__MAX];  	unsigned int meta_req_width[DC__NUM_DPP__MAX]; @@ -604,14 +606,14 @@ struct vba_vars_st {  	double UrgentBurstFactorChroma[DC__NUM_DPP__MAX];  	double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX]; +  	bool           MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];  	double         SwathWidthCSingleDPP[DC__NUM_DPP__MAX];  	double         MaximumSwathWidthInLineBufferLuma;  	double         MaximumSwathWidthInLineBufferChroma;  	double         MaximumSwathWidthLuma[DC__NUM_DPP__MAX];  	double         MaximumSwathWidthChroma[DC__NUM_DPP__MAX]; -	bool odm_combine_dummy[DC__NUM_DPP__MAX]; -	enum odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX]; +	enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX];  	double         dummy1[DC__NUM_DPP__MAX];  	double         dummy2[DC__NUM_DPP__MAX];  	double         dummy3[DC__NUM_DPP__MAX]; @@ -621,9 +623,9 @@ struct vba_vars_st {  	double         dummy7[DC__NUM_DPP__MAX];  	double         dummy8[DC__NUM_DPP__MAX];  	unsigned int        dummyinteger1ms[DC__NUM_DPP__MAX]; -	unsigned int        dummyinteger2ms[DC__NUM_DPP__MAX]; +	double        dummyinteger2ms[DC__NUM_DPP__MAX];  	unsigned int        
dummyinteger3[DC__NUM_DPP__MAX]; -	unsigned int        dummyinteger4; +	unsigned int        dummyinteger4[DC__NUM_DPP__MAX];  	unsigned int        dummyinteger5;  	unsigned int        dummyinteger6;  	unsigned int        dummyinteger7; @@ -636,7 +638,6 @@ struct vba_vars_st {  	unsigned int        dummyintegerarr2[DC__NUM_DPP__MAX];  	unsigned int        dummyintegerarr3[DC__NUM_DPP__MAX];  	unsigned int        dummyintegerarr4[DC__NUM_DPP__MAX]; -	long                dummylongarr1[DC__NUM_DPP__MAX];  	bool           dummysinglestring;  	bool           SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];  	double         PlaneRequiredDISPCLKWithODMCombine2To1; @@ -644,20 +645,19 @@ struct vba_vars_st {  	unsigned int   TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];  	bool           LinkDSCEnable;  	bool           ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1]; -	bool ODMCombineEnableThisState[DC__NUM_DPP__MAX]; -	enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX]; -	unsigned int   SwathWidthCThisState[DC__NUM_DPP__MAX]; +	enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX]; +	double   SwathWidthCThisState[DC__NUM_DPP__MAX];  	bool           ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];  	double         AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];  	double         AlignedDCCMetaPitchC[DC__NUM_DPP__MAX];  	unsigned int NotEnoughUrgentLatencyHiding;  	unsigned int NotEnoughUrgentLatencyHidingPre; -	long PTEBufferSizeInRequestsForLuma; -	long PTEBufferSizeInRequestsForChroma; +	int PTEBufferSizeInRequestsForLuma; +	int PTEBufferSizeInRequestsForChroma;  	// Missing from VBA -	long dpte_group_bytes_chroma; +	int dpte_group_bytes_chroma;  	unsigned int vm_group_bytes_chroma;  	double dst_x_after_scaler;  	double dst_y_after_scaler; @@ -682,8 +682,8 @@ struct vba_vars_st {  	double MinTTUVBlank[DC__NUM_DPP__MAX];  	double BytePerPixelDETY[DC__NUM_DPP__MAX];  	double BytePerPixelDETC[DC__NUM_DPP__MAX]; -	unsigned int 
SwathWidthY[DC__NUM_DPP__MAX]; -	unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX]; +	double SwathWidthY[DC__NUM_DPP__MAX]; +	double SwathWidthSingleDPPY[DC__NUM_DPP__MAX];  	double CursorRequestDeliveryTime[DC__NUM_DPP__MAX];  	double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX];  	double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX]; @@ -759,8 +759,8 @@ struct vba_vars_st {  	double LinesInDETY[DC__NUM_DPP__MAX];  	double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; -	unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; -	unsigned int SwathWidthC[DC__NUM_DPP__MAX]; +	double SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; +	double SwathWidthC[DC__NUM_DPP__MAX];  	unsigned int BytePerPixelY[DC__NUM_DPP__MAX];  	unsigned int BytePerPixelC[DC__NUM_DPP__MAX];  	long dummyinteger1; @@ -778,6 +778,7 @@ struct vba_vars_st {  	unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX];  	unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX];  	double VStartupMargin; +	bool NotEnoughTimeForDynamicMetadata;  	/* Missing from VBA */  	unsigned int MaximumMaxVStartupLines; @@ -813,7 +814,7 @@ struct vba_vars_st {  	unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];  	double HRatioChroma[DC__NUM_DPP__MAX];  	double VRatioChroma[DC__NUM_DPP__MAX]; -	long WritebackSourceWidth[DC__NUM_DPP__MAX]; +	int WritebackSourceWidth[DC__NUM_DPP__MAX];  	bool ModeIsSupported;  	bool ODMCombine4To1Supported; @@ -849,6 +850,58 @@ struct vba_vars_st {  	unsigned int MaxNumHDMIFRLOutputs;  	int    AudioSampleRate[DC__NUM_DPP__MAX];  	int    AudioSampleLayout[DC__NUM_DPP__MAX]; + +	int PercentMarginOverMinimumRequiredDCFCLK; +	bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2]; +	enum immediate_flip_requirement ImmediateFlipRequirement; +	double DETBufferSizeYThisState[DC__NUM_DPP__MAX]; +	double DETBufferSizeCThisState[DC__NUM_DPP__MAX]; +	bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX]; +	bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX]; +	int 
swath_width_luma_ub_this_state[DC__NUM_DPP__MAX]; +	int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX]; +	double UrgLatency[DC__VOLTAGE_STATES + 1]; +	double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2]; +	unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; +	double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2]; +	double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2]; +	double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2]; +	double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2]; +	bool UseMinimumRequiredDCFCLK; +	double WritebackDelayTime[DC__NUM_DPP__MAX]; +	unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX]; +	unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX]; +	unsigned int dummyinteger15; +	unsigned int dummyinteger16; +	unsigned int dummyinteger17; +	unsigned int dummyinteger18; +	unsigned int dummyinteger19; +	unsigned int dummyinteger20; +	unsigned int dummyinteger21; +	unsigned int 
dummyinteger22; +	unsigned int dummyinteger23; +	unsigned int dummyinteger24; +	unsigned int dummyinteger25; +	unsigned int dummyinteger26; +	unsigned int dummyinteger27; +	unsigned int dummyinteger28; +	unsigned int dummyinteger29; +	bool dummystring[DC__NUM_DPP__MAX]; +	double BPP; +	enum odm_combine_policy ODMCombinePolicy;  };  bool CalculateMinAndMaxPrefetchMode( @@ -870,4 +923,3 @@ double CalculateWriteBackDISPCLK(  		unsigned int WritebackChromaLineBufferWidth);  #endif /* _DML2_DISPLAY_MODE_VBA_H_ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c index b953b02a1512..723af0b2dda0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c @@ -24,7 +24,7 @@   */  #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h"  #include "dml_inline_defs.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index eca140da13d8..ded71ea82413 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -27,7 +27,7 @@  #define __DML_INLINE_DEFS_H__  #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h"  #include "dml_logger.h"  static inline double dml_min(double a, double b) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 970737217e53..3f66868df171 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,7 +1,14 @@ +# SPDX-License-Identifier: MIT  #  # Makefile for the 'dsc' sub-component of DAL. 
+ifdef CONFIG_X86  dsc_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dsc_ccflags := -mhard-float -maltivec +endif  ifdef CONFIG_CC_IS_GCC  ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -9,6 +16,7 @@ IS_OLD_GCC = 1  endif  endif +ifdef CONFIG_X86  ifdef IS_OLD_GCC  # Stack alignment mismatch, proceed with caution.  # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -17,6 +25,7 @@ dsc_ccflags += -mpreferred-stack-boundary=4  else  dsc_ccflags += -msse2  endif +endif  CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index e60f760585e4..8b78fcbfe746 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -22,30 +22,16 @@   * Author: AMD   */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #include "dc_hw_types.h"  #include "dsc.h"  #include <drm/drm_dp_helper.h> - -struct dc_dsc_policy { -	bool use_min_slices_h; -	int max_slices_h; // Maximum available if 0 -	int min_sice_height; // Must not be less than 8 -	int max_target_bpp; -	int min_target_bpp; // Minimum target bits per pixel -}; - -const struct dc_dsc_policy dsc_policy = { -	.use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock -	.max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode) -	.min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide -	.max_target_bpp = 16, -	.min_target_bpp = 8, -}; - +#include "dc.h"  /* This module's internal functions */ +/* default DSC policy target bitrate limit is 16bpp */ +static uint32_t dsc_policy_max_target_bpp_limit = 16; +  static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(  	const struct dc_crtc_timing *timing)  { @@ -237,8 +223,11 @@ static void get_dsc_enc_caps(  	// This 
is a static HW query, so we can use any DSC  	memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); -	if (dsc) +	if (dsc) {  		dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); +		if (dsc->ctx->dc->debug.native422_support) +			dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; +	}  }  /* Returns 'false' if no intersection was found for at least one capablity. @@ -578,9 +567,11 @@ static bool setup_dsc_config(  	bool is_dsc_possible = false;  	int pic_height;  	int slice_height; +	struct dc_dsc_policy policy;  	memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); +	dc_dsc_get_policy_for_timing(timing, &policy);  	pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;  	pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -596,7 +587,12 @@ static bool setup_dsc_config(  		goto done;  	if (target_bandwidth_kbps > 0) { -		is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); +		is_dsc_possible = decide_dsc_target_bpp_x16( +				&policy, +				&dsc_common_caps, +				target_bandwidth_kbps, +				timing, +				&target_bpp);  		dsc_cfg->bits_per_pixel = target_bpp;  	}  	if (!is_dsc_possible) @@ -698,20 +694,20 @@ static bool setup_dsc_config(  	if (!is_dsc_possible)  		goto done; -	if (dsc_policy.use_min_slices_h) { +	if (policy.use_min_slices_h) {  		if (min_slices_h > 0)  			num_slices_h = min_slices_h;  		else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out -			if (dsc_policy.max_slices_h) -				num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); +			if (policy.max_slices_h) +				num_slices_h = min(policy.max_slices_h, max_slices_h);  			else  				num_slices_h = max_slices_h;  		} else  			is_dsc_possible = false;  	} else {  		if (max_slices_h > 0) { -			if (dsc_policy.max_slices_h) -				num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); +			if (policy.max_slices_h) +				
num_slices_h = min(policy.max_slices_h, max_slices_h);  			else  				num_slices_h = max_slices_h;  		} else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible @@ -733,7 +729,7 @@ static bool setup_dsc_config(  	// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.  	// For 4:2:0 make sure the slice height is divisible by 2 as well.  	if (min_slice_height_override == 0) -		slice_height = min(dsc_policy.min_sice_height, pic_height); +		slice_height = min(policy.min_slice_height, pic_height);  	else  		slice_height = min(min_slice_height_override, pic_height); @@ -764,7 +760,7 @@ done:  	return is_dsc_possible;  } -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)  {  	if (!dpcd_dsc_basic_data)  		return false; @@ -817,6 +813,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp  	if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div))  		return false; +	if (dc->debug.dsc_bpp_increment_div) { +		/* dsc_bpp_increment_div should onl be 1, 2, 4, 8 or 16, but rather than rejecting invalid values, +		 * we'll accept all and get it into range. This also makes the above check against 0 redundant, +		 * but that one stresses out the override will be only used if it's not 0. 
+		 */ +		if (dc->debug.dsc_bpp_increment_div >= 1) +			dsc_sink_caps->bpp_increment_div = 1; +		if (dc->debug.dsc_bpp_increment_div >= 2) +			dsc_sink_caps->bpp_increment_div = 2; +		if (dc->debug.dsc_bpp_increment_div >= 4) +			dsc_sink_caps->bpp_increment_div = 4; +		if (dc->debug.dsc_bpp_increment_div >= 8) +			dsc_sink_caps->bpp_increment_div = 8; +		if (dc->debug.dsc_bpp_increment_div >= 16) +			dsc_sink_caps->bpp_increment_div = 16; +	} +  	/* Extended caps */  	if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST  		dsc_sink_caps->branch_overall_throughput_0_mps = 0; @@ -903,4 +916,67 @@ bool dc_dsc_compute_config(  			timing, dsc_min_slice_height_override, dsc_cfg);  	return is_dsc_possible;  } -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy) +{ +	uint32_t bpc = 0; + +	policy->min_target_bpp = 0; +	policy->max_target_bpp = 0; + +	/* DSC Policy: Use minimum number of slices that fits the pixel clock */ +	policy->use_min_slices_h = true; + +	/* DSC Policy: Use max available slices +	 * (in our case 4 for or 8, depending on the mode) +	 */ +	policy->max_slices_h = 0; + +	/* DSC Policy: Use slice height recommended +	 * by VESA DSC Spreadsheet user guide +	 */ +	policy->min_slice_height = 108; + +	/* DSC Policy: follow DP specs with an internal upper limit to 16 bpp +	 * for better interoperability +	 */ +	switch (timing->display_color_depth) { +	case COLOR_DEPTH_888: +		bpc = 8; +		break; +	case COLOR_DEPTH_101010: +		bpc = 10; +		break; +	case COLOR_DEPTH_121212: +		bpc = 12; +		break; +	default: +		return; +	} +	switch (timing->pixel_encoding) { +	case PIXEL_ENCODING_RGB: +	case PIXEL_ENCODING_YCBCR444: +	case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */ +		/* DP specs limits to 8 */ +		policy->min_target_bpp = 8; +		/* DP specs limits to 3 x bpc */ +		policy->max_target_bpp = 3 * bpc; 
+		break; +	case PIXEL_ENCODING_YCBCR420: +		/* DP specs limits to 6 */ +		policy->min_target_bpp = 6; +		/* DP specs limits to 1.5 x bpc assume bpc is an even number */ +		policy->max_target_bpp = bpc * 3 / 2; +		break; +	default: +		return; +	} +	/* internal upper limit, default 16 bpp */ +	if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) +		policy->max_target_bpp = dsc_policy_max_target_bpp_limit; +} + +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) +{ +	dsc_policy_max_target_bpp_limit = limit; +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index 020ad8f685ea..9f70e87b3ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /*   * Copyright 2017 Advanced Micro Devices, Inc. @@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par  #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h index f66d006eac5d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /*   * Copyright 2017 Advanced Micro Devices, Inc. @@ -703,4 +702,3 @@ const qp_table   qp_table_422_8bpc_max = {  	{  16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }  }; -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 76c4b12d6824..03ae15946c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  /*   * Copyright 2017 Advanced Micro Devices, Inc. 
@@ -252,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com  	rc->rc_buf_thresh[13] = 8064;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index f1d6e793bc61..b6b1f09c2009 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /*   * Copyright 2017 Advanced Micro Devices, Inc. @@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com  #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 73172fd0b529..1f6e63b71456 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)  /*   * Copyright 2012-17 Advanced Micro Devices, Inc.   * @@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par  	return ret;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index b3062275711e..202baa210cc8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -61,26 +61,25 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)  ###############################################################################  # DCN 1x  ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN  GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o  AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))  AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10) -endif  ###############################################################################  # DCN 2  
############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0  GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o  AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))  AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 +############################################################################### +# DCN 21 +###############################################################################  GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o  AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 43a440385b43..83f798cb8b21 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dm_services.h"  #include "include/gpio_types.h"  #include "../hw_factory.h" @@ -110,6 +109,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {  	ddc_data_regs_dcn2(4),  	ddc_data_regs_dcn2(5),  	ddc_data_regs_dcn2(6), +	{ +			DDC_GPIO_VGA_REG_LIST(DATA), +			.ddc_setup = 0, +			.phy_aux_cntl = 0, +			.dc_gpio_aux_ctrl_5 = 0 +	}  };  static const struct ddc_registers ddc_clk_regs_dcn[] = { @@ -119,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {  	ddc_clk_regs_dcn2(4),  	ddc_clk_regs_dcn2(5),  	ddc_clk_regs_dcn2(6), +	{ +			DDC_GPIO_VGA_REG_LIST(CLK), +			.ddc_setup = 0, +			.phy_aux_cntl = 0, +			.dc_gpio_aux_ctrl_5 = 0 +	}  };  static const struct ddc_sh_mask ddc_shift[] = { @@ -246,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory)  	factory->funcs = &funcs;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h 
b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h index 43a4ce7aa3bf..0fd9b315bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #ifndef __DAL_HW_FACTORY_DCN20_H__  #define __DAL_HW_FACTORY_DCN20_H__ @@ -30,4 +29,3 @@  void dal_hw_factory_dcn20_init(struct hw_factory *factory);  #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c index 915e896e0e91..52ba62b3b5e4 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c @@ -26,7 +26,6 @@  /*   * Pre-requisites: headers required by header of this unit   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "hw_translate_dcn20.h"  #include "dm_services.h" @@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr)  	tr->funcs = &funcs;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h index 01f52c7bed86..5f7a35530e26 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #ifndef __DAL_HW_TRANSLATE_DCN20_H__  #define __DAL_HW_TRANSLATE_DCN20_H__ @@ -32,4 +31,3 @@ struct hw_translate;  void dal_hw_translate_dcn20_init(struct hw_translate *tr);  #endif /* __DAL_HW_TRANSLATE_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c index 8572678f8d4f..907c5911eb9e 100644 --- 
a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dm_services.h"  #include "include/gpio_types.h"  #include "../hw_factory.h" @@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory)  	factory->funcs = &funcs;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h index 2443f9e7afbf..4949e0c7fa06 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #ifndef __DAL_HW_FACTORY_DCN21_H__  #define __DAL_HW_FACTORY_DCN21_H__ @@ -30,4 +29,3 @@  void dal_hw_factory_dcn21_init(struct hw_factory *factory);  #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index fbb58fb8c318..291966efe63d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -26,7 +26,6 @@  /*   * Pre-requisites: headers required by header of this unit   */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "hw_translate_dcn21.h"  #include "dm_services.h" @@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr)  	tr->funcs = &funcs;  } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h index 2bfaac24c574..9462b0a65200 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_1)  #ifndef __DAL_HW_TRANSLATE_DCN21_H__  #define __DAL_HW_TRANSLATE_DCN21_H__ @@ -32,4 +31,3 @@ struct hw_translate;  void dal_hw_translate_dcn21_init(struct hw_translate *tr);  #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index f91e85b04956..308a543178a5 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -48,13 +48,11 @@  	DDC_GPIO_REG_LIST(cd,id),\  	.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	#define DDC_REG_LIST_DCN2(cd, id) \  	DDC_GPIO_REG_LIST(cd, id),\  	.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\  	.phy_aux_cntl = REG(PHY_AUX_CNTL), \  	.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif  #define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\  	.type ## _reg =   REG(DC_GPIO_DDCVGA_ ## type),\ @@ -90,13 +88,11 @@  	DDC_GPIO_I2C_REG_LIST(cd),\  	.ddc_setup = 0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define DDC_I2C_REG_LIST_DCN2(cd) \  	DDC_GPIO_I2C_REG_LIST(cd),\  	.ddc_setup = 0,\  	.phy_aux_cntl = REG(PHY_AUX_CNTL), \  	.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif  #define DDC_MASK_SH_LIST_COMMON(mask_sh) \  		SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\  		SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\ @@ -110,22 +106,18 @@  		SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\  		SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \  	{DDC_MASK_SH_LIST_COMMON(mask_sh),\  	0,\  	0,\  	(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\  	(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} -#endif  struct ddc_registers {  	struct gpio_registers gpio;  	uint32_t ddc_setup; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint32_t phy_aux_cntl;  	uint32_t dc_gpio_aux_ctrl_5; -#endif 
 };  struct ddc_sh_mask { @@ -140,11 +132,9 @@ struct ddc_sh_mask {  	/* i2cpad_mask */  	uint32_t DC_GPIO_SDA_PD_DIS;  	uint32_t DC_GPIO_SCL_PD_DIS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	//phy_aux_cntl  	uint32_t AUX_PAD_RXSEL;  	uint32_t DDC_PAD_I2CMODE; -#endif  }; @@ -180,7 +170,6 @@ struct ddc_sh_mask {  {\  	DDC_I2C_REG_LIST(SCL)\  } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define ddc_data_regs_dcn2(id) \  {\  	DDC_REG_LIST_DCN2(DATA, id)\ @@ -200,7 +189,6 @@ struct ddc_sh_mask {  {\  	DDC_REG_LIST_DCN2(SCL)\  } -#endif  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 1c12961f6472..1ae153eab31d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -48,18 +48,18 @@  struct gpio; -static void destruct( +static void dal_hw_ddc_destruct(  	struct hw_ddc *pin)  {  	dal_hw_gpio_destruct(&pin->base);  } -static void destroy( +static void dal_hw_ddc_destroy(  	struct hw_gpio_pin **ptr)  {  	struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr); -	destruct(pin); +	dal_hw_ddc_destruct(pin);  	kfree(pin); @@ -150,7 +150,6 @@ static enum gpio_result set_config(  					AUX_PAD1_MODE, 0);  		} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {  				REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);  		} @@ -158,7 +157,6 @@ static enum gpio_result set_config(  		if (ddc->regs->phy_aux_cntl != 0) {  				REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);  		} -#endif  		return GPIO_RESULT_OK;  	case GPIO_DDC_CONFIG_TYPE_MODE_AUX:  		/* set the AUX pad mode */ @@ -166,12 +164,10 @@ static enum gpio_result set_config(  			REG_SET(gpio.MASK_reg, regval,  					AUX_PAD1_MODE, 1);  		} -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  		if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {  			REG_UPDATE(dc_gpio_aux_ctrl_5,  					DDC_PAD_I2CMODE, 0);  		} -#endif  		return GPIO_RESULT_OK;  	case 
GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT: @@ -211,7 +207,7 @@ static enum gpio_result set_config(  }  static const struct hw_gpio_pin_funcs funcs = { -	.destroy = destroy, +	.destroy = dal_hw_ddc_destroy,  	.open = dal_hw_gpio_open,  	.get_value = dal_hw_gpio_get_value,  	.set_value = dal_hw_gpio_set_value, @@ -220,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = {  	.close = dal_hw_gpio_close,  }; -static void construct( +static void dal_hw_ddc_construct(  	struct hw_ddc *ddc,  	enum gpio_id id,  	uint32_t en, @@ -247,7 +243,7 @@ void dal_hw_ddc_init(  		return;  	} -	construct(*hw_ddc, id, en, ctx); +	dal_hw_ddc_construct(*hw_ddc, id, en, ctx);  }  struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index fa9f1d055ec8..d2d36d48caaa 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -45,15 +45,11 @@  #include "dce80/hw_factory_dce80.h"  #include "dce110/hw_factory_dce110.h"  #include "dce120/hw_factory_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "dcn10/hw_factory_dcn10.h"  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dcn20/hw_factory_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #include "dcn21/hw_factory_dcn21.h" -#endif  #include "diagnostics/hw_factory_diag.h" @@ -90,19 +86,15 @@ bool dal_hw_factory_init(  	case DCE_VERSION_12_1:  		dal_hw_factory_dce120_init(factory);  		return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case DCN_VERSION_1_0:  	case DCN_VERSION_1_01:  		dal_hw_factory_dcn10_init(factory);  		return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case DCN_VERSION_2_0:  		dal_hw_factory_dcn20_init(factory);  		return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case DCN_VERSION_2_1:  		dal_hw_factory_dcn21_init(factory);  		
return true; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c index 69b899741f6d..f9e847e6555d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c @@ -46,22 +46,13 @@  struct gpio; -static void dal_hw_generic_construct( -	struct hw_generic *pin, -	enum gpio_id id, -	uint32_t en, -	struct dc_context *ctx) -{ -	dal_hw_gpio_construct(&pin->base, id, en, ctx); -} -  static void dal_hw_generic_destruct(  	struct hw_generic *pin)  {  	dal_hw_gpio_destruct(&pin->base);  } -static void destroy( +static void dal_hw_generic_destroy(  	struct hw_gpio_pin **ptr)  {  	struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr); @@ -90,7 +81,7 @@ static enum gpio_result set_config(  }  static const struct hw_gpio_pin_funcs funcs = { -	.destroy = destroy, +	.destroy = dal_hw_generic_destroy,  	.open = dal_hw_gpio_open,  	.get_value = dal_hw_gpio_get_value,  	.set_value = dal_hw_gpio_set_value, @@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = {  	.close = dal_hw_gpio_close,  }; -static void construct( -	struct hw_generic *generic, +static void dal_hw_generic_construct( +	struct hw_generic *pin,  	enum gpio_id id,  	uint32_t en,  	struct dc_context *ctx)  { -	dal_hw_generic_construct(generic, id, en, ctx); -	generic->base.base.funcs = &funcs; +	dal_hw_gpio_construct(&pin->base, id, en, ctx); +	pin->base.base.funcs = &funcs;  }  void dal_hw_generic_init( @@ -126,7 +117,7 @@ void dal_hw_generic_init(  		return;  	} -	construct(*hw_generic, id, en, ctx); +	dal_hw_generic_construct(*hw_generic, id, en, ctx);  } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c index 00c9bcf660a3..692f29de7797 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c @@ -46,34 +46,18 @@  struct gpio; -static void dal_hw_hpd_construct( -	struct hw_hpd *pin, -	
enum gpio_id id, -	uint32_t en, -	struct dc_context *ctx) -{ -	dal_hw_gpio_construct(&pin->base, id, en, ctx); -} -  static void dal_hw_hpd_destruct(  	struct hw_hpd *pin)  {  	dal_hw_gpio_destruct(&pin->base);  } - -static void destruct( -	struct hw_hpd *hpd) -{ -	dal_hw_hpd_destruct(hpd); -} - -static void destroy( +static void dal_hw_hpd_destroy(  	struct hw_gpio_pin **ptr)  {  	struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr); -	destruct(hpd); +	dal_hw_hpd_destruct(hpd);  	kfree(hpd); @@ -120,7 +104,7 @@ static enum gpio_result set_config(  }  static const struct hw_gpio_pin_funcs funcs = { -	.destroy = destroy, +	.destroy = dal_hw_hpd_destroy,  	.open = dal_hw_gpio_open,  	.get_value = get_value,  	.set_value = dal_hw_gpio_set_value, @@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = {  	.close = dal_hw_gpio_close,  }; -static void construct( -	struct hw_hpd *hpd, +static void dal_hw_hpd_construct( +	struct hw_hpd *pin,  	enum gpio_id id,  	uint32_t en,  	struct dc_context *ctx)  { -	dal_hw_hpd_construct(hpd, id, en, ctx); -	hpd->base.base.funcs = &funcs; +	dal_hw_gpio_construct(&pin->base, id, en, ctx); +	pin->base.base.funcs = &funcs;  }  void dal_hw_hpd_init( @@ -156,7 +140,7 @@ void dal_hw_hpd_init(  		return;  	} -	construct(*hw_hpd, id, en, ctx); +	dal_hw_hpd_construct(*hw_hpd, id, en, ctx);  }  struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index f2046f55d6a8..5d396657a1ee 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -43,15 +43,11 @@  #include "dce80/hw_translate_dce80.h"  #include "dce110/hw_translate_dce110.h"  #include "dce120/hw_translate_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "dcn10/hw_translate_dcn10.h"  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include 
"dcn20/hw_translate_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #include "dcn21/hw_translate_dcn21.h" -#endif  #include "diagnostics/hw_translate_diag.h" @@ -85,19 +81,15 @@ bool dal_hw_translate_init(  	case DCE_VERSION_12_1:  		dal_hw_translate_dce120_init(translate);  		return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	case DCN_VERSION_1_0:  	case DCN_VERSION_1_01:  		dal_hw_translate_dcn10_init(translate);  		return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	case DCN_VERSION_2_0:  		dal_hw_translate_dcn20_init(translate);  		return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	case DCN_VERSION_2_1:  		dal_hw_translate_dcn21_init(translate);  		return true; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fd39e2abe2ed..4ead89dd7c41 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -43,10 +43,8 @@ enum dc_status {  	DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */  	DC_FAIL_SCALING = 14,  	DC_FAIL_DP_LINK_TRAINING = 15, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	DC_FAIL_DSC_VALIDATE = 16,  	DC_NO_DSC_RESOURCE = 17, -#endif  	DC_FAIL_UNSUPPORTED_1 = 18,  	DC_FAIL_CLK_EXCEED_MAX = 21,  	DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index a831079607cd..f285b76888fb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -33,13 +33,11 @@  #include "dc_bios_types.h"  #include "mem_input.h"  #include "hubp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "mpc.h"  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #include "dwb.h"  #include "mcif_wb.h" -#endif  #define MAX_CLOCK_SOURCES 7 @@ -89,9 +87,7 @@ void core_link_set_avmute(struct 
pipe_ctx *pipe_ctx, bool enable);  struct resource_pool;  struct dc_state;  struct resource_context; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  struct clk_bw_params; -#endif  struct resource_funcs {  	void (*destroy)(struct resource_pool **pool); @@ -105,7 +101,7 @@ struct resource_funcs {  	int (*populate_dml_pipes)(  		struct dc *dc, -		struct resource_context *res_ctx, +		struct dc_state *context,  		display_e2e_pipe_params_st *pipes);  	enum dc_status (*validate_global)( @@ -135,7 +131,6 @@ struct resource_funcs {  			struct resource_context *res_ctx,  			const struct resource_pool *pool,  			struct dc_stream_state *stream); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*populate_dml_writeback_from_context)(  			struct dc *dc,  			struct resource_context *res_ctx, @@ -146,12 +141,9 @@ struct resource_funcs {  			struct dc_state *context,  			display_e2e_pipe_params_st *pipes,  			int pipe_cnt); -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	void (*update_bw_bounding_box)(  			struct dc *dc,  			struct clk_bw_params *bw_params); -#endif  }; @@ -180,7 +172,6 @@ struct resource_pool {  	struct dce_i2c_sw *sw_i2cs[MAX_PIPES];  	bool i2c_hw_buffer_in_use; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct dwbc *dwbc[MAX_DWB_PIPES];  	struct mcif_wb *mcif_wb[MAX_DWB_PIPES];  	struct { @@ -188,11 +179,8 @@ struct resource_pool {  		unsigned int gsl_1:1;  		unsigned int gsl_2:1;  	} gsl_groups; -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct display_stream_compressor *dscs[MAX_PIPES]; -#endif  	unsigned int pipe_count;  	unsigned int underlay_pipe_index; @@ -206,9 +194,7 @@ struct resource_pool {  	unsigned int timing_generator_count;  	unsigned int mpcc_count; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	unsigned int writeback_pipe_count; -#endif  	/*  	 * reserved clock source for DP  	 */ @@ -226,9 +212,12 @@ struct resource_pool {  	struct abm *abm;  	struct dmcu *dmcu; +	struct dmub_psr *psr;  	const struct resource_funcs *funcs;  	const struct resource_caps 
*res_cap; + +	struct ddc_service *oem_device;  };  struct dcn_fe_bandwidth { @@ -238,9 +227,7 @@ struct dcn_fe_bandwidth {  struct stream_resource {  	struct output_pixel_processor *opp; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	struct display_stream_compressor *dsc; -#endif  	struct timing_generator *tg;  	struct stream_encoder *stream_enc;  	struct audio *audio; @@ -249,12 +236,10 @@ struct stream_resource {  	struct encoder_info_frame encoder_info_frame;  	struct abm *abm; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* There are only (num_pipes+1)/2 groups. 0 means unassigned,  	 * otherwise it's using group number 'gsl_group-1'  	 */  	uint8_t gsl_group; -#endif  };  struct plane_resource { @@ -306,17 +291,15 @@ struct pipe_ctx {  	struct pipe_ctx *next_odm_pipe;  	struct pipe_ctx *prev_odm_pipe; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	struct _vcs_dpi_display_dlg_regs_st dlg_regs;  	struct _vcs_dpi_display_ttu_regs_st ttu_regs;  	struct _vcs_dpi_display_rq_regs_st rq_regs;  	struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;  #endif  	union pipe_update_flags update_flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	struct dwbc *dwbc;  	struct mcif_wb *mcif_wb; -#endif  };  struct resource_context { @@ -325,9 +308,7 @@ struct resource_context {  	bool is_audio_acquired[MAX_PIPES];  	uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];  	uint8_t dp_clock_source_ref_count; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	bool is_dsc_acquired[MAX_PIPES]; -#endif  };  struct dce_bw_output { @@ -347,18 +328,14 @@ struct dce_bw_output {  	int blackout_recovery_time_us;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dcn_bw_writeback {  	struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES];  }; -#endif  struct dcn_bw_output {  	struct dc_clocks clk;  	struct dcn_watermark_set watermarks; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct dcn_bw_writeback bw_writeback; -#endif  };  union bw_output { @@ -392,7 +369,7 @@ struct dc_state {  	/* Note: these are big 
structures, do *not* put on stack! */  	struct dm_pp_display_configuration pp_display_cfg; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN  	struct dcn_bw_internal_vars dcn_bw_vars;  #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 14716ba35662..de2d160114db 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,  bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,  		struct aux_payload *payload); -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,  		uint32_t timeout);  void dal_ddc_service_write_scdc_data( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 045138dbdccb..8b1f0ce6c2a7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,8 +28,8 @@  #define LINK_TRAINING_ATTEMPTS 4  #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/  struct dc_link;  struct dc_stream_state; @@ -57,10 +57,11 @@ void decide_link_settings(  	struct dc_link_settings *link_setting);  bool perform_link_training_with_retries( -	struct dc_link *link,  	const struct dc_link_settings *link_setting,  	bool skip_video_pattern, -	int attempts); +	int attempts, +	struct pipe_ctx *pipe_ctx, +	enum signal_type signal);  bool is_mst_supported(struct dc_link *link); @@ -75,13 +76,13 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable);  enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);  void 
dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dp_overwrite_extended_receiver_cap(struct dc_link *link); +  void dp_set_fec_ready(struct dc_link *link, bool ready);  void dp_set_fec_enable(struct dc_link *link, bool enable);  bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);  bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);  void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);  bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -#endif  #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h index 45a07eeffbb6..45a07eeffbb6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 4e18e77dcf42..ac530c057ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -31,7 +31,6 @@  #define DCN_MINIMUM_DISPCLK_Khz 100000  #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  /* Constants */  #define DDR4_DRAM_WIDTH   64  #define WM_A 0 @@ -39,12 +38,10 @@  #define WM_C 2  #define WM_D 3  #define WM_SET_COUNT 4 -#endif  #define DCN_MINIMUM_DISPCLK_Khz 100000  #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1  /* Will these bw structures be ASIC specific? 
*/  #define MAX_NUM_DPM_LVL		8 @@ -69,6 +66,8 @@ struct wm_range_table_entry {  	unsigned int wm_inst;  	unsigned int wm_type;  	double pstate_latency_us; +	double sr_exit_time_us; +	double sr_enter_plus_exit_time_us;  	bool valid;  }; @@ -152,7 +151,6 @@ struct clk_bw_params {  	struct clk_limit_table clk_table;  	struct wm_table wm_table;  }; -#endif  /* Public interfaces */  struct clk_states { @@ -193,9 +191,8 @@ struct clk_mgr {  	bool psr_allow_active_cache;  	int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes  	int dentist_vco_freq_khz; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 +	struct clk_state_registers_and_bypass boot_snapshot;  	struct clk_bw_params *bw_params; -#endif  };  /* forward declarations */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index a17a77192690..862952c0286a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -96,12 +96,10 @@ enum dentist_divider_range {  	.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \  	.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #define CLK_REG_LIST_NV10() \  	SR(DENTIST_DISPCLK_CNTL), \  	CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \  	CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0) -#endif  #define CLK_SF(reg_name, field_name, post_fix)\  	.field_name = reg_name ## __ ## field_name ## post_fix @@ -120,7 +118,6 @@ enum dentist_divider_range {  	CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\  	CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh), -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \  	CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\  	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\ @@ -130,7 +127,6 @@ enum dentist_divider_range {  	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\  	CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\  	
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh) -#endif  #define CLK_REG_FIELD_LIST(type) \  	type DPREFCLK_SRC_SEL; \ @@ -143,30 +139,24 @@ enum dentist_divider_range {   ****************** Clock Manager Private Structures ***********************************   ***************************************************************************************   */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  #define CLK20_REG_FIELD_LIST(type) \  	type DENTIST_DPPCLK_WDIVIDER; \  	type DENTIST_DPPCLK_CHG_DONE; \  	type FbMult_int; \  	type FbMult_frac; -#endif  #define VBIOS_SMU_REG_FIELD_LIST(type) \  	type CONTENT;  struct clk_mgr_shift {  	CLK_REG_FIELD_LIST(uint8_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	CLK20_REG_FIELD_LIST(uint8_t) -#endif  	VBIOS_SMU_REG_FIELD_LIST(uint32_t)  };  struct clk_mgr_mask {  	CLK_REG_FIELD_LIST(uint32_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	CLK20_REG_FIELD_LIST(uint32_t) -#endif  	VBIOS_SMU_REG_FIELD_LIST(uint32_t)  }; @@ -174,10 +164,8 @@ struct clk_mgr_registers {  	uint32_t DPREFCLK_CNTL;  	uint32_t DENTIST_DISPCLK_CNTL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	uint32_t CLK3_CLK2_DFS_CNTL;  	uint32_t CLK3_CLK_PLL_REQ; -#endif  	uint32_t MP1_SMN_C2PMSG_67;  	uint32_t MP1_SMN_C2PMSG_83; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index c81a17aeaa25..c0dc1d0f5cae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -52,7 +52,6 @@ struct dcn_hubbub_wm {  	struct dcn_hubbub_wm_set sets[4];  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  enum dcn_hubbub_page_table_depth {  	DCN_PAGE_TABLE_DEPTH_1_LEVEL,  	DCN_PAGE_TABLE_DEPTH_2_LEVEL, @@ -101,13 +100,11 @@ struct hubbub_addr_config {  	} default_addrs;  }; -#endif  struct hubbub_funcs {  	void (*update_dchub)(  			struct hubbub *hubbub,  			struct dchub_init_data *dh_data); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	int (*init_dchub_sys_ctx)(  			struct hubbub *hubbub,  			struct 
dcn_hubbub_phys_addr_config *pa_config); @@ -116,7 +113,6 @@ struct hubbub_funcs {  			struct dcn_hubbub_virt_addr_config *va_config,  			int vmid); -#endif  	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,  			const struct dc_dcc_surface_param *input,  			struct dc_surface_dcc_cap *output); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index c68f0ce346c7..5315f1f86b21 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -52,6 +52,8 @@ struct dmcu {  	enum dmcu_state dmcu_state;  	struct dmcu_version dmcu_version;  	unsigned int cached_wait_loop_number; +	uint32_t psp_version; +	bool auto_load_dmcu;  };  struct dmcu_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 474c7194a9f8..45ef390ae052 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -36,14 +36,10 @@ struct dpp {  	struct dpp_caps *caps;  	struct pwl_params regamma_params;  	struct pwl_params degamma_params; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct dpp_cursor_attributes cur_attr; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct pwl_params shaper_params;  	bool cm_bypass_mode; -#endif  };  struct dpp_input_csc_matrix { @@ -51,12 +47,31 @@ struct dpp_input_csc_matrix {  	uint16_t regval[12];  }; +static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { +	{COLOR_SPACE_SRGB, +		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, +	{COLOR_SPACE_SRGB_LIMITED, +		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, +	{COLOR_SPACE_YCBCR601, +		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, +						0, 0x2000, 0x38b4, 0xe3a6} }, +	{COLOR_SPACE_YCBCR601_LIMITED, +		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, +						0, 0x2568, 0x40de, 0xdd3a} }, +	{COLOR_SPACE_YCBCR709, +		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 
0xfa01, 0xa7d, 0, +						0x2000, 0x3b61, 0xe24f} }, + +	{COLOR_SPACE_YCBCR709_LIMITED, +		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, +						0x2568, 0x43ee, 0xdbb2} } +}; +  struct dpp_grph_csc_adjustment {  	struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];  	enum graphics_gamut_adjust_type gamut_adjust_type;  }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  struct cnv_color_keyer_params {  	int color_keyer_en;  	int color_keyer_mode; @@ -82,7 +97,6 @@ struct cnv_alpha_2bit_lut {  	int lut2;  	int lut3;  }; -#endif  struct dcn_dpp_state {  	uint32_t is_enabled; @@ -190,12 +204,8 @@ struct dpp_funcs {  			enum surface_pixel_format format,  			enum expansion_mode mode,  			struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  			enum dc_color_space input_color_space,  			struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else -			enum dc_color_space input_color_space); -#endif  	void (*dpp_full_bypass)(struct dpp *dpp_base); @@ -224,7 +234,6 @@ struct dpp_funcs {  			bool dppclk_div,  			bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	bool (*dpp_program_blnd_lut)(  			struct dpp *dpp,  			const struct pwl_params *params); @@ -237,7 +246,6 @@ struct dpp_funcs {  	void (*dpp_cnv_set_alpha_keyer)(  			struct dpp *dpp_base,  			struct cnv_color_keyer_params *color_keyer); -#endif  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index c6ff3d78b435..c59740084ebc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -22,7 +22,6 @@   * Authors: AMD   *   */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #ifndef __DAL_DSC_H__  #define __DAL_DSC_H__ @@ -98,4 +97,3 @@ struct dsc_funcs {  };  #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index ff1a07b35c85..459f95f52486 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -51,20 +51,15 @@ enum dwb_source {  	dwb_src_otg3,		/* for DCN1.x/DCN2.x */  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  /* DCN1.x, DCN2.x support 2 pipes */ -#else -/* DCN1.x supports 2 pipes */ -#endif  enum dwb_pipe {  	dwb_pipe0 = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  	dwb_pipe1,  #endif  	dwb_pipe_max_num,  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  enum dwb_frame_capture_enable {  	DWB_FRAME_CAPTURE_DISABLE = 0,  	DWB_FRAME_CAPTURE_ENABLE = 1, @@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel {  	WBSCL_COEF_CHROMA_HORZ_FILTER = 3  }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dwb_warmup_params {  	bool	warmup_en;	/* false: normal mode, true: enable pattern generator */  	bool	warmup_mode;	/* false: 420, true: 444 */ @@ -88,7 +81,6 @@ struct dwb_warmup_params {  	int	warmup_width;	/* Pattern width (pixels) */  	int	warmup_height;	/* Pattern height (lines) */  }; -#endif  struct dwb_caps {  	enum dce_version hw_version;	/* DCN engine version. 
*/ @@ -121,7 +113,8 @@ struct dwbc {  	int wb_src_plane_inst;/*hubp, mpcc, inst*/  	bool update_privacymask;  	uint32_t mask_id; - +        int otg_inst; +        bool mvc_cfg;  };  struct dwbc_funcs { @@ -150,13 +143,11 @@ struct dwbc_funcs {  		struct dwbc *dwbc,  		bool is_new_content); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*set_warmup)(  		struct dwbc *dwbc,  		struct dwb_warmup_params *warmup_params); -#endif  	bool (*get_dwb_status)(  		struct dwbc *dwbc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 809b62b51a43..2cb8466e657b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -38,9 +38,7 @@ enum cursor_pitch {  };  enum cursor_lines_per_chunk { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */ -#endif  	CURSOR_LINE_PER_CHUNK_2 = 1,  	CURSOR_LINE_PER_CHUNK_4,  	CURSOR_LINE_PER_CHUNK_8, @@ -65,6 +63,26 @@ struct hubp {  	bool power_gated;  }; +struct surface_flip_registers { +	uint32_t DCSURF_SURFACE_CONTROL; +	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; +	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; +	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; +	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; +	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; +	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; +	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; +	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; +	uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; +	uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; +	uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; +	uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; +	bool tmz_surface; +	bool immediate; +	uint8_t vmid; +	bool grph_stereo; +}; +  struct hubp_funcs {  	void (*hubp_setup)(  			struct hubp *hubp, @@ -86,6 +104,9 @@ struct hubp_funcs {  			const struct rect *viewport,  			const struct rect *viewport_c); +	void (*apply_PLAT_54186_wa)(struct hubp *hubp, +	
		const struct dc_plane_address *address); +  	bool (*hubp_program_surface_flip_and_addr)(  		struct hubp *hubp,  		const struct dc_plane_address *address, @@ -139,7 +160,6 @@ struct hubp_funcs {  	unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);  	void (*hubp_init)(struct hubp *hubp); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*dmdata_set_attributes)(  			struct hubp *hubp,  			const struct dc_dmdata_attributes *attr); @@ -159,7 +179,13 @@ struct hubp_funcs {  	void (*hubp_set_flip_control_surface_gsl)(  		struct hubp *hubp,  		bool enable); -#endif + +	void (*validate_dml_output)( +			struct hubp *hubp, +			struct dc_context *ctx, +			struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, +			struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, +			struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr);  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index f82365e2d03c..75d419081e76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -36,9 +36,7 @@  #define MAX_AUDIOS 7  #define MAX_PIPES 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define MAX_DWB_PIPES	1 -#endif  struct gamma_curve {  	uint32_t offset; @@ -81,7 +79,6 @@ struct pwl_result_data {  	uint32_t delta_blue_reg;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dc_rgb {  	uint32_t red;  	uint32_t green; @@ -110,7 +107,6 @@ struct tetrahedral_params {  	bool use_12bits;  }; -#endif  /* arr_curve_points - regamma regions/segments specification   * arr_points - beginning and end point specified separately (only one on DCE) @@ -195,13 +191,11 @@ enum opp_regamma {  	OPP_REGAMMA_USER  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  enum optc_dsc_mode {  	OPTC_DSC_DISABLED = 0,  	OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */  	OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */  }; -#endif  struct 
dc_bias_and_scale {  	uint16_t scale_red; @@ -224,12 +218,8 @@ enum test_pattern_mode {  	TEST_PATTERN_MODE_VERTICALBARS,  	TEST_PATTERN_MODE_HORIZONTALBARS,  	TEST_PATTERN_MODE_SINGLERAMP_RGB, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	TEST_PATTERN_MODE_DUALRAMP_RGB,  	TEST_PATTERN_MODE_XR_BIAS_RGB -#else -	TEST_PATTERN_MODE_DUALRAMP_RGB -#endif  };  enum test_pattern_color_format { @@ -255,6 +245,13 @@ enum controller_dp_test_pattern {  	CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR  }; +enum controller_dp_color_space { +	CONTROLLER_DP_COLOR_SPACE_RGB, +	CONTROLLER_DP_COLOR_SPACE_YCBCR601, +	CONTROLLER_DP_COLOR_SPACE_YCBCR709, +	CONTROLLER_DP_COLOR_SPACE_UDEFINED +}; +  enum dc_lut_mode {  	LUT_BYPASS,  	LUT_RAM_A, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index b21909216fb6..fb748f082c56 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -113,26 +113,21 @@ struct link_encoder {  	struct encoder_feature_support features;  	enum transmitter transmitter;  	enum hpd_source_id hpd_source; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	bool usbc_combo_phy; -#endif  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct link_enc_state {  		uint32_t dphy_fec_en;  		uint32_t dphy_fec_ready_shadow;  		uint32_t dphy_fec_active_status; +		uint32_t dp_link_training_complete;  }; -#endif  struct link_encoder_funcs { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	void (*read_state)(  			struct link_encoder *enc, struct link_enc_state *s); -#endif  	bool (*validate_output_with_stream)(  		struct link_encoder *enc, const struct dc_stream_state *stream);  	void (*hw_init)(struct link_encoder *enc); @@ -174,7 +169,6 @@ struct link_encoder_funcs {  	unsigned int (*get_dig_frontend)(struct link_encoder *enc);  	void (*destroy)(struct link_encoder **enc); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*fec_set_enable)(struct link_encoder *enc,  		bool enable); @@ 
-182,7 +176,6 @@ struct link_encoder_funcs {  		bool ready);  	bool (*fec_is_active)(struct link_encoder *enc); -#endif  	bool (*is_in_alt_mode) (struct link_encoder *enc);  	void (*get_max_link_cap)(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 67b610d6d91f..2e2310f1901a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st {  struct dcn_watermarks {  	uint32_t pte_meta_urgent_ns;  	uint32_t urgent_ns; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	uint32_t frac_urg_bw_nom;  	uint32_t frac_urg_bw_flip;  	int32_t urgent_latency_ns; -#endif  	struct cstate_pstate_watermarks_st cstate_pstate;  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 58826be81395..094afc4c8173 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -31,9 +31,7 @@  #define MAX_MPCC 6  #define MAX_OPP 6 -#if   defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define MAX_DWB		1 -#endif  enum mpc_output_csc_mode {  	MPC_OUTPUT_CSC_DISABLE = 0, @@ -66,14 +64,12 @@ struct mpcc_blnd_cfg {  	int global_alpha;  	bool overlap_only; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	/* MPCC top/bottom gain settings */  	int bottom_gain_mode;  	int background_color_bpc;  	int top_gain;  	int bottom_inside_gain;  	int bottom_outside_gain; -#endif  };  struct mpcc_sm_cfg { @@ -90,7 +86,6 @@ struct mpcc_sm_cfg {  	int force_next_field_polarity;  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct mpc_denorm_clamp {  	int clamp_max_r_cr;  	int clamp_min_r_cr; @@ -99,7 +94,6 @@ struct mpc_denorm_clamp {  	int clamp_max_b_cb;  	int clamp_min_b_cb;  }; -#endif  /*   * MPCC connection and blending configuration for a single MPCC instance. 
@@ -126,10 +120,8 @@ struct mpc {  	struct dc_context *ctx;  	struct mpcc mpcc_array[MAX_MPCC]; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	struct pwl_params blender_params;  	bool cm_bypass_mode; -#endif  };  struct mpcc_state { @@ -230,7 +222,6 @@ struct mpc_funcs {  		struct mpc *mpc,  		struct mpc_tree *tree); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*set_denorm)(struct mpc *mpc,  			int opp_id,  			enum dc_color_depth output_depth); @@ -258,7 +249,6 @@ struct mpc_funcs {  			struct mpc *mpc,  			int mpcc_id,  			bool power_on); -#endif  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 18def2b6fafe..7575564b2265 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -263,9 +263,7 @@ struct oppbuf_params {  	enum oppbuf_display_segmentation mso_segmentation;  	uint32_t mso_overlap_pixel_num;  	uint32_t pixel_repetition; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	uint32_t num_segment_padded_pixels; -#endif  };  struct opp_funcs { @@ -305,10 +303,10 @@ struct opp_funcs {  			struct output_pixel_processor *opp,  			bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*opp_set_disp_pattern_generator)(  			struct output_pixel_processor *opp,  			enum controller_dp_test_pattern test_pattern, +			enum controller_dp_color_space color_space,  			enum dc_color_depth color_depth,  			const struct tg_color *solid_color,  			int width, @@ -324,7 +322,6 @@ struct opp_funcs {  	void (*opp_program_left_edge_extra_pixel)(  			struct output_pixel_processor *opp,  			bool count); -#endif  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 6305e388612a..351b387ad606 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -65,13 +65,11 @@ struct audio_clock_info {  	uint32_t cts_48khz;  }; -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_0)  enum dynamic_metadata_mode {  	dmdata_dp,  	dmdata_hdmi,  	dmdata_dolby_vision  }; -#endif  struct encoder_info_frame {  	/* auxiliary video information */ @@ -90,9 +88,7 @@ struct encoder_info_frame {  struct encoder_unblank_param {  	struct dc_link_settings link_settings;  	struct dc_crtc_timing timing; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  	int opp_cnt; -#endif  };  struct encoder_set_dp_phy_pattern_param { @@ -109,7 +105,6 @@ struct stream_encoder {  	enum engine_id id;  }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  struct enc_state {  	uint32_t dsc_mode;  // DISABLED  0; 1 or 2 indicate enabled state.  	uint32_t dsc_slice_width; @@ -119,13 +114,13 @@ struct enc_state {  	uint32_t sec_gsp_pps_enable;  	uint32_t sec_stream_enable;  }; -#endif  struct stream_encoder_funcs {  	void (*dp_set_stream_attribute)(  		struct stream_encoder *enc,  		struct dc_crtc_timing *crtc_timing,  		enum dc_color_space output_color_space, +		bool use_vsc_sdp_for_colorimetry,  		uint32_t enable_sdp_splitting);  	void (*hdmi_set_stream_attribute)( @@ -219,8 +214,6 @@ struct stream_encoder_funcs {  		enum dc_pixel_encoding *encoding,  		enum dc_color_depth *depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);  	void (*dp_set_dsc_config)( @@ -232,7 +225,6 @@ struct stream_encoder_funcs {  	void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,  				bool enable,  				uint8_t *dsc_packed_pps); -#endif  	void (*set_dynamic_metadata)(struct stream_encoder *enc,  			bool enable, @@ -242,7 +234,6 @@ struct stream_encoder_funcs {  	void (*dp_set_odm_combine)(  		struct stream_encoder *enc,  		bool odm_combine); -#endif  };  #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 27c73caf74ee..e5e7d94026fc 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -195,10 +195,8 @@ struct timing_generator_funcs {  	void (*lock)(struct timing_generator *tg);  	void (*lock_doublebuffer_disable)(struct timing_generator *tg);  	void (*lock_doublebuffer_enable)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void(*triplebuffer_unlock)(struct timing_generator *tg);  	void(*triplebuffer_lock)(struct timing_generator *tg); -#endif  	void (*enable_reset_trigger)(struct timing_generator *tg,  				     int source_tg_inst);  	void (*enable_crtc_reset)(struct timing_generator *tg, @@ -210,7 +208,8 @@ struct timing_generator_funcs {  					bool enable, const struct dc_crtc_timing *timing);  	void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);  	void (*set_static_screen_control)(struct timing_generator *tg, -							uint32_t value); +						uint32_t event_triggers, +						uint32_t num_frames);  	void (*set_test_pattern)(  		struct timing_generator *tg,  		enum controller_dp_test_pattern test_pattern, @@ -235,7 +234,6 @@ struct timing_generator_funcs {  	bool (*is_optc_underflow_occurred)(struct timing_generator *tg);  	void (*clear_optc_underflow)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	void (*set_dwb_source)(struct timing_generator *optc,  		uint32_t dwb_pipe_inst); @@ -243,7 +241,6 @@ struct timing_generator_funcs {  			uint32_t *num_of_input_segments,  			uint32_t *seg0_src_sel,  			uint32_t *seg1_src_sel); -#endif  	/**  	 * Configure CRCs for the given timing generator. 
Return false if TG is @@ -267,13 +264,10 @@ struct timing_generator_funcs {  	void (*set_vtg_params)(struct timing_generator *optc,  			const struct dc_crtc_timing *dc_crtc_timing); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	void (*set_dsc_config)(struct timing_generator *optc,  			       enum optc_dsc_mode dsc_mode,  			       uint32_t dsc_bytes_per_pixel,  			       uint32_t dsc_slice_width); -#endif  	void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);  	void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,  			struct dc_crtc_timing *timing); @@ -281,7 +275,6 @@ struct timing_generator_funcs {  	void (*set_gsl_source_select)(struct timing_generator *optc,  			int group_idx,  			uint32_t gsl_ready_signal); -#endif  };  #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d39c1e11def5..209118f9f193 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,326 +32,160 @@  #include "inc/hw/link_encoder.h"  #include "core_status.h" -enum pipe_gating_control { -	PIPE_GATING_CONTROL_DISABLE = 0, -	PIPE_GATING_CONTROL_ENABLE, -	PIPE_GATING_CONTROL_INIT -}; -  enum vline_select {  	VLINE0,  	VLINE1  }; -struct dce_hwseq_wa { -	bool blnd_crtc_trigger; -	bool DEGVIDCN10_253; -	bool false_optc_underflow; -	bool DEGVIDCN10_254; -	bool DEGVIDCN21; -}; - -struct hwseq_wa_state { -	bool DEGVIDCN10_253_applied; -}; - -struct dce_hwseq { -	struct dc_context *ctx; -	const struct dce_hwseq_registers *regs; -	const struct dce_hwseq_shift *shifts; -	const struct dce_hwseq_mask *masks; -	struct dce_hwseq_wa wa; -	struct hwseq_wa_state wa_state; -}; -  struct pipe_ctx;  struct dc_state; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  struct dc_stream_status;  struct dc_writeback_info; -#endif  struct dchub_init_data; -struct dc_static_screen_events; 
+struct dc_static_screen_params;  struct resource_pool; -struct resource_context; -struct stream_resource; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0  struct dc_phy_addr_space_config;  struct dc_virtual_addr_space_config; -#endif -struct hubp;  struct dpp; +struct dce_hwseq;  struct hw_sequencer_funcs { +	/* Embedded Display Related */ +	void (*edp_power_control)(struct dc_link *link, bool enable); +	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); -	void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - -	void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - +	/* Pipe Programming Related */  	void (*init_hw)(struct dc *dc); - -	void (*init_pipes)(struct dc *dc, struct dc_state *context); - -	enum dc_status (*apply_ctx_to_hw)( -			struct dc *dc, struct dc_state *context); - -	void (*reset_hw_ctx_wrap)( -			struct dc *dc, struct dc_state *context); - -	void (*apply_ctx_for_surface)( -			struct dc *dc, +	void (*enable_accelerated_mode)(struct dc *dc, +			struct dc_state *context); +	enum dc_status (*apply_ctx_to_hw)(struct dc *dc, +			struct dc_state *context); +	void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*apply_ctx_for_surface)(struct dc *dc,  			const struct dc_stream_state *stream, -			int num_planes, +			int num_planes, struct dc_state *context); +	void (*program_front_end_for_ctx)(struct dc *dc,  			struct dc_state *context); - -	void (*program_gamut_remap)( +	void (*update_plane_addr)(const struct dc *dc,  			struct pipe_ctx *pipe_ctx); - -	void (*program_output_csc)(struct dc *dc, -			struct pipe_ctx *pipe_ctx, -			enum dc_color_space colorspace, -			uint16_t *matrix, -			int opp_id); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -	void (*program_front_end_for_ctx)( -			struct dc *dc, -			struct dc_state *context); -	void (*program_triplebuffer)( -		const struct dc *dc, -		struct pipe_ctx *pipe_ctx, -		bool enableTripleBuffer); -	void (*set_flip_control_gsl)( -		struct pipe_ctx 
*pipe_ctx, -		bool flip_immediate); -#endif - -	void (*update_plane_addr)( -		const struct dc *dc, -		struct pipe_ctx *pipe_ctx); - -	void (*plane_atomic_disconnect)( -		struct dc *dc, -		struct pipe_ctx *pipe_ctx); - -	void (*update_dchub)( -		struct dce_hwseq *hws, -		struct dchub_init_data *dh_data); - -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -	int (*init_sys_ctx)( -			struct dce_hwseq *hws, -			struct dc *dc, -			struct dc_phy_addr_space_config *pa_config); -	void (*init_vm_ctx)( -			struct dce_hwseq *hws, -			struct dc *dc, -			struct dc_virtual_addr_space_config *va_config, -			int vmid); -#endif -	void (*update_mpcc)( -		struct dc *dc, -		struct pipe_ctx *pipe_ctx); - -	void (*update_pending_status)( +	void (*update_dchub)(struct dce_hwseq *hws, +			struct dchub_init_data *dh_data); +	void (*wait_for_mpcc_disconnect)(struct dc *dc, +			struct resource_pool *res_pool,  			struct pipe_ctx *pipe_ctx); - -	bool (*set_input_transfer_func)( -				struct pipe_ctx *pipe_ctx, -				const struct dc_plane_state *plane_state); - -	bool (*set_output_transfer_func)( -				struct pipe_ctx *pipe_ctx, -				const struct dc_stream_state *stream); - -	void (*power_down)(struct dc *dc); - -	void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context); - -	void (*enable_timing_synchronization)( -			struct dc *dc, -			int group_index, -			int group_size, -			struct pipe_ctx *grouped_pipes[]); - -	void (*enable_per_frame_crtc_position_reset)( -			struct dc *dc, -			int group_size, +	void (*program_triplebuffer)(const struct dc *dc, +		struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); +	void (*update_pending_status)(struct pipe_ctx *pipe_ctx); + +	/* Pipe Lock Related */ +	void (*pipe_control_lock_global)(struct dc *dc, +			struct pipe_ctx *pipe, bool lock); +	void (*pipe_control_lock)(struct dc *dc, +			struct pipe_ctx *pipe, bool lock); +	void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, +			bool flip_immediate); + +	/* Timing Related */ +	void (*get_position)(struct 
pipe_ctx **pipe_ctx, int num_pipes, +			struct crtc_position *position); +	int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); +	void (*enable_per_frame_crtc_position_reset)(struct dc *dc, +			int group_size, struct pipe_ctx *grouped_pipes[]); +	void (*enable_timing_synchronization)(struct dc *dc, +			int group_index, int group_size,  			struct pipe_ctx *grouped_pipes[]); +	void (*setup_periodic_interrupt)(struct dc *dc, +			struct pipe_ctx *pipe_ctx, +			enum vline_select vline); +	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, +			unsigned int vmin, unsigned int vmax, +			unsigned int vmid, unsigned int vmid_frame_number); +	void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, +			int num_pipes, +			const struct dc_static_screen_params *events); -	void (*enable_display_pipe_clock_gating)( -					struct dc_context *ctx, -					bool clock_gating); - -	bool (*enable_display_power_gating)( -					struct dc *dc, -					uint8_t controller_id, -					struct dc_bios *dcb, -					enum pipe_gating_control power_gating); - -	void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); - -	void (*update_info_frame)(struct pipe_ctx *pipe_ctx); - -	void (*send_immediate_sdp_message)( -				struct pipe_ctx *pipe_ctx, -				const uint8_t *custom_sdp_message, -				unsigned int sdp_message_size); - +	/* Stream Related */  	void (*enable_stream)(struct pipe_ctx *pipe_ctx); -  	void (*disable_stream)(struct pipe_ctx *pipe_ctx); - +	void (*blank_stream)(struct pipe_ctx *pipe_ctx);  	void (*unblank_stream)(struct pipe_ctx *pipe_ctx,  			struct dc_link_settings *link_settings); -	void (*blank_stream)(struct pipe_ctx *pipe_ctx); - -	void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); +	/* Bandwidth Related */ +	void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context); +	bool (*update_bandwidth)(struct dc *dc, struct dc_state *context); +	void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context); -	void (*disable_audio_stream)(struct 
pipe_ctx *pipe_ctx); - -	void (*pipe_control_lock)( -				struct dc *dc, -				struct pipe_ctx *pipe, -				bool lock); - -	void (*pipe_control_lock_global)( -				struct dc *dc, -				struct pipe_ctx *pipe, -				bool lock); -	void (*blank_pixel_data)( -			struct dc *dc, +	/* Infopacket Related */ +	void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); +	void (*send_immediate_sdp_message)(  			struct pipe_ctx *pipe_ctx, -			bool blank); - -	void (*prepare_bandwidth)( -			struct dc *dc, -			struct dc_state *context); -	void (*optimize_bandwidth)( -			struct dc *dc, -			struct dc_state *context); - -	void (*exit_optimized_pwr_state)( -			const struct dc *dc, -			struct dc_state *context); -	void (*optimize_pwr_state)( -			const struct dc *dc, -			struct dc_state *context); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -	bool (*update_bandwidth)( -			struct dc *dc, -			struct dc_state *context); +			const uint8_t *custom_sdp_message, +			unsigned int sdp_message_size); +	void (*update_info_frame)(struct pipe_ctx *pipe_ctx); +	void (*set_dmdata_attributes)(struct pipe_ctx *pipe);  	void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);  	bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); -#endif - -	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, -			unsigned int vmin, unsigned int vmax, -			unsigned int vmid, unsigned int vmid_frame_number); - -	void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, -			struct crtc_position *position); - -	void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, -			int num_pipes, const struct dc_static_screen_events *events); - -	enum dc_status (*enable_stream_timing)( -			struct pipe_ctx *pipe_ctx, -			struct dc_state *context, -			struct dc *dc); - -	void (*setup_stereo)( -			struct pipe_ctx *pipe_ctx, -			struct dc *dc); - -	void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); - -	void (*log_hw_state)(struct dc *dc, -		struct dc_log_buffer_ctx *log_ctx); -	void (*get_hw_state)(struct dc *dc, char 
*pBuf, unsigned int bufSize, unsigned int mask); -	void (*clear_status_bits)(struct dc *dc, unsigned int mask); - -	void (*wait_for_mpcc_disconnect)(struct dc *dc, -			struct resource_pool *res_pool, -			struct pipe_ctx *pipe_ctx); - -	void (*edp_power_control)( -			struct dc_link *link, -			bool enable); -	void (*edp_backlight_control)( -			struct dc_link *link, -			bool enable); -	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); +	/* Cursor Related */  	void (*set_cursor_position)(struct pipe_ctx *pipe);  	void (*set_cursor_attribute)(struct pipe_ctx *pipe);  	void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); -	void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline); -	void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx); -	bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); - -	void (*init_blank)(struct dc *dc, struct timing_generator *tg); -	void (*disable_vga)(struct dce_hwseq *hws); -	void (*bios_golden_init)(struct dc *dc); -	void (*plane_atomic_power_down)(struct dc *dc, -			struct dpp *dpp, -			struct hubp *hubp); - -	void (*plane_atomic_disable)( -			struct dc *dc, struct pipe_ctx *pipe_ctx); - -	void (*enable_power_gating_plane)( -		struct dce_hwseq *hws, -		bool enable); - -	void (*dpp_pg_control)( -			struct dce_hwseq *hws, -			unsigned int dpp_inst, -			bool power_on); - -	void (*hubp_pg_control)( -			struct dce_hwseq *hws, -			unsigned int hubp_inst, -			bool power_on); - -	void (*dsc_pg_control)( -			struct dce_hwseq *hws, -			unsigned int dsc_inst, -			bool power_on); - +	/* Colour Related */ +	void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); +	void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, +			enum dc_color_space colorspace, +			uint16_t *matrix, int opp_id); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -	void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); -	void (*program_all_writeback_pipes_in_tree)( +	/* 
VM Related */ +	int (*init_sys_ctx)(struct dce_hwseq *hws,  			struct dc *dc, -			const struct dc_stream_state *stream, -			struct dc_state *context); +			struct dc_phy_addr_space_config *pa_config); +	void (*init_vm_ctx)(struct dce_hwseq *hws, +			struct dc *dc, +			struct dc_virtual_addr_space_config *va_config, +			int vmid); + +	/* Writeback Related */  	void (*update_writeback)(struct dc *dc, -			const struct dc_stream_status *stream_status,  			struct dc_writeback_info *wb_info,  			struct dc_state *context);  	void (*enable_writeback)(struct dc *dc, -			const struct dc_stream_status *stream_status,  			struct dc_writeback_info *wb_info,  			struct dc_state *context);  	void (*disable_writeback)(struct dc *dc,  			unsigned int dwb_pipe_inst); -#endif -	enum dc_status (*set_clock)(struct dc *dc, -			enum dc_clock_type clock_type, -			uint32_t clk_khz, -			uint32_t stepping); -	void (*get_clock)(struct dc *dc, +	bool (*mmhubbub_warmup)(struct dc *dc, +			unsigned int num_dwb, +			struct dc_writeback_info *wb_info); + +	/* Clock Related */ +	enum dc_status (*set_clock)(struct dc *dc,  			enum dc_clock_type clock_type, +			uint32_t clk_khz, uint32_t stepping); +	void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type,  			struct dc_clock_config *clock_cfg); +	void (*optimize_pwr_state)(const struct dc *dc, +			struct dc_state *context); +	void (*exit_optimized_pwr_state)(const struct dc *dc, +			struct dc_state *context); + +	/* Audio Related */ +	void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); +	void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); + +	/* Stereo 3D Related */ +	void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc); + +	/* HW State Logging Related */ +	void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); +	void (*get_hw_state)(struct dc *dc, char *pBuf, +			unsigned int bufSize, unsigned int mask); +	void (*clear_status_bits)(struct dc *dc, unsigned int mask); + -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) 
-	bool (*s0i3_golden_init_wa)(struct dc *dc); -#endif  };  void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h new file mode 100644 index 000000000000..ecf566378ccd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -0,0 +1,156 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HW_SEQUENCER_PRIVATE_H__ +#define __DC_HW_SEQUENCER_PRIVATE_H__ + +#include "dc_types.h" + +enum pipe_gating_control { +	PIPE_GATING_CONTROL_DISABLE = 0, +	PIPE_GATING_CONTROL_ENABLE, +	PIPE_GATING_CONTROL_INIT +}; + +struct dce_hwseq_wa { +	bool blnd_crtc_trigger; +	bool DEGVIDCN10_253; +	bool false_optc_underflow; +	bool DEGVIDCN10_254; +	bool DEGVIDCN21; +}; + +struct hwseq_wa_state { +	bool DEGVIDCN10_253_applied; +}; + +struct pipe_ctx; +struct dc_state; +struct dc_stream_status; +struct dc_writeback_info; +struct dchub_init_data; +struct dc_static_screen_params; +struct resource_pool; +struct resource_context; +struct stream_resource; +struct dc_phy_addr_space_config; +struct dc_virtual_addr_space_config; +struct hubp; +struct dpp; +struct dce_hwseq; +struct timing_generator; +struct tg_color; +struct output_pixel_processor; + +struct hwseq_private_funcs { + +	void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*init_pipes)(struct dc *dc, struct dc_state *context); +	void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context); +	void (*update_plane_addr)(const struct dc *dc, +			struct pipe_ctx *pipe_ctx); +	void (*plane_atomic_disconnect)(struct dc *dc, +			struct pipe_ctx *pipe_ctx); +	void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	bool (*set_input_transfer_func)(struct dc *dc, +				struct pipe_ctx *pipe_ctx, +				const struct dc_plane_state *plane_state); +	bool (*set_output_transfer_func)(struct dc *dc, +				struct pipe_ctx *pipe_ctx, +				const struct dc_stream_state *stream); +	void (*power_down)(struct dc *dc); +	void (*enable_display_pipe_clock_gating)(struct dc_context *ctx, +					bool clock_gating); +	bool (*enable_display_power_gating)(struct dc *dc, +					uint8_t controller_id, +					struct dc_bios *dcb, +					enum pipe_gating_control power_gating); +	void 
(*blank_pixel_data)(struct dc *dc, +			struct pipe_ctx *pipe_ctx, +			bool blank); +	enum dc_status (*enable_stream_timing)( +			struct pipe_ctx *pipe_ctx, +			struct dc_state *context, +			struct dc *dc); +	void (*edp_backlight_control)(struct dc_link *link, +			bool enable); +	void (*setup_vupdate_interrupt)(struct dc *dc, +			struct pipe_ctx *pipe_ctx); +	bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*init_blank)(struct dc *dc, struct timing_generator *tg); +	void (*disable_vga)(struct dce_hwseq *hws); +	void (*bios_golden_init)(struct dc *dc); +	void (*plane_atomic_power_down)(struct dc *dc, +			struct dpp *dpp, +			struct hubp *hubp); +	void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*enable_power_gating_plane)(struct dce_hwseq *hws, +		bool enable); +	void (*dpp_pg_control)(struct dce_hwseq *hws, +			unsigned int dpp_inst, +			bool power_on); +	void (*hubp_pg_control)(struct dce_hwseq *hws, +			unsigned int hubp_inst, +			bool power_on); +	void (*dsc_pg_control)(struct dce_hwseq *hws, +			unsigned int dsc_inst, +			bool power_on); +	void (*update_odm)(struct dc *dc, struct dc_state *context, +			struct pipe_ctx *pipe_ctx); +	void (*program_all_writeback_pipes_in_tree)(struct dc *dc, +			const struct dc_stream_state *stream, +			struct dc_state *context); +	bool (*s0i3_golden_init_wa)(struct dc *dc); +	void (*get_surface_visual_confirm_color)( +			const struct pipe_ctx *pipe_ctx, +			struct tg_color *color); +	void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx, +			struct tg_color *color); +	void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); +	void (*verify_allow_pstate_change_high)(struct dc *dc); +	void (*program_pipe)(struct dc *dc, +			struct pipe_ctx *pipe_ctx, +			struct dc_state *context); +	bool (*wait_for_blank_complete)(struct output_pixel_processor *opp); +	void (*dccg_init)(struct dce_hwseq *hws); +	bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, +			const struct 
dc_plane_state *plane_state); +	bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx, +			const struct dc_plane_state *plane_state); +}; + +struct dce_hwseq { +	struct dc_context *ctx; +	const struct dce_hwseq_registers *regs; +	const struct dce_hwseq_shift *shifts; +	const struct dce_hwseq_mask *masks; +	struct dce_hwseq_wa wa; +	struct hwseq_wa_state wa_state; +	struct hwseq_private_funcs funcs; + +}; + +#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 4eff5d38a2f9..9af7ee5bc8ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);  bool dp_set_hw_training_pattern(  	struct dc_link *link, -	enum dc_dp_training_pattern pattern); +	enum dc_dp_training_pattern pattern, +	uint32_t offset);  void dp_set_hw_lane_settings(  	struct dc_link *link, -	const struct link_training_settings *link_settings); +	const struct link_training_settings *link_settings, +	uint32_t offset);  void dp_set_hw_test_pattern(  	struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 8503d9cc4763..2470405e996b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,  #define IX_REG_READ(index_reg_name, data_reg_name, index) \  		generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index)) +#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) 
\ +		generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \ +				IND_REG(index), \ +				n, __VA_ARGS__) +#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \ +		IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \ +				FN(data_reg_name, field), val)  #define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...)	\  		generic_indirect_reg_update_ex(CTX, \ @@ -479,10 +486,35 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,  		uint32_t addr_index, uint32_t addr_data,  		uint32_t index); +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, +		uint32_t addr_index, uint32_t addr_data, +		uint32_t index, int n, +		uint8_t shift1, uint32_t mask1, uint32_t *field_value1, +		...); +  uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,  		uint32_t addr_index, uint32_t addr_data,  		uint32_t index, uint32_t reg_val, int n,  		uint8_t shift1, uint32_t mask1, uint32_t field_value1,  		...); +/* register offload macros + * + * instead of MMIO to register directly, in some cases we want + * to gather register sequence and execute the register sequence + * from another thread so we optimize time required for lengthy ops + */ + +/* start gathering register sequence */ +#define REG_SEQ_START() \ +	reg_sequence_start_gather(CTX) + +/* start execution of register sequence gathered since REG_SEQ_START */ +#define REG_SEQ_SUBMIT() \ +	reg_sequence_start_execute(CTX) + +/* wait for the last REG_SEQ_SUBMIT to finish */ +#define REG_SEQ_WAIT_DONE() \ +	reg_sequence_wait_done(CTX) +  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index bef224bf803e..5ae8ada154ef 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -46,12 +46,8 @@ struct resource_caps {  	int num_pll;  	int num_dwb;  	int num_ddc; -#ifdef 
CONFIG_DRM_AMD_DC_DCN2_0  	int num_vmid; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	int num_dsc; -#endif -#endif  };  struct resource_straps { @@ -181,4 +177,6 @@ void update_audio_usage(  unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); +void get_audio_check(struct audio_info *aud_modes, +	struct audio_check *aud_chk);  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index ea75420fc876..0f682ac53bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -60,27 +60,23 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)  ###############################################################################  # DCN 1x  ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN  IRQ_DCN1 = irq_service_dcn10.o  AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))  AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1) -endif  ###############################################################################  # DCN 20  ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0  IRQ_DCN2 = irq_service_dcn20.o  AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))  AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2) -endif  ###############################################################################  # DCN 21  ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_1  IRQ_DCN21 = irq_service_dcn21.o  AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 1a581c464345..378cc11aa047 100644 --- 
a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,  		       bool enable)  {  	struct dc_context *dc_ctx = irq_service->ctx; -	struct dc *core_dc = irq_service->ctx->dc; +	struct dc *dc = irq_service->ctx->dc;  	enum dc_irq_source dal_irq_src =  			dc_interrupt_to_irq_source(irq_service->ctx->dc,  						   info->src_id, @@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,  	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;  	struct timing_generator *tg = -			core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; +			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;  	if (enable) {  		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { @@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {  		.to_dal_irq_source = to_dal_irq_source_dce110  }; -static void construct(struct irq_service *irq_service, +static void dce110_irq_construct(struct irq_service *irq_service,  		      struct irq_service_init_data *init_data)  {  	dal_irq_service_construct(irq_service, init_data); @@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data)  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dce110_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 15380336cb51..2fe4703395f3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = {  		.to_dal_irq_source = to_dal_irq_source_dce110  }; -static void construct( +static void dce120_irq_construct(  	struct irq_service 
*irq_service,  	struct irq_service_init_data *init_data)  { @@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create(  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dce120_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 281fee8ad1e5..17e426b80a00 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = {  		.to_dal_irq_source = to_dal_irq_source_dce110  }; -static void construct( +static void dce80_irq_construct(  	struct irq_service *irq_service,  	struct irq_service_init_data *init_data)  { @@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create(  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dce80_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index cc8e7dedccce..f956b3bde680 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = {  		.to_dal_irq_source = to_dal_irq_source_dcn10  }; -static void construct( +static void dcn10_irq_construct(  	struct irq_service *irq_service,  	struct irq_service_init_data *init_data)  { @@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create(  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dcn10_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 
b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 5db29bf582d3..2a1fea501f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = {  		.to_dal_irq_source = to_dal_irq_source_dcn20  }; -static void construct( +static void dcn20_irq_construct(  	struct irq_service *irq_service,  	struct irq_service_init_data *init_data)  { @@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create(  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dcn20_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index cbe7818529bb..1b971265418b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = {  		.to_dal_irq_source = to_dal_irq_source_dcn21  }; -static void construct( +static void dcn21_irq_construct(  	struct irq_service *irq_service,  	struct irq_service_init_data *init_data)  { @@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create(  	if (!irq_service)  		return NULL; -	construct(irq_service, init_data); +	dcn21_irq_construct(irq_service, init_data);  	return irq_service;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 0878550a8178..33053b9fe6bd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -38,7 +38,7 @@  #include "dce120/irq_service_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN)  #include "dcn10/irq_service_dcn10.h"  #endif diff --git 
a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 30ec80ac6fc8..c34eba19860a 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -1,5 +1,6 @@  /*   * Copyright 2012-16 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC   *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the "Software"), @@ -29,6 +30,7 @@  #include <linux/kgdb.h>  #include <linux/kref.h>  #include <linux/types.h> +#include <linux/slab.h>  #include <asm/byteorder.h> @@ -48,8 +50,39 @@  #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_X86)  #include <asm/fpu/api.h> +#define DC_FP_START() kernel_fpu_begin() +#define DC_FP_END() kernel_fpu_end() +#elif defined(CONFIG_PPC64) +#include <asm/switch_to.h> +#include <asm/cputable.h> +#define DC_FP_START() { \ +	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ +		preempt_disable(); \ +		enable_kernel_vsx(); \ +	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ +		preempt_disable(); \ +		enable_kernel_altivec(); \ +	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ +		preempt_disable(); \ +		enable_kernel_fp(); \ +	} \ +} +#define DC_FP_END() { \ +	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ +		disable_kernel_vsx(); \ +		preempt_enable(); \ +	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ +		disable_kernel_altivec(); \ +		preempt_enable(); \ +	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ +		disable_kernel_fp(); \ +		preempt_enable(); \ +	} \ +} +#endif  #endif  /* diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index ff664bdb1482..b8040da94b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ 
b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	enum dc_color_space output_color_space, +	bool use_vsc_sdp_for_colorimetry,  	uint32_t enable_sdp_splitting) {}  static void virtual_stream_encoder_hdmi_set_stream_attribute( @@ -81,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute(  		struct stream_encoder *enc)  {} -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  static void virtual_enc_dp_set_odm_combine(  	struct stream_encoder *enc,  	bool odm_combine)  {} -#endif -#endif  static const struct stream_encoder_funcs virtual_str_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	.dp_set_odm_combine =  		virtual_enc_dp_set_odm_combine, -#endif -#endif  	.dp_set_stream_attribute =  		virtual_stream_encoder_dp_set_stream_attribute,  	.hdmi_set_stream_attribute = diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h new file mode 100644 index 000000000000..cd9532b4f14d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -0,0 +1,289 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_CMD_H_ +#define _DMUB_CMD_H_ + +#include "dmub_types.h" +#include "dmub_cmd_dal.h" +#include "dmub_cmd_vbios.h" +#include "atomfirmware.h" + +#define DMUB_RB_CMD_SIZE 64 +#define DMUB_RB_MAX_ENTRY 128 +#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) +#define REG_SET_MASK 0xFFFF + + +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + +enum dmub_cmd_type { +	DMUB_CMD__NULL = 0, +	DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1, +	DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2, +	DMUB_CMD__REG_SEQ_BURST_WRITE = 3, +	DMUB_CMD__REG_REG_WAIT = 4, +	DMUB_CMD__PLAT_54186_WA = 5, +	DMUB_CMD__PSR = 64, +	DMUB_CMD__VBIOS = 128, +}; + +#pragma pack(push, 1) + +struct dmub_cmd_header { +	unsigned int type : 8; +	unsigned int sub_type : 8; +	unsigned int reserved0 : 8; +	unsigned int payload_bytes : 6;  /* up to 60 bytes */ +	unsigned int reserved1 : 2; +}; + +/* + * Read modify write + * + * 60 payload bytes can hold up to 5 sets of read modify writes, + * each take 3 dwords. + * + * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence) + * + * modify_mask = 0xffff'ffff means all fields are going to be updated.  
in this case + * command parser will skip the read and we can use modify_mask = 0xffff'ffff as reg write + */ +struct dmub_cmd_read_modify_write_sequence { +	uint32_t addr; +	uint32_t modify_mask; +	uint32_t modify_value; +}; + +#define DMUB_READ_MODIFY_WRITE_SEQ__MAX		5 +struct dmub_rb_cmd_read_modify_write { +	struct dmub_cmd_header header;  // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE +	struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX]; +}; + +/* + * Update a register with specified masks and values sequeunce + * + * 60 payload bytes can hold address + up to 7 sets of mask/value combo, each take 2 dword + * + * number of field update sequence = (header.payload_bytes - sizeof(addr)) / sizeof(struct read_modify_write_sequence) + * + * + * USE CASE: + *   1. auto-increment register where additional read would update pointer and produce wrong result + *   2. toggle a bit without read in the middle + */ + +struct dmub_cmd_reg_field_update_sequence { +	uint32_t modify_mask;  // 0xffff'ffff to skip initial read +	uint32_t modify_value; +}; + +#define DMUB_REG_FIELD_UPDATE_SEQ__MAX		7 + +struct dmub_rb_cmd_reg_field_update_sequence { +	struct dmub_cmd_header header; +	uint32_t addr; +	struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX]; +}; + + +/* + * Burst write + * + * support use case such as writing out LUTs. 
 *
 * 60 payload bytes can hold up to 14 values to write to given address
 *
 * number of payload = header.payload_bytes / sizeof(struct read_modify_write_sequence)
 */
#define DMUB_BURST_WRITE_VALUES__MAX  14

/* Burst write: one base address, up to 14 consecutive 32-bit values. */
struct dmub_rb_cmd_burst_write {
	struct dmub_cmd_header header;  // type = DMUB_CMD__REG_SEQ_BURST_WRITE
	uint32_t addr;
	uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX];
};

/* Generic command view: header plus raw payload filling one ring slot. */
struct dmub_rb_cmd_common {
	struct dmub_cmd_header header;
	uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)];
};

/* Poll @addr until (value & @mask) == @condition_field_value, or until
 * @time_out_us microseconds elapse. */
struct dmub_cmd_reg_wait_data {
	uint32_t addr;
	uint32_t mask;
	uint32_t condition_field_value;
	uint32_t time_out_us;
};

struct dmub_rb_cmd_reg_wait {
	struct dmub_cmd_header header;
	struct dmub_cmd_reg_wait_data reg_wait;
};

#ifndef PHYSICAL_ADDRESS_LOC
#define PHYSICAL_ADDRESS_LOC union large_integer
#endif

/*
 * Flip state for the PLAT_54186 workaround.
 * NOTE(review): uppercase fields appear to mirror HUBP registers of the same
 * name — confirm against the register spec before relying on that.
 */
struct dmub_cmd_PLAT_54186_wa {
	uint32_t DCSURF_SURFACE_CONTROL;
	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
	struct {
		uint8_t hubp_inst : 4;
		uint8_t tmz_surface : 1;
		uint8_t immediate :1;
		uint8_t vmid : 4;
		uint8_t grph_stereo : 1;
		uint32_t reserved : 21;
	} flip_params;
	uint32_t reserved[9];
};

struct dmub_rb_cmd_PLAT_54186_wa {
	struct dmub_cmd_header header;
	struct dmub_cmd_PLAT_54186_wa flip;
};

/* VBIOS-style commands: thin wrappers over the atom parameter structs. */
struct dmub_cmd_digx_encoder_control_data {
	union dig_encoder_control_parameters_v1_5 dig;
};

struct dmub_rb_cmd_digx_encoder_control {
	struct dmub_cmd_header header;
	struct dmub_cmd_digx_encoder_control_data encoder_control;
};

struct dmub_cmd_set_pixel_clock_data {
	struct set_pixel_clock_parameter_v1_7 clk;
};

struct dmub_rb_cmd_set_pixel_clock {
	struct dmub_cmd_header header;
	struct dmub_cmd_set_pixel_clock_data pixel_clock;
};

struct dmub_cmd_enable_disp_power_gating_data {
	struct enable_disp_power_gating_parameters_v2_1 pwr;
};

struct dmub_rb_cmd_enable_disp_power_gating {
	struct dmub_cmd_header header;
	struct dmub_cmd_enable_disp_power_gating_data power_gating;
};

struct dmub_cmd_dig1_transmitter_control_data {
	struct dig_transmitter_control_parameters_v1_6 dig;
};

struct dmub_rb_cmd_dig1_transmitter_control {
	struct dmub_cmd_header header;
	struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
};

/* PHY init carries no payload; padding keeps the slot size fixed. */
struct dmub_rb_cmd_dpphy_init {
	struct dmub_cmd_header header;
	uint8_t reserved[60];
};

/* Full PSR configuration: pipe/encoder instances and PSR tuning knobs. */
struct dmub_cmd_psr_copy_settings_data {
	uint16_t psr_level;
	uint8_t hubp_inst;
	uint8_t dpp_inst;
	uint8_t mpcc_inst;
	uint8_t opp_inst;
	uint8_t otg_inst;
	uint8_t digfe_inst;
	uint8_t digbe_inst;
	uint8_t dpphy_inst;
	uint8_t aux_inst;
	uint8_t hyst_frames;
	uint8_t hyst_lines;
	uint8_t phy_num;
	uint8_t phy_type;
	uint8_t aux_repeat;
	uint8_t smu_optimizations_en;
	uint8_t skip_wait_for_pll_lock;
	uint8_t frame_delay;
	uint8_t smu_phy_id;
	uint8_t num_of_controllers;
	uint8_t link_rate;
	uint8_t frame_cap_ind;
};

struct dmub_rb_cmd_psr_copy_settings {
	struct dmub_cmd_header header;
	struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data;
};

struct dmub_cmd_psr_set_level_data {
	uint16_t psr_level;
};

struct dmub_rb_cmd_psr_set_level {
	struct dmub_cmd_header header;
	struct dmub_cmd_psr_set_level_data psr_set_level_data;
};

struct dmub_rb_cmd_psr_enable {
	struct dmub_cmd_header header;
};

struct dmub_cmd_psr_setup_data {
	enum psr_version version; // PSR version 1 or 2
};

struct dmub_rb_cmd_psr_setup {
	struct dmub_cmd_header header;
	struct dmub_cmd_psr_setup_data psr_setup_data;
};

/*
 * One ring-buffer slot. Every command type is accessible through this
 * union; the header's type/sub_type select which member is valid.
 */
union dmub_rb_cmd {
	struct dmub_rb_cmd_read_modify_write read_modify_write;
	struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
	struct dmub_rb_cmd_burst_write burst_write;
	struct dmub_rb_cmd_reg_wait reg_wait;
	struct dmub_rb_cmd_common cmd_common;
	struct dmub_rb_cmd_digx_encoder_control digx_encoder_control;
	struct dmub_rb_cmd_set_pixel_clock set_pixel_clock;
	struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
	struct dmub_rb_cmd_dpphy_init dpphy_init;
	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
	struct dmub_rb_cmd_psr_enable psr_enable;
	struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
	struct dmub_rb_cmd_psr_set_level psr_set_level;
	struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
	struct dmub_rb_cmd_psr_setup psr_setup;
};

#pragma pack(pop)

#endif /* _DMUB_CMD_H_ */
IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef _DMUB_CMD_DAL_H_
#define _DMUB_CMD_DAL_H_

/*
 * Command IDs should be treated as stable ABI.
 * Do not reuse or modify IDs.
 */

/* PSR sub-command identifiers carried in the dmub command header. */
enum dmub_cmd_psr_type {
	DMUB_CMD__PSR_SETUP		= 0,
	DMUB_CMD__PSR_COPY_SETTINGS	= 1,
	DMUB_CMD__PSR_ENABLE		= 2,
	DMUB_CMD__PSR_DISABLE		= 3,
	DMUB_CMD__PSR_SET_LEVEL		= 4,
};

/* PSR capability level reported/configured via DMUB_CMD__PSR_SETUP. */
enum psr_version {
	PSR_VERSION_1			= 0x10, // PSR Version 1
	PSR_VERSION_2			= 0x20, // PSR Version 2, includes selective update
	PSR_VERSION_2_Y_COORD		= 0x21, // PSR Version 2, includes Y-coordinate support for SU
};

#endif /* _DMUB_CMD_DAL_H_ */
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef _DMUB_CMD_VBIOS_H_
#define _DMUB_CMD_VBIOS_H_

/*
 * Command IDs should be treated as stable ABI.
 * Do not reuse or modify IDs.
 */

/* VBIOS sub-command identifiers carried in the dmub command header. */
enum dmub_cmd_vbios_type {
	DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
	DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
	DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
	DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
};

#endif /* _DMUB_CMD_VBIOS_H_ */
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#ifndef _DMUB_META_H_
#define _DMUB_META_H_

#include "dmub_types.h"

#pragma pack(push, 1)

/* Magic value for identifying dmub_fw_meta_info ("DMUB" in ASCII) */
#define DMUB_FW_META_MAGIC 0x444D5542

/* Offset from the end of the file to the dmub_fw_meta_info */
#define DMUB_FW_META_OFFSET 0x24

/**
 * struct dmub_fw_meta_info - metadata associated with fw binary
 *
 * NOTE: This should be considered a stable API. Fields should
 *       not be repurposed or reordered. New fields should be
 *       added instead to extend the structure.
 *
 * @magic_value: magic value identifying DMUB firmware meta info
 * @fw_region_size: size of the firmware state region
 * @trace_buffer_size: size of the tracebuffer region
 */
struct dmub_fw_meta_info {
	uint32_t magic_value;
	uint32_t fw_region_size;
	uint32_t trace_buffer_size;
};

/* Ensure that the structure remains 64 bytes. */
union dmub_fw_meta {
	struct dmub_fw_meta_info info;
	uint8_t reserved[64];
};

#pragma pack(pop)

#endif /* _DMUB_META_H_ */
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_RB_H_ +#define _DMUB_RB_H_ + +#include "dmub_types.h" +#include "dmub_cmd.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +struct dmub_cmd_header; + +struct dmub_rb_init_params { +	void *ctx; +	void *base_address; +	uint32_t capacity; +}; + +struct dmub_rb { +	void *base_address; +	uint32_t data_count; +	uint32_t rptr; +	uint32_t wrpt; +	uint32_t capacity; + +	void *ctx; +	void *dmub; +}; + + +static inline bool dmub_rb_empty(struct dmub_rb *rb) +{ +	return (rb->wrpt == rb->rptr); +} + +static inline bool dmub_rb_full(struct dmub_rb *rb) +{ +	uint32_t data_count; + +	if (rb->wrpt >= rb->rptr) +		data_count = rb->wrpt - rb->rptr; +	else +		data_count = rb->capacity - (rb->rptr - rb->wrpt); + +	return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE)); +} + +static inline bool dmub_rb_push_front(struct dmub_rb *rb, +				      const struct dmub_cmd_header *cmd) +{ +	uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); +	const uint64_t *src = (const uint64_t *)cmd; +	int i; + +	if (dmub_rb_full(rb)) +		return false; + +	// copying data +	for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) +		*dst++ = *src++; + +	rb->wrpt += DMUB_RB_CMD_SIZE; + +	if (rb->wrpt >= rb->capacity) +		rb->wrpt %= rb->capacity; + +	return true; +} + +static inline bool dmub_rb_front(struct dmub_rb *rb, +				 struct dmub_cmd_header *cmd) +{ +	uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr; + +	if (dmub_rb_empty(rb)) +		return false; + +	dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE); + +	return true; +} + +static inline bool dmub_rb_pop_front(struct dmub_rb *rb) +{ +	if (dmub_rb_empty(rb)) +		return false; + +	rb->rptr += DMUB_RB_CMD_SIZE; + +	if (rb->rptr >= rb->capacity) +		rb->rptr %= rb->capacity; + +	return true; +} + +static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) +{ +	uint32_t rptr = rb->rptr; +	uint32_t wptr = rb->wrpt; + +	while (rptr != wptr) { +		uint64_t 
volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); +		//uint64_t volatile *p = (uint64_t volatile *)data; +		uint64_t temp; +		int i; + +		for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) +			temp = *data++; + +		rptr += DMUB_RB_CMD_SIZE; +		if (rptr >= rb->capacity) +			rptr %= rb->capacity; +	} +} + +static inline void dmub_rb_init(struct dmub_rb *rb, +				struct dmub_rb_init_params *init_params) +{ +	rb->base_address = init_params->base_address; +	rb->capacity = init_params->capacity; +	rb->rptr = 0; +	rb->wrpt = 0; +} + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_RB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h new file mode 100644 index 000000000000..8e23a7017588 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -0,0 +1,506 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef _DMUB_SRV_H_
#define _DMUB_SRV_H_

/**
 * DOC: DMUB interface and operation
 *
 * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
 * It delegates hardware initialization and command submission to the
 * microcontroller. DMUB is the shortname for DMCUB.
 *
 * This interface is not thread-safe. Ensure that all access to the interface
 * is properly synchronized by the caller.
 *
 * Initialization and usage of the DMUB service should be done in the
 * steps given below:
 *
 * 1. dmub_srv_create()
 * 2. dmub_srv_has_hw_support()
 * 3. dmub_srv_calc_region_info()
 * 4. dmub_srv_hw_init()
 *
 * The call to dmub_srv_create() is required to use the server.
 *
 * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
 * are helpers to query cache window size and allocate framebuffer(s)
 * for the cache windows.
 *
 * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
 * for command submission. Commands can be queued via dmub_srv_cmd_queue()
 * and executed via dmub_srv_cmd_execute().
 *
 * If the queue is full the dmub_srv_wait_for_idle() call can be used to
 * wait until the queue has been cleared.
 *
 * Destroying the DMUB service can be done by calling dmub_srv_destroy().
 * This does not clear DMUB hardware state, only software state.
 *
 * The interface is intended to be standalone and should not depend on any
 * other component within DAL.
 */

#include "dmub_types.h"
#include "dmub_cmd.h"
#include "dmub_rb.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Forward declarations */
struct dmub_srv;
struct dmub_cmd_header;
struct dmub_srv_common_regs;

/* enum dmub_status - return code for dmcub functions */
enum dmub_status {
	DMUB_STATUS_OK = 0,
	DMUB_STATUS_NO_CTX,
	DMUB_STATUS_QUEUE_FULL,
	DMUB_STATUS_TIMEOUT,
	DMUB_STATUS_INVALID,
};

/* enum dmub_asic - dmub asic identifier */
enum dmub_asic {
	DMUB_ASIC_NONE = 0,
	DMUB_ASIC_DCN20,
	DMUB_ASIC_DCN21,
	DMUB_ASIC_MAX,
};

/* enum dmub_window_id - dmub window identifier */
enum dmub_window_id {
	DMUB_WINDOW_0_INST_CONST = 0,
	DMUB_WINDOW_1_STACK,
	DMUB_WINDOW_2_BSS_DATA,
	DMUB_WINDOW_3_VBIOS,
	DMUB_WINDOW_4_MAILBOX,
	DMUB_WINDOW_5_TRACEBUFF,
	DMUB_WINDOW_6_FW_STATE,
	DMUB_WINDOW_7_RESERVED,
	DMUB_WINDOW_TOTAL,
};

/**
 * struct dmub_region - dmub hw memory region
 * @base: base address for region, must be 256 byte aligned
 * @top: top address for region
 */
struct dmub_region {
	uint32_t base;
	uint32_t top;
};

/**
 * struct dmub_window - dmub hw cache window
 * @offset: offset to the fb memory in gpu address space
 * @region: region in uc address space for cache window
 */
struct dmub_window {
	union dmub_addr offset;
	struct dmub_region region;
};

/**
 * struct dmub_fb - defines a dmub framebuffer memory region
 * @cpu_addr: cpu virtual address for the region, NULL if invalid
 * @gpu_addr: gpu virtual address for the region, zero if invalid
 * @size: size of the region in bytes, zero if invalid
 */
struct dmub_fb {
	void *cpu_addr;
	uint64_t gpu_addr;
	uint32_t size;
};

/**
 * struct dmub_srv_region_params - params used for calculating dmub regions
 * @inst_const_size: size of the fw inst const section
 * @bss_data_size: size of the fw bss data section
 * @vbios_size: size of the vbios data
 * @fw_bss_data: raw firmware bss data section
 */
struct dmub_srv_region_params {
	uint32_t inst_const_size;
	uint32_t bss_data_size;
	uint32_t vbios_size;
	const uint8_t *fw_bss_data;
};

/**
 * struct dmub_srv_region_info - output region info from the dmub service
 * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
 * @num_regions: number of regions used by the dmub service
 * @regions: region info
 *
 * The regions are aligned such that they can be all placed within the
 * same framebuffer but they can also be placed into different framebuffers.
 *
 * The size of each region can be calculated by the caller:
 * size = reg.top - reg.base
 *
 * Care must be taken when performing custom allocations to ensure that each
 * region base address is 256 byte aligned.
 */
struct dmub_srv_region_info {
	uint32_t fb_size;
	uint8_t num_regions;
	struct dmub_region regions[DMUB_WINDOW_TOTAL];
};

/**
 * struct dmub_srv_fb_params - parameters used for driver fb setup
 * @region_info: region info calculated by dmub service
 * @cpu_addr: base cpu address for the framebuffer
 * @gpu_addr: base gpu virtual address for the framebuffer
 */
struct dmub_srv_fb_params {
	const struct dmub_srv_region_info *region_info;
	void *cpu_addr;
	uint64_t gpu_addr;
};

/**
 * struct dmub_srv_fb_info - output fb info from the dmub service
 * @num_fb: number of required dmub framebuffers
 * @fb: fb data for each region
 *
 * Output from the dmub service helper that can be used by the
 * driver to prepare dmub_fb that can be passed into the dmub
 * hw init service.
 *
 * Assumes that all regions are within the same framebuffer
 * and have been setup according to the region_info generated
 * by the dmub service.
 */
struct dmub_srv_fb_info {
	uint8_t num_fb;
	struct dmub_fb fb[DMUB_WINDOW_TOTAL];
};

/**
 * struct dmub_srv_base_funcs - Driver specific base callbacks
 */
struct dmub_srv_base_funcs {
	/**
	 * @reg_read:
	 *
	 * Hook for reading a register.
	 *
	 * Return: The 32-bit register value from the given address.
	 */
	uint32_t (*reg_read)(void *ctx, uint32_t address);

	/**
	 * @reg_write:
	 *
	 * Hook for writing a value to the register specified by address.
	 */
	void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
};

/**
 * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
 */
struct dmub_srv_hw_funcs {
	/* private: internal use only */

	void (*reset)(struct dmub_srv *dmub);

	void (*reset_release)(struct dmub_srv *dmub);

	void (*backdoor_load)(struct dmub_srv *dmub,
			      const struct dmub_window *cw0,
			      const struct dmub_window *cw1);

	void (*setup_windows)(struct dmub_srv *dmub,
			      const struct dmub_window *cw2,
			      const struct dmub_window *cw3,
			      const struct dmub_window *cw4,
			      const struct dmub_window *cw5,
			      const struct dmub_window *cw6);

	void (*setup_mailbox)(struct dmub_srv *dmub,
			      const struct dmub_region *inbox1);

	uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);

	void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);

	bool (*is_supported)(struct dmub_srv *dmub);

	bool (*is_hw_init)(struct dmub_srv *dmub);

	bool (*is_phy_init)(struct dmub_srv *dmub);

	bool (*is_auto_load_done)(struct dmub_srv *dmub);
};

/**
 * struct dmub_srv_create_params - params for dmub service creation
 * @funcs: driver supplied base routines
 * @hw_funcs: optional overrides for hw funcs
 * @user_ctx: context data for callback funcs
 * @asic: driver supplied asic
 * @is_virtual: false for hw support only
 */
struct dmub_srv_create_params {
	struct dmub_srv_base_funcs funcs;
	struct dmub_srv_hw_funcs *hw_funcs;
	void *user_ctx;
	enum dmub_asic asic;
	bool is_virtual;
};

/*
 * struct dmub_srv_hw_params - params for dmub hardware initialization
 * @fb: framebuffer info for each region
 * @fb_base: base of the framebuffer aperture
 * @fb_offset: offset of the framebuffer aperture
 * @psp_version: psp version to pass for DMCU init
 * @load_inst_const: true if DMUB should load inst const fw
 */
struct dmub_srv_hw_params {
	struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
	uint64_t fb_base;
	uint64_t fb_offset;
	uint32_t psp_version;
	bool load_inst_const;
};

/**
 * struct dmub_srv - software state for dmcub
 * @asic: dmub asic identifier
 * @user_ctx: user provided context for the dmub_srv
 * @is_virtual: false if hardware support only
 * @fw_state: dmub firmware state pointer
 */
struct dmub_srv {
	enum dmub_asic asic;
	void *user_ctx;
	bool is_virtual;
	volatile const struct dmub_fw_state *fw_state;

	/* private: internal use only */
	const struct dmub_srv_common_regs *regs;

	struct dmub_srv_base_funcs funcs;
	struct dmub_srv_hw_funcs hw_funcs;
	struct dmub_rb inbox1_rb;

	bool sw_init;
	bool hw_init;

	uint64_t fb_base;
	uint64_t fb_offset;
	uint32_t psp_version;
};

/**
 * dmub_srv_create() - creates the DMUB service.
 * @dmub: the dmub service
 * @params: creation parameters for the service
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params);

/**
 * dmub_srv_destroy() - destroys the DMUB service.
 * @dmub: the dmub service
 */
void dmub_srv_destroy(struct dmub_srv *dmub);

/**
 * dmub_srv_calc_region_info() - retrieves region info from the dmub service
 * @dmub: the dmub service
 * @params: parameters used to calculate region locations
 * @out: the output region info from dmub
 *
 * Calculates the base and top address for all relevant dmub regions
 * using the parameters given (if any).
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv *dmub,
			  const struct dmub_srv_region_params *params,
			  struct dmub_srv_region_info *out);

/**
 * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
 * @dmub: the dmub service
 * @params: parameters used to calculate fb locations
 * @out: the output fb info from dmub
 *
 * Calculates the cpu and gpu address for each framebuffer region
 * using the parameters given (if any).
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
				       const struct dmub_srv_fb_params *params,
				       struct dmub_srv_fb_info *out);

/**
 * dmub_srv_has_hw_support() - returns hw support state for dmcub
 * @dmub: the dmub service
 * @is_supported: hw support state
 *
 * Queries the hardware for DMCUB support and returns the result.
 *
 * Can be called before dmub_srv_hw_init().
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported);

/**
 * dmub_srv_is_hw_init() - returns hardware init state
 * @dmub: the dmub service
 * @is_hw_init: hardware init state
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);

/**
 * dmub_srv_hw_init() - initializes the underlying DMUB hardware
 * @dmub: the dmub service
 * @params: params for hardware initialization
 *
 * Resets the DMUB hardware and performs backdoor loading of the
 * required cache regions based on the input framebuffer regions.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_NO_CTX - dmcub context not initialized
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params);

/**
 * dmub_srv_cmd_queue() - queues a command to the DMUB
 * @dmub: the dmub service
 * @cmd: the command to queue
 *
 * Queues a command to the DMUB service but does not begin execution
 * immediately.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_QUEUE_FULL - no remaining room in queue
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const struct dmub_cmd_header *cmd);

/**
 * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
 * @dmub: the dmub service
 *
 * Begins execution of queued commands on the dmub.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);

/**
 * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
 * @dmub: the dmub service
 * @timeout_us: the maximum number of microseconds to wait
 *
 * Waits until firmware has been autoloaded by the DMCUB. The maximum
 * wait time is given in microseconds to prevent spinning forever.
 *
 * On ASICs without firmware autoload support this function will return
 * immediately.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_TIMEOUT - wait for auto load timed out
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
					     uint32_t timeout_us);

/**
 * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
 * @dmub: the dmub service
 * @timeout_us: the maximum number of microseconds to wait
 *
 * Waits until the PHY has been initialized by the DMUB. The maximum
 * wait time is given in microseconds to prevent spinning forever.
 *
 * On ASICs without PHY init support this function will return
 * immediately.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
					    uint32_t timeout_us);

/**
 * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
 * @dmub: the dmub service
 * @timeout_us: the maximum number of microseconds to wait
 *
 * Waits until the DMUB buffer is empty and all commands have
 * finished processing. The maximum wait time is given in
 * microseconds to prevent spinning forever.
 *
 * Return:
 *   DMUB_STATUS_OK - success
 *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
 *   DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
					uint32_t timeout_us);

#if defined(__cplusplus)
}
#endif

#endif /* _DMUB_SRV_H_ */
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#ifndef _DMUB_TRACE_BUFFER_H_
#define _DMUB_TRACE_BUFFER_H_

#include "dmub_types.h"

/* Firmware load stages reported through the trace buffer. */
#define LOAD_DMCU_FW	1
#define LOAD_PHY_FW	2


/* NOTE(review): "dmucb" looks like a typo for "dmcub", but the name is
 * shared with the firmware-side trace format, so it is kept as-is. */
enum dmucb_trace_code {
	DMCUB__UNKNOWN,
	DMCUB__MAIN_BEGIN,
	DMCUB__PHY_INIT_BEGIN,
	DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
	DMCUB__PHY_FW_SRAM_LOAD_END,
	DMCUB__PHY_INIT_POLL_DONE,
	DMCUB__PHY_INIT_END,
	DMCUB__DMCU_ERAM_LOAD_BEGIN,
	DMCUB__DMCU_ERAM_LOAD_END,
	DMCUB__DMCU_ISR_LOAD_BEGIN,
	DMCUB__DMCU_ISR_LOAD_END,
	DMCUB__MAIN_IDLE,
	DMCUB__PERF_TRACE,
	DMCUB__PG_DONE,
};

/* One trace event written by the DMCUB firmware. */
struct dmcub_trace_buf_entry {
	enum dmucb_trace_code trace_code;
	uint32_t tick_count;
	uint32_t param0;
	uint32_t param1;
};

#define TRACE_BUF_SIZE (1024) //1 kB
/* The 8 subtracted bytes are the entry_count + clk_freq header below. */
#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry))


struct dmcub_trace_buf {
	uint32_t entry_count;
	uint32_t clk_freq;
	struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY];
};


#endif /* _DMUB_TRACE_BUFFER_H_ */
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef _DMUB_TYPES_H_
#define _DMUB_TYPES_H_

/* Basic type definitions. */
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <stdarg.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* Environment hooks: an out-of-kernel build may predefine these. */
#ifndef dmub_memcpy
#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
#endif

#ifndef dmub_memset
#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
#endif

#ifndef dmub_udelay
#define dmub_udelay(microseconds) udelay(microseconds)
#endif

/* 64-bit address viewable as low/high 32-bit halves. */
union dmub_addr {
	struct {
		uint32_t low_part;
		uint32_t high_part;
	} u;
	uint64_t quad_part;
};

#if defined(__cplusplus)
}
#endif

#endif /* _DMUB_TYPES_H_ */
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# + +DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o + +AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DMUB) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c new file mode 100644 index 000000000000..cd51c6138894 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -0,0 +1,202 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_reg.h" +#include "dmub_dcn20.h" + +#include "dcn/dcn_2_0_0_offset.h" +#include "dcn/dcn_2_0_0_sh_mask.h" +#include "soc15_hw_ip.h" +#include "vega10_ip_offset.h" + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg +#define CTX dmub +#define REGS dmub->regs + +/* Registers. */ + +const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { +#define DMUB_SR(reg) REG_OFFSET(reg), +	{ DMUB_COMMON_REGS() }, +#undef DMUB_SR + +#define DMUB_SF(reg, field) FD_MASK(reg, field), +	{ DMUB_COMMON_FIELDS() }, +#undef DMUB_SF + +#define DMUB_SF(reg, field) FD_SHIFT(reg, field), +	{ DMUB_COMMON_FIELDS() }, +#undef DMUB_SF +}; + +/* Shared functions. */ + +static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, +					     uint64_t fb_base, +					     uint64_t fb_offset, +					     union dmub_addr *addr_out) +{ +	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; +} + +void dmub_dcn20_reset(struct dmub_srv *dmub) +{ +	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); +	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); +	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); +} + +void dmub_dcn20_reset_release(struct dmub_srv *dmub) +{ +	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); +	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); +	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); +	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0); +} + +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, +			      const struct dmub_window *cw0, +			      const struct dmub_window *cw1) +{ +	union dmub_addr offset; +	uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + +	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); +	REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, +		     DMCUB_MEM_WRITE_SPACE, 0x3); + +	dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); +	
REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); +	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, +		  DMCUB_REGION3_CW0_ENABLE, 1); + +	dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); +	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, +		  DMCUB_REGION3_CW1_ENABLE, 1); + +	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, +		     0x20); +} + +void dmub_dcn20_setup_windows(struct dmub_srv *dmub, +			      const struct dmub_window *cw2, +			      const struct dmub_window *cw3, +			      const struct dmub_window *cw4, +			      const struct dmub_window *cw5, +			      const struct dmub_window *cw6) +{ +	union dmub_addr offset; +	uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + +	dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); +	REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, +		  DMCUB_REGION3_CW2_ENABLE, 1); + +	dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); +	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, +		  DMCUB_REGION3_CW3_ENABLE, 1); + +	/* TODO: Move this to CW4. 
*/ +	dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); +	REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, +		  cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, +		  1); + +	dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); +	REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, +		  DMCUB_REGION3_CW5_ENABLE, 1); + +	dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); + +	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); +	REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); +	REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); +	REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, +		  DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, +		  DMCUB_REGION3_CW6_ENABLE, 1); +} + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, +			      const struct dmub_region *inbox1) +{ +	/* TODO: Use CW4 instead of region 4. 
*/ + +	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); +	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); +	REG_WRITE(DMCUB_INBOX1_RPTR, 0); +	REG_WRITE(DMCUB_INBOX1_WPTR, 0); +} + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) +{ +	return REG_READ(DMCUB_INBOX1_RPTR); +} + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) +{ +	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); +} + +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub) +{ +	return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0; +} + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub) +{ +	uint32_t supported = 0; + +	REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); + +	return supported; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h new file mode 100644 index 000000000000..53bfd4da69ad --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -0,0 +1,182 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN20_H_ +#define _DMUB_DCN20_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* DCN20 register definitions. */ + +#define DMUB_COMMON_REGS() \ +	DMUB_SR(DMCUB_CNTL) \ +	DMUB_SR(DMCUB_MEM_CNTL) \ +	DMUB_SR(DMCUB_SEC_CNTL) \ +	DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_INBOX1_SIZE) \ +	DMUB_SR(DMCUB_INBOX1_RPTR) \ +	DMUB_SR(DMCUB_INBOX1_WPTR) \ +	DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \ +	DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \ +	
DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_REGION4_OFFSET) \ +	DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \ +	DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \ +	DMUB_SR(DMCUB_SCRATCH0) \ +	DMUB_SR(DMCUB_SCRATCH1) \ +	DMUB_SR(DMCUB_SCRATCH2) \ +	DMUB_SR(DMCUB_SCRATCH3) \ +	DMUB_SR(DMCUB_SCRATCH4) \ +	DMUB_SR(DMCUB_SCRATCH5) \ +	DMUB_SR(DMCUB_SCRATCH6) \ +	DMUB_SR(DMCUB_SCRATCH7) \ +	DMUB_SR(DMCUB_SCRATCH8) \ +	DMUB_SR(DMCUB_SCRATCH9) \ +	DMUB_SR(DMCUB_SCRATCH10) \ +	DMUB_SR(DMCUB_SCRATCH11) \ +	DMUB_SR(DMCUB_SCRATCH12) \ +	DMUB_SR(DMCUB_SCRATCH13) \ +	DMUB_SR(DMCUB_SCRATCH14) \ +	DMUB_SR(DMCUB_SCRATCH15) \ +	DMUB_SR(CC_DC_PIPE_DIS) \ +	DMUB_SR(MMHUBBUB_SOFT_RESET) + +#define DMUB_COMMON_FIELDS() \ +	DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ +	DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \ +	DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \ +	DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \ +	DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \ +	DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \ +	DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \ +	DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, 
DMCUB_REGION3_CW6_ENABLE) \ +	DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \ +	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ +	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ +	DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ +	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) + +struct dmub_srv_common_reg_offset { +#define DMUB_SR(reg) uint32_t reg; +	DMUB_COMMON_REGS() +#undef DMUB_SR +}; + +struct dmub_srv_common_reg_shift { +#define DMUB_SF(reg, field) uint8_t reg##__##field; +	DMUB_COMMON_FIELDS() +#undef DMUB_SF +}; + +struct dmub_srv_common_reg_mask { +#define DMUB_SF(reg, field) uint32_t reg##__##field; +	DMUB_COMMON_FIELDS() +#undef DMUB_SF +}; + +struct dmub_srv_common_regs { +	const struct dmub_srv_common_reg_offset offset; +	const struct dmub_srv_common_reg_mask mask; +	const struct dmub_srv_common_reg_shift shift; +}; + +extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs; + +/* Hardware functions. 
*/ + +void dmub_dcn20_init(struct dmub_srv *dmub); + +void dmub_dcn20_reset(struct dmub_srv *dmub); + +void dmub_dcn20_reset_release(struct dmub_srv *dmub); + +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, +			      const struct dmub_window *cw0, +			      const struct dmub_window *cw1); + +void dmub_dcn20_setup_windows(struct dmub_srv *dmub, +			      const struct dmub_window *cw2, +			      const struct dmub_window *cw3, +			      const struct dmub_window *cw4, +			      const struct dmub_window *cw5, +			      const struct dmub_window *cw6); + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, +			      const struct dmub_region *inbox1); + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); + +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c new file mode 100644 index 000000000000..5bed9fcd6b5c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_reg.h" +#include "dmub_dcn21.h" + +#include "dcn/dcn_2_1_0_offset.h" +#include "dcn/dcn_2_1_0_sh_mask.h" +#include "renoir_ip_offset.h" + +#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg +#define CTX dmub +#define REGS dmub->regs + +/* Registers. */ + +const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { +#define DMUB_SR(reg) REG_OFFSET(reg), +	{ DMUB_COMMON_REGS() }, +#undef DMUB_SR + +#define DMUB_SF(reg, field) FD_MASK(reg, field), +	{ DMUB_COMMON_FIELDS() }, +#undef DMUB_SF + +#define DMUB_SF(reg, field) FD_SHIFT(reg, field), +	{ DMUB_COMMON_FIELDS() }, +#undef DMUB_SF +}; + +/* Shared functions. */ + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) +{ +	return (REG_READ(DMCUB_SCRATCH0) == 3); +} + +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub) +{ +	return REG_READ(DMCUB_SCRATCH10) == 0; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h new file mode 100644 index 000000000000..2bbea237137b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN21_H_ +#define _DMUB_DCN21_H_ + +#include "dmub_dcn20.h" + +/* Registers. */ + +extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs; + +/* Hardware functions. */ + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); + +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c new file mode 100644 index 000000000000..4094eca212f0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c @@ -0,0 +1,109 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dmub_reg.h" +#include "../inc/dmub_srv.h" + +struct dmub_reg_value_masks { +	uint32_t value; +	uint32_t mask; +}; + +static inline void +set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask, +			  uint32_t value, uint32_t mask, uint8_t shift) +{ +	field_value_mask->value = +		(field_value_mask->value & ~mask) | (mask & (value << shift)); +	field_value_mask->mask = field_value_mask->mask | mask; +} + +static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask, +				 uint32_t addr, int n, uint8_t shift1, +				 uint32_t mask1, uint32_t field_value1, +				 va_list ap) +{ +	uint32_t shift, mask, field_value; +	int i = 1; + +	/* gather all bits value/mask getting updated in this register */ +	set_reg_field_value_masks(field_value_mask, field_value1, mask1, +				  shift1); + +	while (i < n) { +		shift = va_arg(ap, uint32_t); +		mask = va_arg(ap, uint32_t); +		field_value = va_arg(ap, uint32_t); + +		set_reg_field_value_masks(field_value_mask, field_value, mask, +					  shift); +		i++; +	} +} + +static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask, +					      uint8_t shift) +{ +	return (mask & reg_value) >> shift; +} + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, +		     uint32_t mask1, uint32_t field_value1, ...) +{ +	struct dmub_reg_value_masks field_value_mask = { 0 }; +	uint32_t reg_val; +	va_list ap; + +	va_start(ap, field_value1); +	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, +			     field_value1, ap); +	va_end(ap); + +	reg_val = srv->funcs.reg_read(srv->user_ctx, addr); +	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; +	srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, +		  uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) 
+{ +	struct dmub_reg_value_masks field_value_mask = { 0 }; +	va_list ap; + +	va_start(ap, field_value1); +	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, +			     field_value1, ap); +	va_end(ap); + +	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; +	srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, +		  uint32_t mask, uint32_t *field_value) +{ +	uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr); +	*field_value = get_reg_field_value_ex(reg_val, mask, shift); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h new file mode 100644 index 000000000000..c1f4030929a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h @@ -0,0 +1,124 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_REG_H_ +#define _DMUB_REG_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* Register offset and field lookup. */ + +#define BASE(seg) BASE_INNER(seg) + +#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name) + +#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT + +#define FD_MASK(reg_name, field) reg_name##__##field##_MASK + +#define REG(reg) (REGS)->offset.reg + +#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field + +#define FN(reg_name, field) FD(reg_name##__##field) + +/* Register reads and writes. */ + +#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg))) + +#define REG_WRITE(reg, val) \ +	((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val))) + +/* Register field setting. */ + +#define REG_SET_N(reg_name, n, initial_val, ...) \ +	dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__) + +#define REG_SET(reg_name, initial_val, field, val) \ +		REG_SET_N(reg_name, 1, initial_val, \ +				FN(reg_name, field), val) + +#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \ +		REG_SET_N(reg, 2, init_value, \ +				FN(reg, f1), v1, \ +				FN(reg, f2), v2) + +#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \ +		REG_SET_N(reg, 3, init_value, \ +				FN(reg, f1), v1, \ +				FN(reg, f2), v2, \ +				FN(reg, f3), v3) + +#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \ +		REG_SET_N(reg, 4, init_value, \ +				FN(reg, f1), v1, \ +				FN(reg, f2), v2, \ +				FN(reg, f3), v3, \ +				FN(reg, f4), v4) + +/* Register field updating. 
*/ + +#define REG_UPDATE_N(reg_name, n, ...)\ +		dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__) + +#define REG_UPDATE(reg_name, field, val)	\ +		REG_UPDATE_N(reg_name, 1, \ +				FN(reg_name, field), val) + +#define REG_UPDATE_2(reg, f1, v1, f2, v2)	\ +		REG_UPDATE_N(reg, 2,\ +				FN(reg, f1), v1,\ +				FN(reg, f2), v2) + +#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \ +		REG_UPDATE_N(reg, 3, \ +				FN(reg, f1), v1, \ +				FN(reg, f2), v2, \ +				FN(reg, f3), v3) + +#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \ +		REG_UPDATE_N(reg, 4, \ +				FN(reg, f1), v1, \ +				FN(reg, f2), v2, \ +				FN(reg, f3), v3, \ +				FN(reg, f4), v4) + +/* Register field getting. */ + +#define REG_GET(reg_name, field, val) \ +	dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val) + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, +		  uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, +		     uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, +		  uint32_t mask, uint32_t *field_value); + +#endif /* _DMUB_REG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c new file mode 100644 index 000000000000..dee676335d73 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -0,0 +1,505 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_dcn20.h" +#include "dmub_dcn21.h" +#include "dmub_fw_meta.h" +#include "os_types.h" +/* + * Note: the DMUB service is standalone. No additional headers should be + * added below or above this line unless they reside within the DMUB + * folder. + */ + +/* Alignment for framebuffer memory. */ +#define DMUB_FB_ALIGNMENT (1024 * 1024) + +/* Stack size. */ +#define DMUB_STACK_SIZE (128 * 1024) + +/* Context size. */ +#define DMUB_CONTEXT_SIZE (512 * 1024) + +/* Mailbox size */ +#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) + +/* Default state size if meta is absent. */ +#define DMUB_FW_STATE_SIZE (1024) + +/* Default tracebuffer size if meta is absent. */ +#define DMUB_TRACE_BUFFER_SIZE (1024) + +/* Number of windows in use. */ +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) +/* Base addresses. 
*/ + +#define DMUB_CW0_BASE (0x60000000) +#define DMUB_CW1_BASE (0x61000000) +#define DMUB_CW3_BASE (0x63000000) +#define DMUB_CW5_BASE (0x65000000) +#define DMUB_CW6_BASE (0x66000000) + +static inline uint32_t dmub_align(uint32_t val, uint32_t factor) +{ +	return (val + factor - 1) / factor * factor; +} + +static void dmub_flush_buffer_mem(const struct dmub_fb *fb) +{ +	const uint8_t *base = (const uint8_t *)fb->cpu_addr; +	uint8_t buf[64]; +	uint32_t pos, end; + +	/** +	 * Read 64-byte chunks since we don't want to store a +	 * large temporary buffer for this purpose. +	 */ +	end = fb->size / sizeof(buf) * sizeof(buf); + +	for (pos = 0; pos < end; pos += sizeof(buf)) +		dmub_memcpy(buf, base + pos, sizeof(buf)); + +	/* Read anything leftover into the buffer. */ +	if (end < fb->size) +		dmub_memcpy(buf, base + pos, fb->size - end); +} + +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size) +{ +	const union dmub_fw_meta *meta; + +	if (fw_bss_data == NULL) +		return NULL; + +	if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET) +		return NULL; + +	meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size - +					    DMUB_FW_META_OFFSET - +					    sizeof(union dmub_fw_meta)); + +	if (meta->info.magic_value != DMUB_FW_META_MAGIC) +		return NULL; + +	return &meta->info; +} + +static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) +{ +	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; + +	switch (asic) { +	case DMUB_ASIC_DCN20: +	case DMUB_ASIC_DCN21: +		dmub->regs = &dmub_srv_dcn20_regs; + +		funcs->reset = dmub_dcn20_reset; +		funcs->reset_release = dmub_dcn20_reset_release; +		funcs->backdoor_load = dmub_dcn20_backdoor_load; +		funcs->setup_windows = dmub_dcn20_setup_windows; +		funcs->setup_mailbox = dmub_dcn20_setup_mailbox; +		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; +		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; +		
funcs->is_supported = dmub_dcn20_is_supported; +		funcs->is_hw_init = dmub_dcn20_is_hw_init; + +		if (asic == DMUB_ASIC_DCN21) { +			dmub->regs = &dmub_srv_dcn21_regs; + +			funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; +			funcs->is_phy_init = dmub_dcn21_is_phy_init; +		} +		break; + +	default: +		return false; +	} + +	return true; +} + +enum dmub_status dmub_srv_create(struct dmub_srv *dmub, +				 const struct dmub_srv_create_params *params) +{ +	enum dmub_status status = DMUB_STATUS_OK; + +	dmub_memset(dmub, 0, sizeof(*dmub)); + +	dmub->funcs = params->funcs; +	dmub->user_ctx = params->user_ctx; +	dmub->asic = params->asic; +	dmub->is_virtual = params->is_virtual; + +	/* Setup asic dependent hardware funcs. */ +	if (!dmub_srv_hw_setup(dmub, params->asic)) { +		status = DMUB_STATUS_INVALID; +		goto cleanup; +	} + +	/* Override (some) hardware funcs based on user params. */ +	if (params->hw_funcs) { +		if (params->hw_funcs->get_inbox1_rptr) +			dmub->hw_funcs.get_inbox1_rptr = +				params->hw_funcs->get_inbox1_rptr; + +		if (params->hw_funcs->set_inbox1_wptr) +			dmub->hw_funcs.set_inbox1_wptr = +				params->hw_funcs->set_inbox1_wptr; + +		if (params->hw_funcs->is_supported) +			dmub->hw_funcs.is_supported = +				params->hw_funcs->is_supported; +	} + +	/* Sanity checks for required hw func pointers. 
*/ +	if (!dmub->hw_funcs.get_inbox1_rptr || +	    !dmub->hw_funcs.set_inbox1_wptr) { +		status = DMUB_STATUS_INVALID; +		goto cleanup; +	} + +cleanup: +	if (status == DMUB_STATUS_OK) +		dmub->sw_init = true; +	else +		dmub_srv_destroy(dmub); + +	return status; +} + +void dmub_srv_destroy(struct dmub_srv *dmub) +{ +	dmub_memset(dmub, 0, sizeof(*dmub)); +} + +enum dmub_status +dmub_srv_calc_region_info(struct dmub_srv *dmub, +			  const struct dmub_srv_region_params *params, +			  struct dmub_srv_region_info *out) +{ +	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; +	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; +	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; +	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; +	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; +	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; +	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; +	const struct dmub_fw_meta_info *fw_info; +	uint32_t fw_state_size = DMUB_FW_STATE_SIZE; +	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	memset(out, 0, sizeof(*out)); + +	out->num_regions = DMUB_NUM_WINDOWS; + +	inst->base = 0x0; +	inst->top = inst->base + params->inst_const_size; + +	data->base = dmub_align(inst->top, 256); +	data->top = data->base + params->bss_data_size; + +	/* +	 * All cache windows below should be aligned to the size +	 * of the DMCUB cache line, 64 bytes. 
+	 */ + +	stack->base = dmub_align(data->top, 256); +	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; + +	bios->base = dmub_align(stack->top, 256); +	bios->top = bios->base + params->vbios_size; + +	mail->base = dmub_align(bios->top, 256); +	mail->top = mail->base + DMUB_MAILBOX_SIZE; + +	fw_info = dmub_get_fw_meta_info(params->fw_bss_data, +					params->bss_data_size); + +	if (fw_info) { +		fw_state_size = fw_info->fw_region_size; +		trace_buffer_size = fw_info->trace_buffer_size; +	} + +	trace_buff->base = dmub_align(mail->top, 256); +	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); + +	fw_state->base = dmub_align(trace_buff->top, 256); +	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); + +	out->fb_size = dmub_align(fw_state->top, 4096); + +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, +				       const struct dmub_srv_fb_params *params, +				       struct dmub_srv_fb_info *out) +{ +	uint8_t *cpu_base; +	uint64_t gpu_base; +	uint32_t i; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	memset(out, 0, sizeof(*out)); + +	if (params->region_info->num_regions != DMUB_NUM_WINDOWS) +		return DMUB_STATUS_INVALID; + +	cpu_base = (uint8_t *)params->cpu_addr; +	gpu_base = params->gpu_addr; + +	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { +		const struct dmub_region *reg = +			&params->region_info->regions[i]; + +		out->fb[i].cpu_addr = cpu_base + reg->base; +		out->fb[i].gpu_addr = gpu_base + reg->base; +		out->fb[i].size = reg->top - reg->base; +	} + +	out->num_fb = DMUB_NUM_WINDOWS; + +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, +					 bool *is_supported) +{ +	*is_supported = false; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	if (dmub->hw_funcs.is_supported) +		*is_supported = dmub->hw_funcs.is_supported(dmub); + +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool 
*is_hw_init) +{ +	*is_hw_init = false; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	if (dmub->hw_funcs.is_hw_init) +		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub); + +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, +				  const struct dmub_srv_hw_params *params) +{ +	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST]; +	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK]; +	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA]; +	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; +	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; +	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; +	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; + +	struct dmub_rb_init_params rb_params; +	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; +	struct dmub_region inbox1; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	dmub->fb_base = params->fb_base; +	dmub->fb_offset = params->fb_offset; +	dmub->psp_version = params->psp_version; + +	if (inst_fb && data_fb) { +		cw0.offset.quad_part = inst_fb->gpu_addr; +		cw0.region.base = DMUB_CW0_BASE; +		cw0.region.top = cw0.region.base + inst_fb->size - 1; + +		cw1.offset.quad_part = stack_fb->gpu_addr; +		cw1.region.base = DMUB_CW1_BASE; +		cw1.region.top = cw1.region.base + stack_fb->size - 1; + +		/** +		 * Read back all the instruction memory so we don't hang the +		 * DMCUB when backdoor loading if the write from x86 hasn't been +		 * flushed yet. This only occurs in backdoor loading. 
+		 */ +		dmub_flush_buffer_mem(inst_fb); + +		if (params->load_inst_const && dmub->hw_funcs.backdoor_load) +			dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); +	} + +	if (dmub->hw_funcs.reset) +		dmub->hw_funcs.reset(dmub); + +	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && +	    fw_state_fb) { +		cw2.offset.quad_part = data_fb->gpu_addr; +		cw2.region.base = DMUB_CW0_BASE + inst_fb->size; +		cw2.region.top = cw2.region.base + data_fb->size; + +		cw3.offset.quad_part = bios_fb->gpu_addr; +		cw3.region.base = DMUB_CW3_BASE; +		cw3.region.top = cw3.region.base + bios_fb->size; + +		cw4.offset.quad_part = mail_fb->gpu_addr; +		cw4.region.base = cw3.region.top + 1; +		cw4.region.top = cw4.region.base + mail_fb->size; + +		inbox1.base = cw4.region.base; +		inbox1.top = cw4.region.top; + +		cw5.offset.quad_part = tracebuff_fb->gpu_addr; +		cw5.region.base = DMUB_CW5_BASE; +		cw5.region.top = cw5.region.base + tracebuff_fb->size; + +		cw6.offset.quad_part = fw_state_fb->gpu_addr; +		cw6.region.base = DMUB_CW6_BASE; +		cw6.region.top = cw6.region.base + fw_state_fb->size; + +		dmub->fw_state = fw_state_fb->cpu_addr; + +		if (dmub->hw_funcs.setup_windows) +			dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, +						     &cw5, &cw6); + +		if (dmub->hw_funcs.setup_mailbox) +			dmub->hw_funcs.setup_mailbox(dmub, &inbox1); +	} + +	if (mail_fb) { +		dmub_memset(&rb_params, 0, sizeof(rb_params)); +		rb_params.ctx = dmub; +		rb_params.base_address = mail_fb->cpu_addr; +		rb_params.capacity = DMUB_RB_SIZE; + +		dmub_rb_init(&dmub->inbox1_rb, &rb_params); +	} + +	if (dmub->hw_funcs.reset_release) +		dmub->hw_funcs.reset_release(dmub); + +	dmub->hw_init = true; + +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, +				    const struct dmub_cmd_header *cmd) +{ +	if (!dmub->hw_init) +		return DMUB_STATUS_INVALID; + +	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) +		return DMUB_STATUS_OK; + +	return DMUB_STATUS_QUEUE_FULL; +} 
+ +enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) +{ +	if (!dmub->hw_init) +		return DMUB_STATUS_INVALID; + +	/** +	 * Read back all the queued commands to ensure that they've +	 * been flushed to framebuffer memory. Otherwise DMCUB might +	 * read back stale, fully invalid or partially invalid data. +	 */ +	dmub_rb_flush_pending(&dmub->inbox1_rb); + +	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); +	return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, +					     uint32_t timeout_us) +{ +	uint32_t i; + +	if (!dmub->hw_init) +		return DMUB_STATUS_INVALID; + +	if (!dmub->hw_funcs.is_auto_load_done) +		return DMUB_STATUS_OK; + +	for (i = 0; i <= timeout_us; i += 100) { +		if (dmub->hw_funcs.is_auto_load_done(dmub)) +			return DMUB_STATUS_OK; + +		udelay(100); +	} + +	return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, +					    uint32_t timeout_us) +{ +	uint32_t i = 0; + +	if (!dmub->hw_init) +		return DMUB_STATUS_INVALID; + +	if (!dmub->hw_funcs.is_phy_init) +		return DMUB_STATUS_OK; + +	for (i = 0; i <= timeout_us; i += 10) { +		if (dmub->hw_funcs.is_phy_init(dmub)) +			return DMUB_STATUS_OK; + +		udelay(10); +	} + +	return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, +					uint32_t timeout_us) +{ +	uint32_t i; + +	if (!dmub->hw_init) +		return DMUB_STATUS_INVALID; + +	for (i = 0; i <= timeout_us; ++i) { +		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); +		if (dmub_rb_empty(&dmub->inbox1_rb)) +			return DMUB_STATUS_OK; + +		udelay(1); +	} + +	return DMUB_STATUS_TIMEOUT; +} diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 1be6c44fd32f..a2903985b9e8 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,23 +134,34 @@  #define PICASSO_A0 0x41  /* 
DCN1_01 */  #define RAVEN2_A0 0x81 +#define RAVEN2_15D8_REV_94 0x94 +#define RAVEN2_15D8_REV_95 0x95 +#define RAVEN2_15D8_REV_E3 0xE3 +#define RAVEN2_15D8_REV_E4 0xE4 +#define RAVEN2_15D8_REV_E9 0xE9 +#define RAVEN2_15D8_REV_EA 0xEA +#define RAVEN2_15D8_REV_EB 0xEB  #define RAVEN1_F0 0xF0  #define RAVEN_UNKNOWN 0xFF - -#define PICASSO_15D8_REV_E3 0xE3 -#define PICASSO_15D8_REV_E4 0xE4 - +#ifndef ASICREV_IS_RAVEN  #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) -#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) -#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3)) -#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0)) +#endif +#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) +#ifndef ASICREV_IS_RAVEN2 +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) +#endif  #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) - +#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ +		|| (eChipRev == RAVEN2_15D8_REV_E4)) +#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \ +		|| eChipRev == RAVEN2_15D8_REV_95 \ +			|| eChipRev == RAVEN2_15D8_REV_E9 \ +				|| eChipRev == RAVEN2_15D8_REV_EA \ +					|| eChipRev == RAVEN2_15D8_REV_EB)  #define FAMILY_RV 142 /* DCN 1*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define FAMILY_NV 143 /* DCN 2*/ @@ -164,12 +175,9 @@ enum {  #define ASICREV_IS_NAVI10_P(eChipRev)        (eChipRev < NV_NAVI12_P_A0)  #define ASICREV_IS_NAVI12_P(eChipRev)        ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))  #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  #define RENOIR_A0 0x91  #define DEVICE_ID_RENOIR_1636 0x1636   // Renoir  
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF)) -#endif  /*   * ASIC chip ID diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index fcc42372b6cf..0b6859189ca7 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -46,12 +46,8 @@ enum dce_version {  	DCE_VERSION_MAX,  	DCN_VERSION_1_0,  	DCN_VERSION_1_01, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  	DCN_VERSION_2_0, -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1)  	DCN_VERSION_2_1, -#endif  	DCN_VERSION_MAX  }; diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index f312834fef50..d51de94e4bc3 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -178,7 +178,8 @@ struct dc_firmware_info {  	uint32_t default_engine_clk; /* in KHz */  	uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */  	uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */ - +	bool oem_i2c_present; +	uint8_t oem_i2c_obj_id;  }; diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index bb012cb1a9f5..c7fbb9c3ad6b 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -42,7 +42,7 @@ struct aux_payload {  	bool write;  	bool mot;  	uint32_t address; -	uint8_t length; +	uint32_t length;  	uint8_t *data;  	/*  	 * used to return the reply type of the transaction diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 876b0b3e1a9c..4869d4562e4d 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -123,6 +123,13 @@ enum 
dp_test_pattern {  	DP_TEST_PATTERN_UNSUPPORTED  }; +enum dp_test_pattern_color_space { +	DP_TEST_PATTERN_COLOR_SPACE_RGB, +	DP_TEST_PATTERN_COLOR_SPACE_YCBCR601, +	DP_TEST_PATTERN_COLOR_SPACE_YCBCR709, +	DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED +}; +  enum dp_panel_mode {  	/* not required */  	DP_PANEL_MODE_DEFAULT, diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 2b219cdb13ad..89a709267019 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -66,12 +66,8 @@  #define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)  #define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)  #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0)  #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) -#endif  struct dal_logger; @@ -116,9 +112,7 @@ enum dc_log_type {  	LOG_PERF_TRACE,  	LOG_DISPLAYSTATS,  	LOG_HDMI_RETIMER_REDRIVER, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT  	LOG_DSC, -#endif  	LOG_DWB,  	LOG_GAMMA_DEBUG,  	LOG_MAX_HW_POINTS, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 1de4805cb8c7..1b278c42809a 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)  	struct fixed31_32 l_pow_m1;  	struct fixed31_32 base, div; +	struct fixed31_32 base2;  	if (dc_fixpt_lt(in_x, dc_fixpt_zero)) @@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)  			dc_fixpt_div(dc_fixpt_one, m2));  	base = dc_fixpt_sub(l_pow_m1, c1); -	if (dc_fixpt_lt(base, dc_fixpt_zero)) -		
base = dc_fixpt_zero; -  	div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1)); -	*out_y = dc_fixpt_pow(dc_fixpt_div(base, div), -			dc_fixpt_div(dc_fixpt_one, m1)); +	base2 = dc_fixpt_div(base, div); +	//avoid complex numbers +	if (dc_fixpt_lt(base2, dc_fixpt_zero)) +		base2 = dc_fixpt_sub(dc_fixpt_zero, base2); + + +	*out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));  } @@ -361,8 +364,10 @@ static struct fixed31_32 translate_from_linear_space(  			scratch_2 = dc_fixpt_mul(gamma_of_2,  					pow_buffer[pow_buffer_ptr%16]); -		pow_buffer[pow_buffer_ptr%16] = scratch_2; -		pow_buffer_ptr++; +		if (pow_buffer_ptr != -1) { +			pow_buffer[pow_buffer_ptr%16] = scratch_2; +			pow_buffer_ptr++; +		}  		scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);  		scratch_1 = dc_fixpt_sub(scratch_1, args->a2); @@ -937,7 +942,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,  	struct fixed31_32 max_display;  	struct fixed31_32 min_display;  	struct fixed31_32 max_content; -	struct fixed31_32 min_content;  	struct fixed31_32 clip = dc_fixpt_one;  	struct fixed31_32 output;  	bool use_eetf = false; @@ -951,7 +955,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,  	max_display = dc_fixpt_from_int(fs_params->max_display);  	min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);  	max_content = dc_fixpt_from_int(fs_params->max_content); -	min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);  	sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);  	if (fs_params->min_display > 1000) // cap at 0.1 at the bottom @@ -2000,10 +2003,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,  	tf_pts->x_point_at_y1_green = 1;  	tf_pts->x_point_at_y1_blue = 1; -	map_regamma_hw_to_x_user(ramp, coeff, rgb_user, -			coordinates_x, axis_x, curve, -			MAX_HW_POINTS, tf_pts, -			mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); +	if (input_tf->tf == TRANSFER_FUNCTION_PQ) { +		/* 
just copy current rgb_regamma into  tf_pts */ +		struct pwl_float_data_ex *curvePt = curve; +		int i = 0; + +		while (i <= MAX_HW_POINTS) { +			tf_pts->red[i]   = curvePt->r; +			tf_pts->green[i] = curvePt->g; +			tf_pts->blue[i]  = curvePt->b; +			++curvePt; +			++i; +		} +	} else { +		//clamps to 0-1 +		map_regamma_hw_to_x_user(ramp, coeff, rgb_user, +				coordinates_x, axis_x, curve, +				MAX_HW_POINTS, tf_pts, +				mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); +	} + + +  	if (ramp->type == GAMMA_CUSTOM)  		apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 16e69bbc69aa..6e5ecefe7d9d 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh(  		const struct dc_stream_state *stream,  		unsigned int refresh_in_uhz)  { -	unsigned int v_total = stream->timing.v_total; +	unsigned int v_total;  	unsigned int frame_duration_in_ns;  	frame_duration_in_ns = @@ -816,6 +816,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,  	in_out_vrr->btr.inserted_duration_in_us = 0;  	in_out_vrr->btr.frames_to_insert = 0;  	in_out_vrr->btr.frame_counter = 0; +	in_out_vrr->fixed.fixed_active = false; +	in_out_vrr->fixed.target_refresh_in_uhz = 0;  	in_out_vrr->btr.mid_point_in_us =  				(in_out_vrr->min_duration_in_us + @@ -832,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,  		in_out_vrr->adjust.v_total_max = stream->timing.v_total;  	} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&  			refresh_range >= MIN_REFRESH_RANGE_IN_US) { +  		in_out_vrr->adjust.v_total_min =  			calc_v_total_from_refresh(stream,  				in_out_vrr->max_refresh_in_uhz); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile 
b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile index 1c3c6d47973a..904424da01b5 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -24,7 +24,8 @@  #  HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ -		hdcp1_execution.o hdcp1_transition.o +		hdcp1_execution.o hdcp1_transition.o \ +		hdcp2_execution.o hdcp2_transition.o  AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))  #$(info ************  DAL-HDCP_MAKEFILE ************) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index d7ac445dec6f..8aa528e874c4 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -37,24 +37,52 @@ static void push_error_status(struct mod_hdcp *hdcp,  		HDCP_ERROR_TRACE(hdcp, status);  	} -	hdcp->connection.hdcp1_retry_count++; +	if (is_hdcp1(hdcp)) { +		hdcp->connection.hdcp1_retry_count++; +	} else if (is_hdcp2(hdcp)) { +		hdcp->connection.hdcp2_retry_count++; +	}  }  static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)  { -	int i, display_enabled = 0; +	int i, is_auth_needed = 0; -	/* if all displays on the link are disabled, hdcp is not desired */ +	/* if all displays on the link don't need authentication, +	 * hdcp is not desired +	 */  	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {  		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&  				!hdcp->connection.displays[i].adjust.disable) { -			display_enabled = 1; +			is_auth_needed = 1;  			break;  		}  	}  	return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && -			display_enabled && !hdcp->connection.link.adjust.hdcp1.disable; +			is_auth_needed && +			!hdcp->connection.link.adjust.hdcp1.disable; +} + +static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) +{ +	int i, is_auth_needed = 0; + +	/* if all displays on the link don't need authentication, +	 * hdcp is not desired +	 */ +	
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { +		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && +				!hdcp->connection.displays[i].adjust.disable) { +			is_auth_needed = 1; +			break; +		} +	} + +	return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) && +			is_auth_needed && +			!hdcp->connection.link.adjust.hdcp2.disable && +			!hdcp->connection.is_hdcp2_revoked;  }  static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, @@ -82,6 +110,11 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,  	} else if (is_in_hdcp1_dp_states(hdcp)) {  		status = mod_hdcp_hdcp1_dp_execution(hdcp,  				event_ctx, &input->hdcp1); +	} else if (is_in_hdcp2_states(hdcp)) { +		status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2); +	} else if (is_in_hdcp2_dp_states(hdcp)) { +		status = mod_hdcp_hdcp2_dp_execution(hdcp, +				event_ctx, &input->hdcp2);  	}  out:  	return status; @@ -99,7 +132,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,  	if (is_in_initialized_state(hdcp)) {  		if (is_dp_hdcp(hdcp)) -			if (is_cp_desired_hdcp1(hdcp)) { +			if (is_cp_desired_hdcp2(hdcp)) { +				callback_in_ms(0, output); +				set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE); +			} else if (is_cp_desired_hdcp1(hdcp)) {  				callback_in_ms(0, output);  				set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);  			} else { @@ -107,7 +143,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,  				set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);  			}  		else if (is_hdmi_dvi_sl_hdcp(hdcp)) -			if (is_cp_desired_hdcp1(hdcp)) { +			if (is_cp_desired_hdcp2(hdcp)) { +				callback_in_ms(0, output); +				set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX); +			} else if (is_cp_desired_hdcp1(hdcp)) {  				callback_in_ms(0, output);  				set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);  			} else { @@ -126,6 +165,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,  	} else if 
(is_in_hdcp1_dp_states(hdcp)) {  		status = mod_hdcp_hdcp1_dp_transition(hdcp,  				event_ctx, &input->hdcp1, output); +	} else if (is_in_hdcp2_states(hdcp)) { +		status = mod_hdcp_hdcp2_transition(hdcp, +				event_ctx, &input->hdcp2, output); +	} else if (is_in_hdcp2_dp_states(hdcp)) { +		status = mod_hdcp_hdcp2_dp_transition(hdcp, +				event_ctx, &input->hdcp2, output);  	} else {  		status = MOD_HDCP_STATUS_INVALID_STATE;  	} @@ -139,9 +184,13 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	if (is_hdcp1(hdcp)) { -		if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) +		if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) { +			/* TODO - update psp to unify create session failure +			 * recovery between hdcp1 and 2. +			 */  			mod_hdcp_hdcp1_destroy_session(hdcp); +		}  		if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {  			status = mod_hdcp_remove_display_topology(hdcp);  			if (status != MOD_HDCP_STATUS_SUCCESS) { @@ -154,6 +203,27 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,  		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));  		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));  		set_state_id(hdcp, output, HDCP_INITIALIZED); +	} else if (is_hdcp2(hdcp)) { +		if (hdcp->auth.trans_input.hdcp2.create_session == PASS) { +			status = mod_hdcp_hdcp2_destroy_session(hdcp); +			if (status != MOD_HDCP_STATUS_SUCCESS) { +				output->callback_needed = 0; +				output->watchdog_timer_needed = 0; +				goto out; +			} +		} +		if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) { +			status = mod_hdcp_remove_display_topology(hdcp); +			if (status != MOD_HDCP_STATUS_SUCCESS) { +				output->callback_needed = 0; +				output->watchdog_timer_needed = 0; +				goto out; +			} +		} +		HDCP_TOP_RESET_AUTH_TRACE(hdcp); +		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); +		memset(&hdcp->state, 0, 
sizeof(struct mod_hdcp_state)); +		set_state_id(hdcp, output, HDCP_INITIALIZED);  	} else if (is_in_cp_not_desired_state(hdcp)) {  		status = mod_hdcp_remove_display_topology(hdcp);  		if (status != MOD_HDCP_STATUS_SUCCESS) { @@ -347,7 +417,20 @@ enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,  	query->trace = &hdcp->connection.trace;  	query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; -	mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); +	if (is_display_encryption_enabled(display)) { +		if (is_hdcp1(hdcp)) { +			query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; +		} else if (is_hdcp2(hdcp)) { +			if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) +				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; +			else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1) +				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; +			else +				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON; +		} +	} else { +		query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; +	}  out:  	return status; @@ -420,7 +503,7 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(  		break;  	default:  		break; -	}; +	}  	return mode;  } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 5664bc0b5bd0..f98d3d9ecb6d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -29,32 +29,8 @@  #include "mod_hdcp.h"  #include "hdcp_log.h" -#define BCAPS_READY_MASK				0x20 -#define BCAPS_REPEATER_MASK				0x40 -#define BSTATUS_DEVICE_COUNT_MASK			0X007F -#define BSTATUS_MAX_DEVS_EXCEEDED_MASK			0x0080 -#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK		0x0800 -#define BCAPS_HDCP_CAPABLE_MASK_DP			0x01 -#define BCAPS_REPEATER_MASK_DP				0x02 -#define BSTATUS_READY_MASK_DP				0x01 -#define 
BSTATUS_R0_P_AVAILABLE_MASK_DP			0x02 -#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP		0x04 -#define BSTATUS_REAUTH_REQUEST_MASK_DP			0x08 -#define BINFO_DEVICE_COUNT_MASK_DP			0X007F -#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP			0x0080 -#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP		0x0800 - -#define RXSTATUS_MSG_SIZE_MASK				0x03FF -#define RXSTATUS_READY_MASK				0x0400 -#define RXSTATUS_REAUTH_REQUEST_MASK			0x0800 -#define RXIDLIST_DEVICE_COUNT_LOWER_MASK		0xf0 -#define RXIDLIST_DEVICE_COUNT_UPPER_MASK		0x01 -#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP		0x02 -#define RXSTATUS_READY_MASK_DP				0x0001 -#define RXSTATUS_H_P_AVAILABLE_MASK_DP			0x0002 -#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP		0x0004 -#define RXSTATUS_REAUTH_REQUEST_MASK_DP			0x0008 -#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP		0x0010 +#include <drm/drm_hdcp.h> +#include <drm/drm_dp_helper.h>  enum mod_hdcp_trans_input_result {  	UNKNOWN = 0, @@ -92,8 +68,52 @@ struct mod_hdcp_transition_input_hdcp1 {  	uint8_t stream_encryption_dp;  }; +struct mod_hdcp_transition_input_hdcp2 { +	uint8_t hdcp2version_read; +	uint8_t hdcp2_capable_check; +	uint8_t add_topology; +	uint8_t create_session; +	uint8_t ake_init_prepare; +	uint8_t ake_init_write; +	uint8_t rxstatus_read; +	uint8_t ake_cert_available; +	uint8_t ake_cert_read; +	uint8_t ake_cert_validation; +	uint8_t stored_km_write; +	uint8_t no_stored_km_write; +	uint8_t h_prime_available; +	uint8_t h_prime_read; +	uint8_t pairing_available; +	uint8_t pairing_info_read; +	uint8_t h_prime_validation; +	uint8_t lc_init_prepare; +	uint8_t lc_init_write; +	uint8_t l_prime_available_poll; +	uint8_t l_prime_read; +	uint8_t l_prime_validation; +	uint8_t eks_prepare; +	uint8_t eks_write; +	uint8_t enable_encryption; +	uint8_t reauth_request_check; +	uint8_t rx_id_list_read; +	uint8_t device_count_check; +	uint8_t rx_id_list_validation; +	uint8_t repeater_auth_ack_write; +	uint8_t prepare_stream_manage; +	uint8_t stream_manage_write; +	uint8_t stream_ready_available; 
+	uint8_t stream_ready_read; +	uint8_t stream_ready_validation; + +	uint8_t rx_caps_read_dp; +	uint8_t content_stream_type_write; +	uint8_t link_integrity_check_dp; +	uint8_t stream_encryption_dp; +}; +  union mod_hdcp_transition_input {  	struct mod_hdcp_transition_input_hdcp1 hdcp1; +	struct mod_hdcp_transition_input_hdcp2 hdcp2;  };  struct mod_hdcp_message_hdcp1 { @@ -111,8 +131,33 @@ struct mod_hdcp_message_hdcp1 {  	uint16_t	binfo_dp;  }; +struct mod_hdcp_message_hdcp2 { +	uint8_t		hdcp2version_hdmi; +	uint8_t		rxcaps_dp[3]; +	uint8_t		rxstatus[2]; + +	uint8_t		ake_init[12]; +	uint8_t		ake_cert[534]; +	uint8_t		ake_no_stored_km[129]; +	uint8_t		ake_stored_km[33]; +	uint8_t		ake_h_prime[33]; +	uint8_t		ake_pairing_info[17]; +	uint8_t		lc_init[9]; +	uint8_t		lc_l_prime[33]; +	uint8_t		ske_eks[25]; +	uint8_t		rx_id_list[177]; // 22 + 5 * 31 +	uint16_t	rx_id_list_size; +	uint8_t		repeater_auth_ack[17]; +	uint8_t		repeater_auth_stream_manage[68]; // 6 + 2 * 31 +	uint16_t	stream_manage_size; +	uint8_t		repeater_auth_stream_ready[33]; +	uint8_t		rxstatus_dp; +	uint8_t		content_stream_type_dp[2]; +}; +  union mod_hdcp_message {  	struct mod_hdcp_message_hdcp1 hdcp1; +	struct mod_hdcp_message_hdcp2 hdcp2;  };  struct mod_hdcp_auth_counters { @@ -125,8 +170,10 @@ struct mod_hdcp_connection {  	struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];  	uint8_t is_repeater;  	uint8_t is_km_stored; +	uint8_t is_hdcp2_revoked;  	struct mod_hdcp_trace trace;  	uint8_t hdcp1_retry_count; +	uint8_t hdcp2_retry_count;  };  /* contains values per authentication cycle */ @@ -194,6 +241,50 @@ enum mod_hdcp_hdcp1_dp_state_id {  	HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,  }; +enum mod_hdcp_hdcp2_state_id { +	HDCP2_STATE_START = HDCP1_DP_STATE_END, +	H2_A0_KNOWN_HDCP2_CAPABLE_RX, +	H2_A1_SEND_AKE_INIT, +	H2_A1_VALIDATE_AKE_CERT, +	H2_A1_SEND_NO_STORED_KM, +	H2_A1_READ_H_PRIME, +	H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, +	H2_A1_SEND_STORED_KM, +	H2_A1_VALIDATE_H_PRIME, +	
H2_A2_LOCALITY_CHECK, +	H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER, +	H2_ENABLE_ENCRYPTION, +	H2_A5_AUTHENTICATED, +	H2_A6_WAIT_FOR_RX_ID_LIST, +	H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, +	H2_A9_SEND_STREAM_MANAGEMENT, +	H2_A9_VALIDATE_STREAM_READY, +	HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY, +}; + +enum mod_hdcp_hdcp2_dp_state_id { +	HDCP2_DP_STATE_START = HDCP2_STATE_END, +	D2_A0_DETERMINE_RX_HDCP_CAPABLE, +	D2_A1_SEND_AKE_INIT, +	D2_A1_VALIDATE_AKE_CERT, +	D2_A1_SEND_NO_STORED_KM, +	D2_A1_READ_H_PRIME, +	D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, +	D2_A1_SEND_STORED_KM, +	D2_A1_VALIDATE_H_PRIME, +	D2_A2_LOCALITY_CHECK, +	D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER, +	D2_SEND_CONTENT_STREAM_TYPE, +	D2_ENABLE_ENCRYPTION, +	D2_A5_AUTHENTICATED, +	D2_A6_WAIT_FOR_RX_ID_LIST, +	D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, +	D2_A9_SEND_STREAM_MANAGEMENT, +	D2_A9_VALIDATE_STREAM_READY, +	HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY, +	HDCP_STATE_END = HDCP2_DP_STATE_END, +}; +  /* hdcp1 executions and transitions */  typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);  uint8_t mod_hdcp_execute_and_set( @@ -214,6 +305,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,  	struct mod_hdcp_transition_input_hdcp1 *input,  	struct mod_hdcp_output *output); +/* hdcp2 executions and transitions */ +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, +	struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, +	struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, +	struct mod_hdcp_transition_input_hdcp2 *input, +	struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, 
+	struct mod_hdcp_transition_input_hdcp2 *input, +	struct mod_hdcp_output *output); +  /* log functions */  void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,  		uint8_t *buf, uint32_t buf_size); @@ -234,6 +341,25 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(  enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,  							       enum mod_hdcp_encryption_status *encryption_status); +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption( +		struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management( +		struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready( +		struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, +							       enum mod_hdcp_encryption_status *encryption_status); +  /* ddc functions */  enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); @@ -245,6 +371,7 @@ enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);  enum mod_hdcp_status 
mod_hdcp_write_aksv(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); @@ -308,11 +435,28 @@ static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)  			current_state(hdcp) <= HDCP1_DP_STATE_END);  } +static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp) +{ +	return (current_state(hdcp) > HDCP2_STATE_START && +			current_state(hdcp) <= HDCP2_STATE_END); +} + +static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp) +{ +	return (current_state(hdcp) > HDCP2_DP_STATE_START && +			current_state(hdcp) <= HDCP2_DP_STATE_END); +} +  static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)  {  	return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));  } +static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp) +{ +	return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp)); +} +  static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)  {  	return current_state(hdcp) == HDCP_CP_NOT_DESIRED; @@ -437,6 +581,7 @@ static inline struct mod_hdcp_display *get_empty_display_container(  static inline void reset_retry_counts(struct mod_hdcp *hdcp)  {  	hdcp->connection.hdcp1_retry_count = 0; +	hdcp->connection.hdcp2_retry_count = 0;  }  #endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 3db4a7da414f..04845e43df15 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -27,9 +27,11 @@  static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)  { -	
uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv; +	uint64_t n = 0;  	uint8_t count = 0; +	memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t)); +  	while (n) {  		count++;  		n &= (n - 1); @@ -41,17 +43,17 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)  static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)  {  	if (is_dp_hdcp(hdcp)) -		return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ? +		return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ?  				MOD_HDCP_STATUS_SUCCESS :  				MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; -	return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ? +	return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ?  			MOD_HDCP_STATUS_SUCCESS :  			MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;  }  static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)  { -	return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ? +	return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ?  			MOD_HDCP_STATUS_SUCCESS :  			MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;  } @@ -61,7 +63,7 @@ static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status;  	if (is_dp_hdcp(hdcp)) {  		status = (hdcp->auth.msg.hdcp1.bstatus & -				BSTATUS_R0_P_AVAILABLE_MASK_DP) ? +				DP_BSTATUS_R0_PRIME_READY) ?  			MOD_HDCP_STATUS_SUCCESS :  			MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;  	} else { @@ -74,7 +76,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(  		struct mod_hdcp *hdcp)  {  	return (hdcp->auth.msg.hdcp1.bstatus & -			BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? +			DP_BSTATUS_LINK_FAILURE) ?  			
MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :  			MOD_HDCP_STATUS_SUCCESS;  } @@ -82,7 +84,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(  static inline enum mod_hdcp_status check_no_reauthentication_request_dp(  		struct mod_hdcp *hdcp)  { -	return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ? +	return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ?  			MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :  			MOD_HDCP_STATUS_SUCCESS;  } @@ -92,15 +94,13 @@ static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status;  	if (is_dp_hdcp(hdcp)) -		status = (hdcp->auth.msg.hdcp1.binfo_dp & -				BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ? -			MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : -			MOD_HDCP_STATUS_SUCCESS; +		status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8) +				 ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE +				 : MOD_HDCP_STATUS_SUCCESS;  	else -		status = (hdcp->auth.msg.hdcp1.bstatus & -				BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ? -				MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : -				MOD_HDCP_STATUS_SUCCESS; +		status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8) +				 ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE +				 : MOD_HDCP_STATUS_SUCCESS;  	return status;  } @@ -109,13 +109,11 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status;  	if (is_dp_hdcp(hdcp)) -		status = (hdcp->auth.msg.hdcp1.binfo_dp & -				BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ? +		status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ?  				MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :  				MOD_HDCP_STATUS_SUCCESS;  	else -		status = (hdcp->auth.msg.hdcp1.bstatus & -				BSTATUS_MAX_DEVS_EXCEEDED_MASK) ? +		status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ?  				
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :  				MOD_HDCP_STATUS_SUCCESS;  	return status; @@ -124,8 +122,8 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)  static inline uint8_t get_device_count(struct mod_hdcp *hdcp)  {  	return is_dp_hdcp(hdcp) ? -			(hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) : -			(hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK); +			DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) : +			DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus);  }  static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 136b8011ff3f..21ebc62bb9d9 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -67,11 +67,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,  		break;  	case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:  		if (input->bcaps_read != PASS || -				input->r0p_read != PASS || -				input->rx_validation != PASS || -				(!conn->is_repeater && input->encryption != PASS)) { +				input->r0p_read != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->rx_validation != PASS) {  			/* 1A-06: consider invalid r0' a failure */  			/* 1A-08: consider bksv listed in SRM a failure */ +			/* +			 * some slow RX will fail rx validation when it is +			 * not ready. give it more time to react before retry. +			 */ +			fail_and_restart_in_ms(1000, &status, output); +			break; +		} else if (!conn->is_repeater && input->encryption != PASS) {  			fail_and_restart_in_ms(0, &status, output);  			break;  		} @@ -212,7 +220,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,  				 * after 3 attempts.  				 
* 1A-08: consider bksv listed in SRM a failure  				 */ -				fail_and_restart_in_ms(0, &status, output); +				/* +				 * some slow RX will fail rx validation when it is +				 * not ready. give it more time to react before retry. +				 */ +				fail_and_restart_in_ms(1000, &status, output);  			}  			break;  		} else if ((!conn->is_repeater && input->encryption != PASS) || diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c new file mode 100644 index 000000000000..f730b94ac3c0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -0,0 +1,886 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/delay.h> + +#include "hdcp.h" + +static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) +{ +	uint8_t is_ready = 0; + +	if (is_dp_hdcp(hdcp)) +		is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; +	else +		is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) && +				(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +						hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; +	return is_ready ? MOD_HDCP_STATUS_SUCCESS : +			MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) && +				HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ? +				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; +	else +		status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? +				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; +	return status; +} + +static inline enum mod_hdcp_status check_reauthentication_request( +		struct mod_hdcp *hdcp) +{ +	uint8_t ret = 0; + +	if (is_dp_hdcp(hdcp)) +		ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ? +				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : +				MOD_HDCP_STATUS_SUCCESS; +	else +		ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ? +				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : +				MOD_HDCP_STATUS_SUCCESS; +	return ret; +} + +static inline enum mod_hdcp_status check_link_integrity_failure_dp( +		struct mod_hdcp *hdcp) +{ +	return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 
+			MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE : +			MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; +	uint16_t size; + +	if (is_dp_hdcp(hdcp)) { +		status = MOD_HDCP_STATUS_SUCCESS; +	} else { +		status = mod_hdcp_read_rxstatus(hdcp); +		if (status == MOD_HDCP_STATUS_SUCCESS) { +			size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +			       hdcp->auth.msg.hdcp2.rxstatus[0]; +			status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? +					MOD_HDCP_STATUS_SUCCESS : +					MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; +		} +	} +	return status; +} + +static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; +	uint8_t size; + +	status = mod_hdcp_read_rxstatus(hdcp); +	if (status != MOD_HDCP_STATUS_SUCCESS) +		goto out; + +	if (is_dp_hdcp(hdcp)) { +		status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ? +				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; +	} else { +		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +		       hdcp->auth.msg.hdcp2.rxstatus[0]; +		status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? +				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; +	} +out: +	return status; +} + +static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; +	uint8_t size; + +	status = mod_hdcp_read_rxstatus(hdcp); +	if (status != MOD_HDCP_STATUS_SUCCESS) +		goto out; + +	if (is_dp_hdcp(hdcp)) { +		status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 
+				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; +	} else { +		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +		       hdcp->auth.msg.hdcp2.rxstatus[0]; +		status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? +				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; +	} +out: +	return status; +} + +static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; +	uint8_t size; +	uint16_t max_wait = 20; // units of ms +	uint16_t num_polls = 5; +	uint16_t wait_time = max_wait / num_polls; + +	if (is_dp_hdcp(hdcp)) +		status = MOD_HDCP_STATUS_INVALID_OPERATION; +	else +		for (; num_polls; num_polls--) { +			msleep(wait_time); + +			status = mod_hdcp_read_rxstatus(hdcp); +			if (status != MOD_HDCP_STATUS_SUCCESS) +				break; + +			size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +			       hdcp->auth.msg.hdcp2.rxstatus[0]; +			status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? +					MOD_HDCP_STATUS_SUCCESS : +					MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; +			if (status == MOD_HDCP_STATUS_SUCCESS) +				break; +		} +	return status; +} + +static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; +	uint8_t size; + +	if (is_dp_hdcp(hdcp)) { +		status = MOD_HDCP_STATUS_INVALID_OPERATION; +	} else { +		status = mod_hdcp_read_rxstatus(hdcp); +		if (status != MOD_HDCP_STATUS_SUCCESS) +			goto out; +		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +		       hdcp->auth.msg.hdcp2.rxstatus[0]; +		status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? 
+				MOD_HDCP_STATUS_SUCCESS : +				MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; +	} +out: +	return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ +	return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + +			(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); +} + +static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ +	/* device count must be greater than or equal to tracked hdcp displays */ +	return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? +			MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : +			MOD_HDCP_STATUS_SUCCESS; +} + +static uint8_t process_rxstatus(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input, +		enum mod_hdcp_status *status) +{ +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus, +			&input->rxstatus_read, status, +			hdcp, "rxstatus_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(check_reauthentication_request, +			&input->reauth_request_check, status, +			hdcp, "reauth_request_check")) +		goto out; +	if (is_dp_hdcp(hdcp)) { +		if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp, +				&input->link_integrity_check_dp, status, +				hdcp, "link_integrity_check_dp")) +			goto out; +	} +	if (hdcp->connection.is_repeater) +		if (check_receiver_id_list_ready(hdcp) == +				MOD_HDCP_STATUS_SUCCESS) { +			HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready"); +			event_ctx->rx_id_list_ready = 1; +			if (is_dp_hdcp(hdcp)) +				hdcp->auth.msg.hdcp2.rx_id_list_size = +						sizeof(hdcp->auth.msg.hdcp2.rx_id_list); +			else +				hdcp->auth.msg.hdcp2.rx_id_list_size = +					HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | +					hdcp->auth.msg.hdcp2.rxstatus[0]; +		} +out: +	return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct 
mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version, +			&input->hdcp2version_read, &status, +			hdcp, "hdcp2version_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(check_hdcp2_capable, +			&input->hdcp2_capable_check, &status, +			hdcp, "hdcp2_capable")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, +			&input->add_topology, &status, +			hdcp, "add_topology")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session, +			&input->create_session, &status, +			hdcp, "create_session")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init, +			&input->ake_init_prepare, &status, +			hdcp, "ake_init_prepare")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init, +			&input->ake_init_write, &status, +			hdcp, "ake_init_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (is_hdmi_dvi_sl_hdcp(hdcp)) +		if (!mod_hdcp_execute_and_set(check_ake_cert_available, +				&input->ake_cert_available, &status, +				hdcp, 
"ake_cert_available")) +			goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert, +			&input->ake_cert_read, &status, +			hdcp, "ake_cert_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert, +			&input->ake_cert_validation, &status, +			hdcp, "ake_cert_validation")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km, +			&input->no_stored_km_write, &status, +			hdcp, "no_stored_km_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ && +			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(check_h_prime_available, +			&input->h_prime_available, &status, +			hdcp, "h_prime_available")) +		goto out; + +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, +			&input->h_prime_read, &status, +			hdcp, "h_prime_read")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status read_pairing_info_and_validate_h_prime( +		struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ && +			event_ctx->event != 
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(check_pairing_info_available, +			&input->pairing_available, &status, +			hdcp, "pairing_available")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info, +			&input->pairing_info_read, &status, +			hdcp, "pairing_info_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, +			&input->h_prime_validation, &status, +			hdcp, "h_prime_validation")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km, +			&input->stored_km_write, &status, +			hdcp, "stored_km_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ && +			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(check_h_prime_available, +			&input->h_prime_available, &status, +			hdcp, "h_prime_available")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, +			&input->h_prime_read, &status, +			hdcp, "h_prime_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, +			&input->h_prime_validation, &status, +			hdcp, "h_prime_validation")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status 
locality_check(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, +			&input->lc_init_prepare, &status, +			hdcp, "lc_init_prepare")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, +			&input->lc_init_write, &status, +			 hdcp, "lc_init_write")) +		goto out; +	if (is_dp_hdcp(hdcp)) +		msleep(16); +	else +		if (!mod_hdcp_execute_and_set(poll_l_prime_available, +				&input->l_prime_available_poll, &status, +				hdcp, "l_prime_available_poll")) +			goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, +			&input->l_prime_read, &status, +			hdcp, "l_prime_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, +			&input->l_prime_validation, &status, +			hdcp, "l_prime_validation")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks, +			&input->eks_prepare, &status, +			hdcp, "eks_prepare")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks, +			&input->eks_write, &status, +			hdcp, "eks_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != 
MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { +		process_rxstatus(hdcp, event_ctx, input, &status); +		goto out; +	} + +	if (is_hdmi_dvi_sl_hdcp(hdcp)) { +		if (!process_rxstatus(hdcp, event_ctx, input, &status)) +			goto out; +		if (event_ctx->rx_id_list_ready) +			goto out; +	} +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption, +			&input->enable_encryption, &status, +			hdcp, "enable_encryption")) +		goto out; +	if (is_dp_mst_hdcp(hdcp)) { +		if (!mod_hdcp_execute_and_set( +				mod_hdcp_hdcp2_enable_dp_stream_encryption, +				&input->stream_encryption_dp, &status, +				hdcp, "stream_encryption_dp")) +			goto out; +	} +out: +	return status; +} + +static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!process_rxstatus(hdcp, event_ctx, input, &status)) +		goto out; +	if (event_ctx->rx_id_list_ready) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ && +			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!process_rxstatus(hdcp, event_ctx, input, &status)) +		goto out; +	if (!event_ctx->rx_id_list_ready) { +		status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; +		goto out; +	} +out: +	return status; +} + +static enum 
mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { +		process_rxstatus(hdcp, event_ctx, input, &status); +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list, +			&input->rx_id_list_read, +			&status, hdcp, "receiver_id_list_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(check_device_count, +			&input->device_count_check, +			&status, hdcp, "device_count_check")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list, +			&input->rx_id_list_validation, +			&status, hdcp, "rx_id_list_validation")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack, +			&input->repeater_auth_ack_write, +			&status, hdcp, "repeater_auth_ack_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { +		process_rxstatus(hdcp, event_ctx, input, &status); +		goto out; +	} + +	if (is_hdmi_dvi_sl_hdcp(hdcp)) { +		if (!process_rxstatus(hdcp, event_ctx, input, &status)) +			goto out; +		if (event_ctx->rx_id_list_ready) +			goto out; +	} +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management, +			&input->prepare_stream_manage, +			&status, hdcp, "prepare_stream_manage")) +		goto out; + +	if 
(!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage, +			&input->stream_manage_write, +			&status, hdcp, "stream_manage_write")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ && +			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +		event_ctx->unexpected_event = 1; +		goto out; +	} +	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { +		process_rxstatus(hdcp, event_ctx, input, &status); +		goto out; +	} + +	if (is_hdmi_dvi_sl_hdcp(hdcp)) { +		if (!process_rxstatus(hdcp, event_ctx, input, &status)) +			goto out; +		if (event_ctx->rx_id_list_ready) { +			goto out; +		} +	} +	if (is_hdmi_dvi_sl_hdcp(hdcp)) +		if (!mod_hdcp_execute_and_set(check_stream_ready_available, +				&input->stream_ready_available, +				&status, hdcp, "stream_ready_available")) +			goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready, +			&input->stream_ready_read, +			&status, hdcp, "stream_ready_read")) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready, +			&input->stream_ready_validation, +			&status, hdcp, "stream_ready_validation")) +		goto out; + +out: +	return status; +} + +static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps, +			&input->rx_caps_read_dp, +			&status, hdcp, "rx_caps_read_dp")) +		goto out; +	if (!mod_hdcp_execute_and_set(check_hdcp2_capable, +			
&input->hdcp2_capable_check, &status, +			hdcp, "hdcp2_capable_check")) +		goto out; +out: +	return status; +} + +static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && +			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { +		event_ctx->unexpected_event = 1; +		goto out; +	} + +	if (!process_rxstatus(hdcp, event_ctx, input, &status)) +		goto out; +	if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type, +			&input->content_stream_type_write, &status, +			hdcp, "content_stream_type_write")) +		goto out; +out: +	return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, +	struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	switch (current_state(hdcp)) { +	case H2_A0_KNOWN_HDCP2_CAPABLE_RX: +		status = known_hdcp2_capable_rx(hdcp, event_ctx, input); +		break; +	case H2_A1_SEND_AKE_INIT: +		status = send_ake_init(hdcp, event_ctx, input); +		break; +	case H2_A1_VALIDATE_AKE_CERT: +		status = validate_ake_cert(hdcp, event_ctx, input); +		break; +	case H2_A1_SEND_NO_STORED_KM: +		status = send_no_stored_km(hdcp, event_ctx, input); +		break; +	case H2_A1_READ_H_PRIME: +		status = read_h_prime(hdcp, event_ctx, input); +		break; +	case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		status = read_pairing_info_and_validate_h_prime(hdcp, +				event_ctx, input); +		break; +	case H2_A1_SEND_STORED_KM: +		status = send_stored_km(hdcp, event_ctx, input); +		break; +	case H2_A1_VALIDATE_H_PRIME: +		status = validate_h_prime(hdcp, event_ctx, input); +		break; +	case H2_A2_LOCALITY_CHECK: +		status = locality_check(hdcp, event_ctx, input); +		break; +	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +		status = 
exchange_ks_and_test_for_repeater(hdcp, event_ctx, input); +		break; +	case H2_ENABLE_ENCRYPTION: +		status = enable_encryption(hdcp, event_ctx, input); +		break; +	case H2_A5_AUTHENTICATED: +		status = authenticated(hdcp, event_ctx, input); +		break; +	case H2_A6_WAIT_FOR_RX_ID_LIST: +		status = wait_for_rx_id_list(hdcp, event_ctx, input); +		break; +	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); +		break; +	case H2_A9_SEND_STREAM_MANAGEMENT: +		status = send_stream_management(hdcp, event_ctx, input); +		break; +	case H2_A9_VALIDATE_STREAM_READY: +		status = validate_stream_ready(hdcp, event_ctx, input); +		break; +	default: +		status = MOD_HDCP_STATUS_INVALID_STATE; +		break; +	} + +	return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, +	struct mod_hdcp_event_context *event_ctx, +	struct mod_hdcp_transition_input_hdcp2 *input) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + +	switch (current_state(hdcp)) { +	case D2_A0_DETERMINE_RX_HDCP_CAPABLE: +		status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); +		break; +	case D2_A1_SEND_AKE_INIT: +		status = send_ake_init(hdcp, event_ctx, input); +		break; +	case D2_A1_VALIDATE_AKE_CERT: +		status = validate_ake_cert(hdcp, event_ctx, input); +		break; +	case D2_A1_SEND_NO_STORED_KM: +		status = send_no_stored_km(hdcp, event_ctx, input); +		break; +	case D2_A1_READ_H_PRIME: +		status = read_h_prime(hdcp, event_ctx, input); +		break; +	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		status = read_pairing_info_and_validate_h_prime(hdcp, +				event_ctx, input); +		break; +	case D2_A1_SEND_STORED_KM: +		status = send_stored_km(hdcp, event_ctx, input); +		break; +	case D2_A1_VALIDATE_H_PRIME: +		status = validate_h_prime(hdcp, event_ctx, input); +		break; +	case D2_A2_LOCALITY_CHECK: +		status = locality_check(hdcp, event_ctx, input); +		break; +	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +	
	status = exchange_ks_and_test_for_repeater(hdcp, +				event_ctx, input); +		break; +	case D2_SEND_CONTENT_STREAM_TYPE: +		status = send_content_stream_type_dp(hdcp, event_ctx, input); +		break; +	case D2_ENABLE_ENCRYPTION: +		status = enable_encryption(hdcp, event_ctx, input); +		break; +	case D2_A5_AUTHENTICATED: +		status = authenticated(hdcp, event_ctx, input); +		break; +	case D2_A6_WAIT_FOR_RX_ID_LIST: +		status = wait_for_rx_id_list(hdcp, event_ctx, input); +		break; +	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); +		break; +	case D2_A9_SEND_STREAM_MANAGEMENT: +		status = send_stream_management(hdcp, event_ctx, input); +		break; +	case D2_A9_VALIDATE_STREAM_READY: +		status = validate_stream_ready(hdcp, event_ctx, input); +		break; +	default: +		status = MOD_HDCP_STATUS_INVALID_STATE; +		break; +	} + +	return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c new file mode 100644 index 000000000000..8cae3e3aacd5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -0,0 +1,679 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context *event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input, +		struct mod_hdcp_output *output) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; +	struct mod_hdcp_connection *conn = &hdcp->connection; +	struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + +	switch (current_state(hdcp)) { +	case H2_A0_KNOWN_HDCP2_CAPABLE_RX: +		if (input->hdcp2version_read != PASS || +				input->hdcp2_capable_check != PASS) { +			adjust->hdcp2.disable = 1; +			callback_in_ms(0, output); +			set_state_id(hdcp, output, HDCP_INITIALIZED); +		} else { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT); +		} +		break; +	case H2_A1_SEND_AKE_INIT: +		if (input->add_topology != PASS || +				input->create_session != PASS || +				input->ake_init_prepare != PASS) { +			/* out of sync with psp state */ +			adjust->hdcp2.disable = 1; +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->ake_init_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 100, output); +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT); +		break; +	case H2_A1_VALIDATE_AKE_CERT: +		if (input->ake_cert_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 
1A-08: consider ake timeout a failure */ +				/* some hdmi receivers are not ready for HDCP +				 * immediately after video becomes active, +				 * delay 1s before retry on first HDCP message +				 * timeout. +				 */ +				fail_and_restart_in_ms(1000, &status, output); +			} else { +				/* continue ake cert polling*/ +				callback_in_ms(10, output); +				increment_stay_counter(hdcp); +			} +			break; +		} else if (input->ake_cert_read != PASS || +				input->ake_cert_validation != PASS) { +			/* +			 * 1A-09: consider invalid ake cert a failure +			 * 1A-10: consider receiver id listed in SRM a failure +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (conn->is_km_stored && +				!adjust->hdcp2.force_no_stored_km) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A1_SEND_STORED_KM); +		} else { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM); +		} +		break; +	case H2_A1_SEND_NO_STORED_KM: +		if (input->no_stored_km_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (adjust->hdcp2.increase_h_prime_timeout) +			set_watchdog_in_ms(hdcp, 2000, output); +		else +			set_watchdog_in_ms(hdcp, 1000, output); +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A1_READ_H_PRIME); +		break; +	case H2_A1_READ_H_PRIME: +		if (input->h_prime_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 1A-11-3: consider h' timeout a failure */ +				fail_and_restart_in_ms(1000, &status, output); +			} else { +				/* continue h' polling */ +				callback_in_ms(100, output); +				increment_stay_counter(hdcp); +			} +			break; +		} else if (input->h_prime_read != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 200, output); +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); +		break; +	case 
H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		if (input->pairing_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 1A-12: consider pairing info timeout +				 * a failure +				 */ +				fail_and_restart_in_ms(0, &status, output); +			} else { +				/* continue pairing info polling */ +				callback_in_ms(20, output); +				increment_stay_counter(hdcp); +			} +			break; +		} else if (input->pairing_info_read != PASS || +				input->h_prime_validation != PASS) { +			/* 1A-11-1: consider invalid h' a failure */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); +		break; +	case H2_A1_SEND_STORED_KM: +		if (input->stored_km_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 200, output); +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME); +		break; +	case H2_A1_VALIDATE_H_PRIME: +		if (input->h_prime_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 1A-11-2: consider h' timeout a failure */ +				fail_and_restart_in_ms(1000, &status, output); +			} else { +				/* continue h' polling */ +				callback_in_ms(20, output); +				increment_stay_counter(hdcp); +			} +			break; +		} else if (input->h_prime_read != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->h_prime_validation != PASS) { +			/* 1A-11-1: consider invalid h' a failure */ +			adjust->hdcp2.force_no_stored_km = 1; +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); +		break; +	case H2_A2_LOCALITY_CHECK: +		if (hdcp->state.stay_count > 10 || +				input->lc_init_prepare != PASS || +				input->lc_init_write != PASS || +				input->l_prime_available_poll != PASS || +				input->l_prime_read != PASS) { +			/* +			
 * 1A-05: consider disconnection after LC init a failure +			 * 1A-13-1: consider invalid l' a failure +			 * 1A-13-2: consider l' timeout a failure +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->l_prime_validation != PASS) { +			callback_in_ms(0, output); +			increment_stay_counter(hdcp); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); +		break; +	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +		if (input->eks_prepare != PASS || +				input->eks_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (conn->is_repeater) { +			set_watchdog_in_ms(hdcp, 3000, output); +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST); +		} else { +			/* some CTS equipment requires a delay GREATER than +			 * 200 ms, so delay 210 ms instead of 200 ms +			 */ +			callback_in_ms(210, output); +			set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); +		} +		break; +	case H2_ENABLE_ENCRYPTION: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			/* +			 * 1A-07: restart hdcp on REAUTH_REQ +			 * 1B-08: restart hdcp on REAUTH_REQ +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->enable_encryption != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A5_AUTHENTICATED); +		HDCP_FULL_DDC_TRACE(hdcp); +		break; +	case H2_A5_AUTHENTICATED: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			
set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} +		callback_in_ms(500, output); +		increment_stay_counter(hdcp); +		break; +	case H2_A6_WAIT_FOR_RX_ID_LIST: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (!event_ctx->rx_id_list_ready) { +			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 1B-02: consider rx id list timeout a failure */ +				/* some CTS equipment's actual timeout +				 * measurement is slightly greater than 3000 ms. +				 * Delay 100 ms to ensure it is fully timeout +				 * before re-authentication. +				 */ +				fail_and_restart_in_ms(100, &status, output); +			} else { +				callback_in_ms(300, output); +				increment_stay_counter(hdcp); +			} +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +		break; +	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->rx_id_list_read != PASS || +				input->device_count_check != PASS || +				input->rx_id_list_validation != PASS || +				input->repeater_auth_ack_write != PASS) { +			/* 1B-03: consider invalid v' a failure +			 * 1B-04: consider MAX_DEVS_EXCEEDED a failure +			 * 1B-05: consider MAX_CASCADE_EXCEEDED a failure +			 * 1B-06: consider invalid seq_num_V a failure +			 * 1B-09: consider seq_num_V rollover a failure +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); +		break; +	case H2_A9_SEND_STREAM_MANAGEMENT: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, 
H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->prepare_stream_manage != PASS || +				input->stream_manage_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 100, output); +		callback_in_ms(0, output); +		set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY); +		break; +	case H2_A9_VALIDATE_STREAM_READY: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->stream_ready_available != PASS) { +			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { +				/* 1B-10-2: restart content stream management on +				 * stream ready timeout +				 */ +				hdcp->auth.count.stream_management_retry_count++; +				callback_in_ms(0, output); +				set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); +			} else { +				callback_in_ms(10, output); +				increment_stay_counter(hdcp); +			} +			break; +		} else if (input->stream_ready_read != PASS || +				input->stream_ready_validation != PASS) { +			/* +			 * 1B-10-1: restart content stream management +			 * on invalid M' +			 */ +			if (hdcp->auth.count.stream_management_retry_count > 10) { +				fail_and_restart_in_ms(0, &status, output); +			} else { +				hdcp->auth.count.stream_management_retry_count++; +				callback_in_ms(0, output); +				set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); +			} +			break; +		} +		callback_in_ms(200, output); +		set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); +		break; +	default: +		status = MOD_HDCP_STATUS_INVALID_STATE; +		fail_and_restart_in_ms(0, &status, output); +		break; +	} + +	return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, +		struct mod_hdcp_event_context 
*event_ctx, +		struct mod_hdcp_transition_input_hdcp2 *input, +		struct mod_hdcp_output *output) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; +	struct mod_hdcp_connection *conn = &hdcp->connection; +	struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + +	switch (current_state(hdcp)) { +	case D2_A0_DETERMINE_RX_HDCP_CAPABLE: +		if (input->rx_caps_read_dp != PASS || +				input->hdcp2_capable_check != PASS) { +			adjust->hdcp2.disable = 1; +			callback_in_ms(0, output); +			set_state_id(hdcp, output, HDCP_INITIALIZED); +		} else { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT); +		} +		break; +	case D2_A1_SEND_AKE_INIT: +		if (input->add_topology != PASS || +				input->create_session != PASS || +				input->ake_init_prepare != PASS) { +			/* out of sync with psp state */ +			adjust->hdcp2.disable = 1; +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->ake_init_write != PASS) { +			/* possibly display not ready */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(100, output); +		set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT); +		break; +	case D2_A1_VALIDATE_AKE_CERT: +		if (input->ake_cert_read != PASS || +				input->ake_cert_validation != PASS) { +			/* +			 * 1A-08: consider invalid ake cert a failure +			 * 1A-09: consider receiver id listed in SRM a failure +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (conn->is_km_stored && +				!adjust->hdcp2.force_no_stored_km) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A1_SEND_STORED_KM); +		} else { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM); +		} +		break; +	case D2_A1_SEND_NO_STORED_KM: +		if (input->no_stored_km_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (adjust->hdcp2.increase_h_prime_timeout) +			set_watchdog_in_ms(hdcp, 2000, output); +		else 
+			set_watchdog_in_ms(hdcp, 1000, output); +		set_state_id(hdcp, output, D2_A1_READ_H_PRIME); +		break; +	case D2_A1_READ_H_PRIME: +		if (input->h_prime_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) +				/* 1A-10-3: consider h' timeout a failure */ +				fail_and_restart_in_ms(1000, &status, output); +			else +				increment_stay_counter(hdcp); +			break; +		} else if (input->h_prime_read != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 200, output); +		set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); +		break; +	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		if (input->pairing_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) +				/* +				 * 1A-11: consider pairing info timeout +				 * a failure +				 */ +				fail_and_restart_in_ms(0, &status, output); +			else +				increment_stay_counter(hdcp); +			break; +		} else if (input->pairing_info_read != PASS || +				input->h_prime_validation != PASS) { +			/* 1A-10-1: consider invalid h' a failure */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); +		break; +	case D2_A1_SEND_STORED_KM: +		if (input->stored_km_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_watchdog_in_ms(hdcp, 200, output); +		set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME); +		break; +	case D2_A1_VALIDATE_H_PRIME: +		if (input->h_prime_available != PASS) { +			if (event_ctx->event == +					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) +				/* 1A-10-2: consider h' timeout a failure */ +				fail_and_restart_in_ms(1000, &status, output); +			else +				increment_stay_counter(hdcp); +			break; +		} else if (input->h_prime_read != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->h_prime_validation != PASS) { +			/* 1A-10-1: 
consider invalid h' a failure */ +			adjust->hdcp2.force_no_stored_km = 1; +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); +		break; +	case D2_A2_LOCALITY_CHECK: +		if (hdcp->state.stay_count > 10 || +				input->lc_init_prepare != PASS || +				input->lc_init_write != PASS || +				input->l_prime_read != PASS) { +			/* 1A-12: consider invalid l' a failure */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->l_prime_validation != PASS) { +			callback_in_ms(0, output); +			increment_stay_counter(hdcp); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); +		break; +	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +		if (input->eks_prepare != PASS || +				input->eks_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		if (conn->is_repeater) { +			set_watchdog_in_ms(hdcp, 3000, output); +			set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST); +		} else { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE); +		} +		break; +	case D2_SEND_CONTENT_STREAM_TYPE: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS || +				input->content_stream_type_write != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(210, output); +		set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); +		break; +	case D2_ENABLE_ENCRYPTION: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS) { +			/* +			 * 1A-07: restart hdcp on REAUTH_REQ +			 * 1B-08: restart hdcp on REAUTH_REQ +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			
set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->enable_encryption != PASS || +				(is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		set_state_id(hdcp, output, D2_A5_AUTHENTICATED); +		HDCP_FULL_DDC_TRACE(hdcp); +		break; +	case D2_A5_AUTHENTICATED: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (input->link_integrity_check_dp != PASS) { +			if (hdcp->connection.hdcp2_retry_count >= 1) +				adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} +		increment_stay_counter(hdcp); +		break; +	case D2_A6_WAIT_FOR_RX_ID_LIST: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (!event_ctx->rx_id_list_ready) { +			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) +				/* 1B-02: consider rx id list timeout a failure */ +				fail_and_restart_in_ms(0, &status, output); +			else +				increment_stay_counter(hdcp); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +		break; +	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS || +				input->rx_id_list_read != PASS || +				input->device_count_check != PASS || +				input->rx_id_list_validation != PASS || +				input->repeater_auth_ack_write != PASS) { +			/* +			 * 1B-03: consider invalid v' a failure +			 * 1B-04: consider 
MAX_DEVS_EXCEEDED a failure +			 * 1B-05: consider MAX_CASCADE_EXCEEDED a failure +			 * 1B-06: consider invalid seq_num_V a failure +			 * 1B-09: consider seq_num_V rollover a failure +			 */ +			fail_and_restart_in_ms(0, &status, output); +			break; +		} +		callback_in_ms(0, output); +		set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); +		break; +	case D2_A9_SEND_STREAM_MANAGEMENT: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->prepare_stream_manage != PASS || +				input->stream_manage_write != PASS) { +			if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) +				fail_and_restart_in_ms(0, &status, output); +			else +				increment_stay_counter(hdcp); +			break; +		} +		callback_in_ms(100, output); +		set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY); +		break; +	case D2_A9_VALIDATE_STREAM_READY: +		if (input->rxstatus_read != PASS || +				input->reauth_request_check != PASS || +				input->link_integrity_check_dp != PASS) { +			fail_and_restart_in_ms(0, &status, output); +			break; +		} else if (event_ctx->rx_id_list_ready) { +			callback_in_ms(0, output); +			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); +			break; +		} else if (input->stream_ready_read != PASS || +				input->stream_ready_validation != PASS) { +			/* +			 * 1B-10-1: restart content stream management +			 * on invalid M' +			 * 1B-10-2: consider stream ready timeout a failure +			 */ +			if (hdcp->auth.count.stream_management_retry_count > 10) { +				fail_and_restart_in_ms(0, &status, output); +			} else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) { +				hdcp->auth.count.stream_management_retry_count++; +				callback_in_ms(0, output); +				
set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); +			} else { +				increment_stay_counter(hdcp); +			} +			break; +		} +		callback_in_ms(200, output); +		set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); +		break; +	default: +		status = MOD_HDCP_STATUS_INVALID_STATE; +		fail_and_restart_in_ms(0, &status, output); +		break; +	} +	return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index e7baae059b85..ff9d54812e62 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -51,6 +51,26 @@ enum mod_hdcp_ddc_message_id {  	MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,  	MOD_HDCP_MESSAGE_ID_READ_BINFO, +	/* HDCP 2.2 */ + +	MOD_HDCP_MESSAGE_ID_HDCP2VERSION, +	MOD_HDCP_MESSAGE_ID_RX_CAPS, +	MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, +	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, +	MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, +	MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, +	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, +	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, +	MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, +	MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, +	MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, +	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, +	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, +	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, +	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, +	MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, +	MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, +  	MOD_HDCP_MESSAGE_ID_MAX  }; @@ -70,6 +90,22 @@ static const uint8_t hdcp_i2c_offsets[] = {  	[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,  	[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,  	[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, +	[MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, +	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, +	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, +	
[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, +	[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, +	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, +	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, +	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, +	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, +	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, +	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, +	[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70, +	[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0  };  static const uint32_t hdcp_dpcd_addrs[] = { @@ -88,6 +124,22 @@ static const uint32_t hdcp_dpcd_addrs[] = {  	[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,  	[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,  	[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, +	[MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, +	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, +	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, +	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, +	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, +	[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, +	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, +	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, +	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, +	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, +	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, +	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, +	[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, +	[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494  };  static enum mod_hdcp_status read(struct mod_hdcp *hdcp, @@ -303,3 +355,277 @@ enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp 
*hdcp)  			hdcp->auth.msg.hdcp1.an,  			sizeof(hdcp->auth.msg.hdcp1.an));  } + +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = MOD_HDCP_STATUS_INVALID_OPERATION; +	else +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION, +				&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, +				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); + +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (!is_dp_hdcp(hdcp)) +		status = MOD_HDCP_STATUS_INVALID_OPERATION; +	else +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS, +				hdcp->auth.msg.hdcp2.rxcaps_dp, +				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); + +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, +				&hdcp->auth.msg.hdcp2.rxstatus_dp, +				1); +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, +					(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, +					sizeof(hdcp->auth.msg.hdcp2.rxstatus)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.ake_cert[0] = 3; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, +				hdcp->auth.msg.hdcp2.ake_cert+1, +				sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, +					hdcp->auth.msg.hdcp2.ake_cert, +					sizeof(hdcp->auth.msg.hdcp2.ake_cert)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, +				hdcp->auth.msg.hdcp2.ake_h_prime+1, +		
		sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, +				hdcp->auth.msg.hdcp2.ake_h_prime, +				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, +				hdcp->auth.msg.hdcp2.ake_pairing_info+1, +				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, +				hdcp->auth.msg.hdcp2.ake_pairing_info, +				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, +				hdcp->auth.msg.hdcp2.lc_l_prime+1, +				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, +				hdcp->auth.msg.hdcp2.lc_l_prime, +				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, +				hdcp->auth.msg.hdcp2.rx_id_list+1, +				sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, +				hdcp->auth.msg.hdcp2.rx_id_list, +				hdcp->auth.msg.hdcp2.rx_id_list_size); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if 
(is_dp_hdcp(hdcp)) { +		hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17; +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, +				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1); + +	} else { +		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, +				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); +	} +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, +				hdcp->auth.msg.hdcp2.ake_init+1, +				sizeof(hdcp->auth.msg.hdcp2.ake_init)-1); +	else +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, +					hdcp->auth.msg.hdcp2.ake_init, +					sizeof(hdcp->auth.msg.hdcp2.ake_init)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, +				hdcp->auth.msg.hdcp2.ake_no_stored_km+1, +				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1); +	else +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, +			hdcp->auth.msg.hdcp2.ake_no_stored_km, +			sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, +				hdcp->auth.msg.hdcp2.ake_stored_km+1, +				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1); +	else +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, +				hdcp->auth.msg.hdcp2.ake_stored_km, +				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp) +{ +	
enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, +				hdcp->auth.msg.hdcp2.lc_init+1, +				sizeof(hdcp->auth.msg.hdcp2.lc_init)-1); +	else +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, +				hdcp->auth.msg.hdcp2.lc_init, +				sizeof(hdcp->auth.msg.hdcp2.lc_init)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, +				MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, +				hdcp->auth.msg.hdcp2.ske_eks+1, +				sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1); +	else +		status = write(hdcp, +			MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, +			hdcp->auth.msg.hdcp2.ske_eks, +			sizeof(hdcp->auth.msg.hdcp2.ske_eks)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, +				hdcp->auth.msg.hdcp2.repeater_auth_ack+1, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1); +	else +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, +				hdcp->auth.msg.hdcp2.repeater_auth_ack, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if (is_dp_hdcp(hdcp)) +		status = write(hdcp, +				MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, +				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1, +				hdcp->auth.msg.hdcp2.stream_manage_size-1); +	else +		status = write(hdcp, +				MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, +				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, +				hdcp->auth.msg.hdcp2.stream_manage_size); +	return status; +} + +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp) +{ +	enum mod_hdcp_status status; + +	if 
(is_dp_hdcp(hdcp)) +		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, +				hdcp->auth.msg.hdcp2.content_stream_type_dp+1, +				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1); +	else +		status = MOD_HDCP_STATUS_INVALID_OPERATION; +	return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 3982ced5f969..724ebcee9a19 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -116,6 +116,58 @@ char *mod_hdcp_status_to_str(int32_t status)  		return "MOD_HDCP_STATUS_DDC_FAILURE";  	case MOD_HDCP_STATUS_INVALID_OPERATION:  		return "MOD_HDCP_STATUS_INVALID_OPERATION"; +	case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE: +		return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE"; +	case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING: +		return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING"; +	case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING: +		return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING"; +	case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING: +		return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED: +		return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE: +		return 
"MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING: +		return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED: +		return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; +	case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: +		return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; +	case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION: +		return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION"; +	case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: +		return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; +	case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST: +		return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST"; +	case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE"; +	case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE: +		return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";  	default:  		return "MOD_HDCP_STATUS_UNKNOWN";  	} @@ -156,6 +208,72 @@ char *mod_hdcp_state_id_to_str(int32_t id)  		return "D1_A6_WAIT_FOR_READY";  	case D1_A7_READ_KSV_LIST:  		return "D1_A7_READ_KSV_LIST"; +	case H2_A0_KNOWN_HDCP2_CAPABLE_RX: +		return "H2_A0_KNOWN_HDCP2_CAPABLE_RX"; +	case H2_A1_SEND_AKE_INIT: +		return "H2_A1_SEND_AKE_INIT"; +	case 
H2_A1_VALIDATE_AKE_CERT: +		return "H2_A1_VALIDATE_AKE_CERT"; +	case H2_A1_SEND_NO_STORED_KM: +		return "H2_A1_SEND_NO_STORED_KM"; +	case H2_A1_READ_H_PRIME: +		return "H2_A1_READ_H_PRIME"; +	case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME"; +	case H2_A1_SEND_STORED_KM: +		return "H2_A1_SEND_STORED_KM"; +	case H2_A1_VALIDATE_H_PRIME: +		return "H2_A1_VALIDATE_H_PRIME"; +	case H2_A2_LOCALITY_CHECK: +		return "H2_A2_LOCALITY_CHECK"; +	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +		return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER"; +	case H2_ENABLE_ENCRYPTION: +		return "H2_ENABLE_ENCRYPTION"; +	case H2_A5_AUTHENTICATED: +		return "H2_A5_AUTHENTICATED"; +	case H2_A6_WAIT_FOR_RX_ID_LIST: +		return "H2_A6_WAIT_FOR_RX_ID_LIST"; +	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK"; +	case H2_A9_SEND_STREAM_MANAGEMENT: +		return "H2_A9_SEND_STREAM_MANAGEMENT"; +	case H2_A9_VALIDATE_STREAM_READY: +		return "H2_A9_VALIDATE_STREAM_READY"; +	case D2_A0_DETERMINE_RX_HDCP_CAPABLE: +		return "D2_A0_DETERMINE_RX_HDCP_CAPABLE"; +	case D2_A1_SEND_AKE_INIT: +		return "D2_A1_SEND_AKE_INIT"; +	case D2_A1_VALIDATE_AKE_CERT: +		return "D2_A1_VALIDATE_AKE_CERT"; +	case D2_A1_SEND_NO_STORED_KM: +		return "D2_A1_SEND_NO_STORED_KM"; +	case D2_A1_READ_H_PRIME: +		return "D2_A1_READ_H_PRIME"; +	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: +		return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME"; +	case D2_A1_SEND_STORED_KM: +		return "D2_A1_SEND_STORED_KM"; +	case D2_A1_VALIDATE_H_PRIME: +		return "D2_A1_VALIDATE_H_PRIME"; +	case D2_A2_LOCALITY_CHECK: +		return "D2_A2_LOCALITY_CHECK"; +	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: +		return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER"; +	case D2_SEND_CONTENT_STREAM_TYPE: +		return "D2_SEND_CONTENT_STREAM_TYPE"; +	case D2_ENABLE_ENCRYPTION: +		return "D2_ENABLE_ENCRYPTION"; +	case D2_A5_AUTHENTICATED: +		return "D2_A5_AUTHENTICATED"; +	
case D2_A6_WAIT_FOR_RX_ID_LIST: +		return "D2_A6_WAIT_FOR_RX_ID_LIST"; +	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: +		return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK"; +	case D2_A9_SEND_STREAM_MANAGEMENT: +		return "D2_A9_SEND_STREAM_MANAGEMENT"; +	case D2_A9_VALIDATE_STREAM_READY: +		return "D2_A9_VALIDATE_STREAM_READY";  	default:  		return "UNKNOWN_STATE_ID";  	}; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 2fd0e0a893ef..ff91373ebada 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -27,7 +27,7 @@  #define MOD_HDCP_LOG_H_  #ifdef CONFIG_DRM_AMD_DC_HDCP -#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__) +#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)  #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)  #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)  #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) @@ -37,7 +37,7 @@  /* default logs */  #define HDCP_ERROR_TRACE(hdcp, status) \  		HDCP_LOG_ERR(hdcp, \ -			"[Link %d] ERROR %s IN STATE %s", \ +			"[Link %d] WARNING %s IN STATE %s", \  			hdcp->config.index, \  			mod_hdcp_status_to_str(status), \  			mod_hdcp_state_id_to_str(hdcp->state.id)) @@ -45,6 +45,10 @@  		HDCP_LOG_VER(hdcp, \  			"[Link %d] HDCP 1.4 enabled on display %d", \  			hdcp->config.index, displayIndex) +#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \ +		HDCP_LOG_VER(hdcp, \ +			"[Link %d] HDCP 2.2 enabled on display %d", \ +			hdcp->config.index, displayIndex)  /* state machine logs */  #define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \  		HDCP_LOG_FSM(hdcp, \ @@ -93,26 +97,73 @@  				hdcp->buf); \  } while (0)  #define HDCP_FULL_DDC_TRACE(hdcp) do { \ -	HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ -			sizeof(hdcp->auth.msg.hdcp1.bksv)); \ -	HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ -			
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ -	HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ -			sizeof(hdcp->auth.msg.hdcp1.an)); \ -	HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ -			sizeof(hdcp->auth.msg.hdcp1.aksv)); \ -	HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ -			sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ -	HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ -			(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ -			sizeof(hdcp->auth.msg.hdcp1.r0p)); \ -	HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ -			(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ -			sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ -	HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ -			hdcp->auth.msg.hdcp1.ksvlist_size); \ -	HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ -			sizeof(hdcp->auth.msg.hdcp1.vp)); \ +	if (is_hdcp1(hdcp)) { \ +		HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ +				sizeof(hdcp->auth.msg.hdcp1.bksv)); \ +		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ +				sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ +				sizeof(hdcp->auth.msg.hdcp1.an)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ +				sizeof(hdcp->auth.msg.hdcp1.aksv)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ +				sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ +		HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ +				(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ +				sizeof(hdcp->auth.msg.hdcp1.r0p)); \ +		HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ +				(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ +				sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ +		HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ +				hdcp->auth.msg.hdcp1.ksvlist_size); \ +		HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ +				sizeof(hdcp->auth.msg.hdcp1.vp)); \ +	} else { \ +		HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \ +				
&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \ +				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \ +		HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \ +				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_init)); \ +		HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \ +				hdcp->auth.msg.hdcp2.ake_stored_km, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \ +				hdcp->auth.msg.hdcp2.ake_no_stored_km, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \ +		HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \ +		HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \ +				hdcp->auth.msg.hdcp2.ake_pairing_info, \ +				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \ +				sizeof(hdcp->auth.msg.hdcp2.lc_init)); \ +		HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \ +				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \ +				sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \ +		HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \ +				(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \ +				sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \ +		HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \ +				hdcp->auth.msg.hdcp2.rx_id_list, \ +				hdcp->auth.msg.hdcp2.rx_id_list_size); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \ +				hdcp->auth.msg.hdcp2.repeater_auth_ack, \ +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \ +				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \ +				hdcp->auth.msg.hdcp2.stream_manage_size); \ +		
HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \ +				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \ +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \ +		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \ +				hdcp->auth.msg.hdcp2.content_stream_type_dp, \ +				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \ +	} \  } while (0)  #define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \  		HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ @@ -123,6 +174,9 @@  #define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \  		HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \  				hdcp->config.index) +#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \ +		HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \ +				hdcp->config.index)  #define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \  		HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)  #define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 646d909bbc37..7911dc157d5a 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -31,6 +31,19 @@  #include "amdgpu.h"  #include "hdcp_psp.h" +static void hdcp2_message_init(struct mod_hdcp *hdcp, +			       struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in) +{ +	in->session_handle = hdcp->auth.id; +	in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; +	in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; +	in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; +	in->process.msg1_desc.msg_size = 0; +	in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; +	in->process.msg2_desc.msg_size = 0; +	in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; +	in->process.msg3_desc.msg_size = 0; +}  enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)  { @@ -42,7 +55,7 @@ enum mod_hdcp_status 
mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)  	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;  	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { -		if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) { +		if (is_display_added(&(hdcp->connection.displays[i]))) {  			memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -96,7 +109,7 @@ enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)  			dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;  			dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;  			dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = -				TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x; +				TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;  			dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;  			psp_dtm_invoke(psp, dtm_cmd->cmd_id); @@ -132,10 +145,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)  	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); +	hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; +  	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)  		return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; -	hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;  	hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;  	memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,  		sizeof(hdcp->auth.msg.hdcp1.aksv)); @@ -326,3 +340,493 @@ enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *  	return MOD_HDCP_STATUS_SUCCESS;  } +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct mod_hdcp_display *display = get_first_added_display(hdcp); + +	if (!psp->hdcp_context.hdcp_initialized) { +		DRM_ERROR("Failed to create hdcp session, HDCP TA is not 
initialized"); +		return MOD_HDCP_STATUS_FAILURE; +	} + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	if (!display) +		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + +	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; + +	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) +		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = +			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0; +	else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1) +		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = +			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1; +	else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX) +		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = +			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE; + +	hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle; + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id; +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; + +	HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); + +	return 
MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE; + +	memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.ake_init)); + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT; +	msg_in->process.msg1_desc.msg_size = 
TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT; + +	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert, +	       sizeof(hdcp->auth.msg.hdcp2.ake_cert)); + +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM; +	msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + +	memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + +	memcpy(hdcp->auth.msg.hdcp2.ake_stored_km, +	       &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)], +	       sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + +	if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { +		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; +		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0; +		return MOD_HDCP_STATUS_SUCCESS; +	} + +	return MOD_HDCP_STATUS_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME; +	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME; + +	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime, +	       sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); + +	if (!hdcp->connection.is_km_stored) { +		msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO; +		msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO; +		memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)], +		       hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); +	} + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; + +	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; +	else if (!hdcp->connection.is_km_stored && +		 msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) +		
return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE; + + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE; + +	memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.lc_init)); + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->process.msg1_desc.msg_id = 
TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME; +	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME; + +	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime, +	       sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; + +	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS; + +	if (is_dp_hdcp(hdcp)) +		msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE; + +	memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.ske_eks)); +	msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks); + +	if 
(is_dp_hdcp(hdcp)) { +		memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp, +		       &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)], +		       sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); +	} + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct mod_hdcp_display *display = get_first_added_display(hdcp); + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	if (!display) +		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + +	hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION; +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE; + +	if (!is_dp_mst_hdcp(hdcp)) { +		display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +		HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index); +	} + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = 
&hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST; +	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list); +	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list, +	       sizeof(hdcp->auth.msg.hdcp2.rx_id_list)); + +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK; + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; + +	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); + +	if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { +		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; +		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0; +		return MOD_HDCP_STATUS_SUCCESS; +	} + + +	return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	uint8_t i; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + + +	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { +		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || +		    hdcp->connection.displays[i].adjust.disable) +			continue; +		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; +		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + +		hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION; +		psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +			break; + +		hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +		HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); +	} + +	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? 
MOD_HDCP_STATUS_SUCCESS +								  : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp) +{ + +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE; + + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE; + +	hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size; + +	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0], +	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage)); + +	return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; +	msg_out 
= &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + +	hdcp2_message_init(hdcp, msg_in); + +	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY; + +	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready); + +	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, +	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); + +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) && +			       (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) +		       ? MOD_HDCP_STATUS_SUCCESS +		       : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, +							       enum mod_hdcp_encryption_status *encryption_status) +{ +	struct psp_context *psp = hdcp->config.psp.handle; +	struct ta_hdcp_shared_memory *hdcp_cmd; + +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + +	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + +	hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id; +	hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0; +	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS; +	*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + +	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + +	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) +		return MOD_HDCP_STATUS_FAILURE; + +	if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) { +		if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1) +			*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; +		else +			*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; +	} + +	return 
MOD_HDCP_STATUS_SUCCESS; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 986fc07ea9ea..82a5e997d573 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -36,6 +36,11 @@ enum bgd_security_hdcp_encryption_level {  	HDCP_ENCRYPTION_LEVEL__ON  }; +enum bgd_security_hdcp2_content_type { +	HDCP2_CONTENT_TYPE__INVALID = 0, +	HDCP2_CONTENT_TYPE__TYPE0, +	HDCP2_CONTENT_TYPE__TYPE1 +};  enum ta_dtm_command {  	TA_DTM_COMMAND__UNUSED_1 = 1,  	TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, @@ -121,8 +126,64 @@ enum ta_hdcp_command {  	TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,  	TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,  	TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, +	TA_HDCP_COMMAND__UNUSED_1, +	TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION, +	TA_HDCP_COMMAND__UNUSED_2, +	TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION, +	TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS, +	TA_HDCP_COMMAND__UNUSED_3, +	TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2, +	TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2, +	TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION +}; + +enum ta_hdcp2_msg_id { +	TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1, +	TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2, +	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3, +	TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4, +	TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5, +	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6, +	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7, +	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8, +	TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9, +	TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10, +	TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11, +	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12, +	TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13, +	TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14, +	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15, +	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16, +	
TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17, +	TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18, +	TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19, +	TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20, +	TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129  }; +enum ta_hdcp2_hdcp2_msg_id_max_size { +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RACK = 17, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6, +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1 +};  /* HDCP related enumerations */  /**********************************************************/ @@ -131,6 +192,12 @@ enum ta_hdcp_command {  #define TA_HDCP__HDCP1_KSV_SIZE 5  #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127  #define TA_HDCP__HDCP1_V_PRIME_SIZE 20 +#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE                                                                                 \ +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + +// 64 
bits boundaries +#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE                                                                                 \ +	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4  enum ta_hdcp_status {  	TA_HDCP_STATUS__SUCCESS = 0x00, @@ -165,9 +232,47 @@ enum ta_hdcp_authentication_status {  	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,  	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,  	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, +	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06, +	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07, +	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,  	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09  }; +enum ta_hdcp2_msg_authentication_status { +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // everything above does not fail the request +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH, +	TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST +}; + +enum ta_hdcp_content_type { +	TA_HDCP2_CONTENT_TYPE__TYPE0 = 1, +	TA_HDCP2_CONTENT_TYPE__TYPE1, +}; + +enum ta_hdcp_content_type_negotiation_type { +	TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1, +	
TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1, +	TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED +}; + +enum ta_hdcp2_version { +	TA_HDCP2_VERSION_UNKNOWN = 0, +	TA_HDCP2_VERSION_2_0 = 20, +	TA_HDCP2_VERSION_2_1 = 21, +	TA_HDCP2_VERSION_2_2 = 22 +};  /* input/output structures for HDCP commands */  /**********************************************************/ @@ -232,6 +337,84 @@ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {  	uint32_t protection_level;  }; +struct ta_hdcp_cmd_hdcp2_create_session_input_v2 { +	uint32_t display_handle; +	enum ta_hdcp_content_type_negotiation_type negotiate_content_type; +}; + +struct ta_hdcp_cmd_hdcp2_create_session_output_v2 { +	uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_destroy_session_input { +	uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_authentication_message_v2 { +	enum ta_hdcp2_msg_id msg_id; +	uint32_t msg_size; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 { +	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; +	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; +	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc; +	uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 { +	uint32_t hdcp_version; +	uint32_t is_km_stored; +	uint32_t is_locality_precompute_support; +	uint32_t is_repeater; +	enum ta_hdcp2_msg_authentication_status msg1_status; +	enum ta_hdcp2_msg_authentication_status msg2_status; +	enum ta_hdcp2_msg_authentication_status msg3_status; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 { +	enum ta_hdcp2_msg_id msg1_id; +	enum ta_hdcp2_msg_id msg2_id; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 { +	enum ta_hdcp2_msg_authentication_status msg1_status; +	enum ta_hdcp2_msg_authentication_status msg2_status; +	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; +	struct 
ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; +	uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 { +	uint32_t session_handle; +	struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process; +	struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 { +	uint32_t authentication_status; +	struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process; +	struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_set_encryption_input { +	uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_input { +	uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_output { +	enum ta_hdcp_content_type hdcp2_type; +	uint32_t protection_level; +}; + +struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input { +	uint32_t session_handle; +	uint32_t display_handle; +}; +  /**********************************************************/  /* Common input structure for HDCP callbacks */  union ta_hdcp_cmd_input { @@ -242,6 +425,13 @@ union ta_hdcp_cmd_input {  	struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;  	struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;  	struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; +	struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session; +	struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption; +	struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status; +	struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 +		hdcp2_prepare_process_authentication_message_v2; +	struct 
ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;  };  /* Common output structure for HDCP callbacks */ @@ -250,6 +440,10 @@ union ta_hdcp_cmd_output {  	struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;  	struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;  	struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; +	struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status; +	struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2; +	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 +		hdcp2_prepare_process_authentication_message_v2;  };  /**********************************************************/ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index dea21702edff..f2a0e1a064da 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -77,6 +77,7 @@ enum mod_hdcp_status {  	MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,  	MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,  	MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, +	MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,  	MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,  	MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,  	MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, @@ -86,6 +87,7 @@ enum mod_hdcp_status {  	MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,  	MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,  	MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, +	MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,  	MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,  	MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,  	MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, @@ -156,12 +158,18 @@ struct mod_hdcp_link_adjustment_hdcp1 {  	uint8_t reserved		: 6;  }; +enum mod_hdcp_force_hdcp_type { +	MOD_HDCP_FORCE_TYPE_MAX = 
0, +	MOD_HDCP_FORCE_TYPE_0, +	MOD_HDCP_FORCE_TYPE_1 +}; +  struct mod_hdcp_link_adjustment_hdcp2 {  	uint8_t disable			: 1; -	uint8_t disable_type1		: 1; +	uint8_t force_type		: 2;  	uint8_t force_no_stored_km	: 1;  	uint8_t increase_h_prime_timeout: 1; -	uint8_t reserved		: 4; +	uint8_t reserved		: 3;  };  struct mod_hdcp_link_adjustment { @@ -184,7 +192,8 @@ enum mod_hdcp_encryption_status {  	MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,  	MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,  	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, -	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON +	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON, +	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON  };  /* per link events dm has to notify to hdcp module */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index ca8ce3c55337..42cbeffac640 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -26,6 +26,7 @@  #ifndef MOD_INFO_PACKET_H_  #define MOD_INFO_PACKET_H_ +#include "dm_services.h"  #include "mod_shared.h"  //Forward Declarations  struct dc_stream_state; @@ -33,7 +34,8 @@ struct dc_info_packet;  struct mod_vrr_params;  void mod_build_vsc_infopacket(const struct dc_stream_state *stream, -		struct dc_info_packet *info_packet); +		struct dc_info_packet *info_packet, +		bool *use_vsc_sdp_for_colorimetry);  void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,  		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index b45f7d65e76a..fe2117904329 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -45,7 +45,6 @@ enum vrr_packet_type {  	PACKET_TYPE_VTEM  }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0)  union lut3d_control_flags {  	
unsigned int raw;  	struct { @@ -104,6 +103,5 @@ struct lut3d_settings {  	enum lut3d_control_gamut_map map2;  	enum lut3d_control_rotation_mode rotation2;  }; -#endif  #endif /* MOD_SHARED_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index db6b08f6d093..6a8a056424b8 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -30,6 +30,20 @@  #include "mod_freesync.h"  #include "dc.h" +enum vsc_packet_revision { +	vsc_packet_undefined = 0, +	//01h = VSC SDP supports only 3D stereo. +	vsc_packet_rev1 = 1, +	//02h = 3D stereo + PSR. +	vsc_packet_rev2 = 2, +	//03h = 3D stereo + PSR2. +	vsc_packet_rev3 = 3, +	//04h = 3D stereo + PSR/PSR2 + Y-coordinate. +	vsc_packet_rev4 = 4, +	//05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format +	vsc_packet_rev5 = 5, +}; +  #define HDMI_INFOFRAME_TYPE_VENDOR 0x81  #define HF_VSIF_VERSION 1 @@ -116,35 +130,41 @@ enum ColorimetryYCCDP {  };  void mod_build_vsc_infopacket(const struct dc_stream_state *stream, -		struct dc_info_packet *info_packet) +		struct dc_info_packet *info_packet, +		bool *use_vsc_sdp_for_colorimetry)  { -	unsigned int vscPacketRevision = 0; +	unsigned int vsc_packet_revision = vsc_packet_undefined;  	unsigned int i;  	unsigned int pixelEncoding = 0;  	unsigned int colorimetryFormat = 0;  	bool stereo3dSupport = false; +	/* Initialize first, later if infopacket is valid determine if VSC SDP +	 * should be used to signal colorimetry format and pixel encoding. 
+	 */ +	*use_vsc_sdp_for_colorimetry = false; +  	if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { -		vscPacketRevision = 1; +		vsc_packet_revision = vsc_packet_rev1;  		stereo3dSupport = true;  	}  	/*VSC packet set to 2 when DP revision >= 1.2*/  	if (stream->psr_version != 0) -		vscPacketRevision = 2; +		vsc_packet_revision = vsc_packet_rev2;  	/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */  	if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&  			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) -		vscPacketRevision = 5; +		vsc_packet_revision = vsc_packet_rev5;  	/* VSC packet not needed based on the features  	 * supported by this DP display  	 */ -	if (vscPacketRevision == 0) +	if (vsc_packet_revision == vsc_packet_undefined)  		return; -	if (vscPacketRevision == 0x2) { +	if (vsc_packet_revision == vsc_packet_rev2) {  		/* Secondary-data Packet ID = 0*/  		info_packet->hb0 = 0x00;  		/* 07h - Packet Type Value indicating Video @@ -166,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  		info_packet->valid = true;  	} -	if (vscPacketRevision == 0x1) { +	if (vsc_packet_revision == vsc_packet_rev1) {  		info_packet->hb0 = 0x00;	// Secondary-data Packet ID = 0  		info_packet->hb1 = 0x07;	// 07h = Packet Type Value indicating Video Stream Configuration packet @@ -237,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  	 *   the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and  	 *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)  	 
*/ -	if (vscPacketRevision == 0x5) { +	if (vsc_packet_revision == vsc_packet_rev5) {  		/* Secondary-data Packet ID = 0 */  		info_packet->hb0 = 0x00;  		/* 07h - Packet Type Value indicating Video Stream Configuration packet */ @@ -249,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  		info_packet->valid = true; +		/* If we are using VSC SDP revision 05h, use this to signal for +		 * colorimetry format and pixel encoding. HW should later be +		 * programmed to set MSA MISC1 bit 6 to indicate ignore +		 * colorimetry format and pixel encoding in the MSA. +		 */ +		*use_vsc_sdp_for_colorimetry = true; +  		/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs  		 * Data Bytes DB 18~16  		 * Bits 3:0 (Colorimetry Format)        |  Bits 7:4 (Pixel Encoding) @@ -393,7 +420,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,  		 */  		info_packet->sb[18] = 0;  	} -  }  /** diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 4e2f615c3566..e75a4bb94488 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -662,7 +662,11 @@ bool dmcu_load_iram(struct dmcu *dmcu,  	memset(&ram_table, 0, sizeof(ram_table)); -	if (dmcu->dmcu_version.abm_version == 0x23) { +	if (dmcu->dmcu_version.abm_version == 0x24) { +		fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params); +		result = dmcu->funcs->load_iram( +				dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); +	} else if (dmcu->dmcu_version.abm_version == 0x23) {  		fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);  		result = dmcu->funcs->load_iram( @@ -687,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu,  	return result;  } +  |