diff options
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
129 files changed, 5352 insertions, 1031 deletions
| diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 1090d235086a..bd1f60ecaba4 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -101,6 +101,40 @@ void convert_float_matrix(  	}  } +static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg, +						 uint8_t integer_bits, +						 uint8_t fractional_bits) +{ +	struct fixed31_32 result; +	uint16_t sign_mask = 1 << (fractional_bits + integer_bits); +	uint16_t value_mask = sign_mask - 1; + +	result.value = (long long)(arg & value_mask) << +		       (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + +	if (arg & sign_mask) +		result = dc_fixpt_neg(result); + +	return result; +} + +/** + * convert_hw_matrix - converts HW values into fixed31_32 matrix. + * @matrix: fixed point 31.32 matrix + * @reg: array of register values + * @buffer_size: size of the array of register values + * + * Converts HW register spec defined format S2D13 into a fixed-point 31.32 + * matrix. 
+ */ +void convert_hw_matrix(struct fixed31_32 *matrix, +		       uint16_t *reg, +		       uint32_t buffer_size) +{ +	for (int i = 0; i < buffer_size; ++i) +		matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13); +} +  static uint32_t find_gcd(uint32_t a, uint32_t b)  {  	uint32_t remainder; diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h index 81da4e6f7a1a..a433cef78496 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h @@ -41,6 +41,10 @@ void convert_float_matrix(  void reduce_fraction(uint32_t num, uint32_t den,  		uint32_t *out_num, uint32_t *out_den); +void convert_hw_matrix(struct fixed31_32 *matrix, +		       uint16_t *reg, +		       uint32_t buffer_size); +  static inline unsigned int log_2(unsigned int num)  {  	return ilog2(num); diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index 39530b2ea495..b30c2cdc1a61 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "resource.h"  #include "dm_services.h"  #include "dce_calcs.h" diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 818a529cacc3..86f9198e7501 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -37,7 +37,7 @@  #define EXEC_BIOS_CMD_TABLE(command, params)\  	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \  		GetIndexIntoMasterTable(COMMAND, command), \ -		(uint32_t *)¶ms) == 0) +		(uint32_t *)¶ms, sizeof(params)) == 0)  #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\  	amdgpu_atom_parse_cmd_header(((struct amdgpu_device 
*)bp->base.ctx->driver_context)->mode_info.atom_context, \ diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 293a919d605d..cbae1be7b009 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -49,7 +49,7 @@  #define EXEC_BIOS_CMD_TABLE(fname, params)\  	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \  		GET_INDEX_INTO_MASTER_TABLE(command, fname), \ -		(uint32_t *)¶ms) == 0) +		(uint32_t *)¶ms, sizeof(params)) == 0)  #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\  	amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 9d347960e2b0..117fc6d4c1de 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -81,6 +81,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(  	case DCN_VERSION_3_2:  	case DCN_VERSION_3_21:  	case DCN_VERSION_3_5: +	case DCN_VERSION_3_51:  		*h = dal_cmd_tbl_helper_dce112_get_table2();  		return true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 28a2a837d2f0..9f0f25aee426 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "dal_asic_id.h"  #include "dc_types.h"  #include "dccg.h" @@ -340,7 +338,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p  	    dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);  	    return &clk_mgr->base; -	    break;  	}  	case AMDGPU_FAMILY_GC_11_0_1: { diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..b77804cfde0f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -132,7 +132,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)  	int dprefclk_wdivider;  	int dprefclk_src_sel;  	int dp_ref_clk_khz; -	int target_div; +	int target_div = 600000;  	/* ASSERT DP Reference Clock source is from DFS*/  	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 60761ff3cbf1..2a74e2d74909 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "reg_helper.h"  #include "core_types.h"  #include "clk_mgr_internal.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c deleted file mode 100644 index 61dd12198a3c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "clk_mgr_internal.h" -#include "rv1_clk_mgr_clk.h" - -#include "ip/Discovery/hwid.h" -#include "ip/Discovery/v1/ip_offset_1.h" -#include "ip/CLK/clk_10_0_default.h" -#include "ip/CLK/clk_10_0_offset.h" -#include "ip/CLK/clk_10_0_reg.h" -#include "ip/CLK/clk_10_0_sh_mask.h" - -#include "dce100/dce_clk_mgr.h" - -#define CLK_BASE_INNER(inst) \ -	CLK_BASE__INST ## inst ## _SEG0 - - -#define CLK_REG(reg_name, block, inst)\ -	CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ -					mm ## block ## _ ## inst ## _ ## reg_name - -#define REG(reg_name) \ -	CLK_REG(reg_name, CLK0, 0) - - -/* Only used by testing framework*/ -void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base) -{ -	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - -		regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk - -		bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007; -		if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4) -			bypass->dcfclk_bypass = 0; - - -		regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10;	//dcf deep sleep divider - -		regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow - -		regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk - -		bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007; -		if 
(bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4) -			bypass->dispclk_pypass = 0; - -		regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk - -		bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007; -		if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4) -			bypass->dprefclk_bypass = 0; - -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 0c6a4ab72b1d..e3e1940198a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -707,9 +707,7 @@ void rn_clk_mgr_construct(  	int is_green_sardine = 0;  	struct clk_log_info log_info = {0}; -#if defined(CONFIG_DRM_AMD_DC_FP)  	is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); -#endif  	clk_mgr->base.ctx = ctx;  	clk_mgr->base.funcs = &dcn21_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 8c9d45e5b13b..23b390245b5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -26,6 +26,10 @@  #include "core_types.h"  #include "clk_mgr_internal.h"  #include "reg_helper.h" +#include "dm_helpers.h" + +#include "rn_clk_mgr_vbios_smu.h" +  #include <linux/delay.h>  #include "renoir_ip_offset.h" @@ -33,8 +37,6 @@  #include "mp/mp_12_0_0_offset.h"  #include "mp/mp_12_0_0_sh_mask.h" -#include "rn_clk_mgr_vbios_smu.h" -  #define REG(reg_name) \  	(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -120,7 +122,10 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,  	result = rn_smu_wait_for_response(clk_mgr, 10, 200000); -	ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd); +	if 
(IS_SMU_TIMEOUT(result)) { +		ASSERT(0); +		dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); +	}  	/* Actual dispclk set is returned in the parameter register */  	return REG_READ(MP1_SMN_C2PMSG_83); @@ -185,10 +190,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index e4f96b6fd79d..b4fb17b7a096 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -29,6 +29,7 @@  #include <linux/delay.h>  #include "dcn301_smu.h" +#include "dm_helpers.h"  #include "vangogh_ip_offset.h" @@ -120,7 +121,10 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,  	result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000); -	ASSERT(result == VBIOSSMC_Result_OK); +	if (IS_SMU_TIMEOUT(result)) { +		ASSERT(0); +		dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); +	}  	/* Actual dispclk set is returned in the parameter register */  	return REG_READ(MP1_SMN_C2PMSG_83); @@ -180,10 +184,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 32279c5db724..6904e95113c1 100644 --- 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -202,10 +202,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 07baa10a8647..c4af406146b7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", -			actual_dcfclk_set_mhz, -			actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 1042cf1a3ab0..879f1494c4cd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c index 3ed19197a755..8b82092b91cd 100644 --- 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,  			khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG -	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif -  	return actual_dcfclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index aadd07bc68c5..668f05c8654e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -243,10 +243,8 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)  	/* Get UCLK, update bounding box */  	clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); -	DC_FP_START();  	/* WM range table */  	dcn32_build_wm_range_table(clk_mgr); -	DC_FP_END();  }  static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, @@ -387,7 +385,15 @@ static void dcn32_update_clocks_update_dentist(  		uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;  		if (clk_mgr->smu_present) -			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); +			/* +			 * SMU uses discrete dispclk presets. We applied +			 * the same formula to increase our dppclk_khz +			 * to the next matching discrete value. By +			 * contract, we should use the preset dispclk +			 * floored in Mhz to describe the intended clock. 
+			 */ +			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, +					khz_to_mhz_floor(temp_dispclk_khz));  		if (dc->debug.override_dispclk_programming) {  			REG_GET(DENTIST_DISPCLK_CNTL, @@ -426,7 +432,15 @@ static void dcn32_update_clocks_update_dentist(  	/* do requested DISPCLK updates*/  	if (clk_mgr->smu_present) -		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); +		/* +		 * SMU uses discrete dispclk presets. We applied +		 * the same formula to increase our dppclk_khz +		 * to the next matching discrete value. By +		 * contract, we should use the preset dispclk +		 * floored in Mhz to describe the intended clock. +		 */ +		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, +				khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz));  	if (dc->debug.override_dispclk_programming) {  		REG_GET(DENTIST_DISPCLK_CNTL, @@ -493,6 +507,8 @@ static void dcn32_auto_dpm_test_log(  		}  	} +	msleep(5); +  	mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;      dispclk_khz_reg    = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK @@ -734,7 +750,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;  		if (clk_mgr->smu_present && !dpp_clock_lowered) -			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); +			/* +			 * SMU uses discrete dppclk presets. We applied +			 * the same formula to increase our dppclk_khz +			 * to the next matching discrete value. By +			 * contract, we should use the preset dppclk +			 * floored in Mhz to describe the intended clock. 
+			 */ +			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, +					khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));  		update_dppclk = true;  	} @@ -765,7 +789,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  			dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);  			dcn32_update_clocks_update_dentist(clk_mgr, context);  			if (clk_mgr->smu_present) -				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); +				/* +				 * SMU uses discrete dppclk presets. We applied +				 * the same formula to increase our dppclk_khz +				 * to the next matching discrete value. By +				 * contract, we should use the preset dppclk +				 * floored in Mhz to describe the intended clock. +				 */ +				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, +						khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));  		} else {  			/* if clock is being raised, increase refclk before lowering DTO */  			if (update_dppclk || update_dispclk) @@ -783,7 +815,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,  		dmcu->funcs->set_psr_wait_loop(dmcu,  				clk_mgr_base->clks.dispclk_khz / 1000 / 7); -	if (dc->config.enable_auto_dpm_test_logs) { +	if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {  	    dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index a34c258c19dc..c76352a817de 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -36,8 +36,7 @@  #define DALSMC_MSG_SetCabForUclkPstate	0x12  #define DALSMC_Result_OK				0x1 -void -dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); +void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);  void 
dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);  void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);  void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index e64890259235..c378b879c76d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -384,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)  	dcn35_smu_enable_pme_wa(clk_mgr);  } -void dcn35_init_clocks(struct clk_mgr *clk_mgr) -{ -	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; - -	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - -	// Assumption is that boot state always supports pstate -	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk -	clk_mgr->clks.p_state_change_support = true; -	clk_mgr->clks.prev_p_state_change_support = true; -	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; -	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; -}  bool dcn35_are_clock_states_equal(struct dc_clocks *a,  		struct dc_clocks *b) @@ -422,6 +409,22 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs  {  } +static void init_clk_states(struct clk_mgr *clk_mgr) +{ +	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; +	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + +	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk +	clk_mgr->clks.p_state_change_support = true; +	clk_mgr->clks.prev_p_state_change_support = true; +	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; +	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; +} + +void dcn35_init_clocks(struct clk_mgr *clk_mgr) +{ +	init_clk_states(clk_mgr); +}  static struct clk_bw_params dcn35_bw_params = {  	.vram_type = Ddr4MemType,  	.num_channels 
= 1, @@ -833,7 +836,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)  	}  } -static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) +static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);  	struct dc *dc = clk_mgr_base->ctx->dc; @@ -881,7 +884,7 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)  	return ips_supported;  } -static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) +static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -890,7 +893,7 @@ static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)  static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)  { -	dcn35_init_clocks(clk_mgr); +	init_clk_states(clk_mgr);  /* TODO: Implement the functions and remove the ifndef guard */  } @@ -975,8 +978,8 @@ static struct clk_mgr_funcs dcn35_funcs = {  	.set_low_power_state = dcn35_set_low_power_state,  	.exit_low_power_state = dcn35_exit_low_power_state,  	.is_ips_supported = dcn35_is_ips_supported, -	.set_idle_state = dcn35_set_idle_state, -	.get_idle_state = dcn35_get_idle_state +	.set_idle_state = dcn35_set_ips_idle_state, +	.get_idle_state = dcn35_get_ips_idle_state  };  struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index 6d4a1ffab5ed..9e588c56c570 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst  	case DCN_ZSTATE_SUPPORT_ALLOW:  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = (1 << 10) | (1 << 9) | (1 << 8); -		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, 
param = %d\n", __func__, param); +		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);  		break;  	case DCN_ZSTATE_SUPPORT_DISALLOW:  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = 0; -		smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n",  __func__, param); +		smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n",  __func__, param);  		break;  	case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = (1 << 10); -		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param); +		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);  		break;  	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = (1 << 10) | (1 << 8); -		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param); +		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);  		break;  	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:  		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;  		param = (1 << 8); -		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param); +		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);  		break;  	default: //DCN_ZSTATE_SUPPORT_UNKNOWN @@ -400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst  		clk_mgr,  		msg_id,  		param); -	smu_print("%s:  msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv); +	smu_print("%s:  msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);  }  int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) @@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)  void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct 
clk_mgr_internal *clk_mgr, bool enable)  { +	if (!clk_mgr->smu_present) +		return; +  	dcn35_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, @@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)  {  	int retv; +	if (!clk_mgr->smu_present) +		return 0; +  	retv = dcn35_smu_send_msg_with_param(  		clk_mgr,  		VBIOSSMC_MSG_DispPsrExit, @@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)  {  	int retv; +	if (!clk_mgr->smu_present) +		return 0; +  	retv = dcn35_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_QueryIPS2Support, @@ -481,6 +490,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)  void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)  { +	if (!clk_mgr->smu_present) +		return; +  	REG_WRITE(MP1_SMN_C2PMSG_71, param);  	//smu_print("%s: write_ips_scratch = %x\n", __func__, param);  } @@ -489,6 +501,9 @@ uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)  {  	uint32_t retv; +	if (!clk_mgr->smu_present) +		return 0; +  	retv = REG_READ(MP1_SMN_C2PMSG_71);  	//smu_print("%s: dcn35_smu_read_ips_scratch = %x\n",  __func__, retv);  	return retv; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2c424e435962..5211c1c0f3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -414,6 +414,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,  		if (dc->optimized_required || dc->wm_optimized_required)  			return false; +	dc_exit_ips_for_hw_access(dc); +  	stream->adjust.v_total_max = adjust->v_total_max;  	stream->adjust.v_total_mid = adjust->v_total_mid;  	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; @@ -454,6 +456,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,  	int i = 0; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		struct 
pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -484,6 +488,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,  	bool ret = false;  	struct crtc_position position; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		struct pipe_ctx *pipe =  				&dc->current_state->res_ctx.pipe_ctx[i]; @@ -603,6 +609,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,  	if (pipe == NULL)  		return false; +	dc_exit_ips_for_hw_access(dc); +  	/* By default, capture the full frame */  	param.windowa_x_start = 0;  	param.windowa_y_start = 0; @@ -662,6 +670,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,  	struct pipe_ctx *pipe;  	struct timing_generator *tg; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		pipe = &dc->current_state->res_ctx.pipe_ctx[i];  		if (pipe->stream == stream) @@ -686,6 +696,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,  	int i;  	struct pipe_ctx *pipe_ctx; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		if (dc->current_state->res_ctx.pipe_ctx[i].stream  				== stream) { @@ -721,6 +733,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,  	if (option > DITHER_OPTION_MAX)  		return; +	dc_exit_ips_for_hw_access(stream->ctx->dc); +  	stream->dither_option = option;  	memset(¶ms, 0, sizeof(params)); @@ -745,6 +759,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre  	bool ret = false;  	struct pipe_ctx *pipes; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {  			pipes = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -762,6 +778,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)  	bool ret = false;  	struct pipe_ctx *pipes; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		if 
(dc->current_state->res_ctx.pipe_ctx[i].stream  				== stream) { @@ -788,6 +806,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,  	struct pipe_ctx *pipes_affected[MAX_PIPES];  	int num_pipes_affected = 0; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < num_streams; i++) {  		struct dc_stream_state *stream = streams[i]; @@ -1766,6 +1786,8 @@ void dc_enable_stereo(  	int i, j;  	struct pipe_ctx *pipe; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		if (context != NULL) {  			pipe = &context->res_ctx.pipe_ctx[i]; @@ -1785,6 +1807,8 @@ void dc_enable_stereo(  void dc_trigger_sync(struct dc *dc, struct dc_state *context)  {  	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { +		dc_exit_ips_for_hw_access(dc); +  		enable_timing_multisync(dc, context);  		program_timing_sync(dc, context);  	} @@ -2008,7 +2032,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c  	return result;  } -static bool commit_minimal_transition_state(struct dc *dc, +static bool commit_minimal_transition_state_legacy(struct dc *dc,  		struct dc_state *transition_base_context);  /** @@ -2041,6 +2065,8 @@ enum dc_status dc_commit_streams(struct dc *dc,  	if (!streams_changed(dc, streams, stream_count))  		return res; +	dc_exit_ips_for_hw_access(dc); +  	DC_LOG_DC("%s: %d streams\n", __func__, stream_count);  	for (i = 0; i < stream_count; i++) { @@ -2072,7 +2098,7 @@ enum dc_status dc_commit_streams(struct dc *dc,  	}  	if (handle_exit_odm2to1) -		res = commit_minimal_transition_state(dc, dc->current_state); +		res = commit_minimal_transition_state_legacy(dc, dc->current_state);  	context = dc_state_create_current_copy(dc);  	if (!context) @@ -2428,6 +2454,10 @@ static enum surface_update_type get_scaling_info_update_type(  		 /* Changing clip size of a large surface may result in MPC slice count change */  		update_flags->bits.bandwidth_change = 1; +	if (u->scaling_info->clip_rect.width != 
u->surface->clip_rect.width || +			u->scaling_info->clip_rect.height != u->surface->clip_rect.height) +		update_flags->bits.clip_size_change = 1; +  	if (u->scaling_info->src_rect.x != u->surface->src_rect.x  			|| u->scaling_info->src_rect.y != u->surface->src_rect.y  			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2441,7 +2471,8 @@ static enum surface_update_type get_scaling_info_update_type(  			|| update_flags->bits.scaling_change)  		return UPDATE_TYPE_FULL; -	if (update_flags->bits.position_change) +	if (update_flags->bits.position_change || +			update_flags->bits.clip_size_change)  		return UPDATE_TYPE_MED;  	return UPDATE_TYPE_FAST; @@ -2921,8 +2952,8 @@ static void copy_stream_update_to_stream(struct dc *dc,  	}  } -static void backup_plane_states_for_stream( -		struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void backup_planes_and_stream_state( +		struct dc_scratch_space *scratch,  		struct dc_stream_state *stream)  {  	int i; @@ -2931,12 +2962,20 @@ static void backup_plane_states_for_stream(  	if (!status)  		return; -	for (i = 0; i < status->plane_count; i++) -		plane_states[i] = *status->plane_states[i]; +	for (i = 0; i < status->plane_count; i++) { +		scratch->plane_states[i] = *status->plane_states[i]; +		scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction; +		scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func; +		scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func; +		scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func; +		scratch->blend_tf[i] = *status->plane_states[i]->blend_tf; +	} +	scratch->stream_state = *stream; +	scratch->out_transfer_func = *stream->out_transfer_func;  } -static void restore_plane_states_for_stream( -		struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void restore_planes_and_stream_state( +		struct dc_scratch_space *scratch,  		struct dc_stream_state *stream)  {  	int i; @@ -2945,8 +2984,16 @@ static void 
restore_plane_states_for_stream(  	if (!status)  		return; -	for (i = 0; i < status->plane_count; i++) -		*status->plane_states[i] = plane_states[i]; +	for (i = 0; i < status->plane_count; i++) { +		*status->plane_states[i] = scratch->plane_states[i]; +		*status->plane_states[i]->gamma_correction = scratch->gamma_correction[i]; +		*status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i]; +		*status->plane_states[i]->lut3d_func = scratch->lut3d_func[i]; +		*status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i]; +		*status->plane_states[i]->blend_tf = scratch->blend_tf[i]; +	} +	*stream = scratch->stream_state; +	*stream->out_transfer_func = scratch->out_transfer_func;  }  static bool update_planes_and_stream_state(struct dc *dc, @@ -2972,7 +3019,7 @@ static bool update_planes_and_stream_state(struct dc *dc,  	}  	context = dc->current_state; -	backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); +	backup_planes_and_stream_state(&dc->current_state->scratch, stream);  	update_type = dc_check_update_surfaces_for_stream(  			dc, srf_updates, surface_count, stream_update, stream_status); @@ -3072,7 +3119,7 @@ static bool update_planes_and_stream_state(struct dc *dc,  	*new_context = context;  	*new_update_type = update_type; -	backup_plane_states_for_stream(context->scratch.plane_states, stream); +	backup_planes_and_stream_state(&context->scratch, stream);  	return true; @@ -3376,6 +3423,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,  	int i, j;  	struct pipe_ctx *top_pipe_to_program = NULL;  	struct dc_stream_status *stream_status = NULL; +	dc_exit_ips_for_hw_access(dc); +  	dc_z10_restore(dc);  	top_pipe_to_program = resource_get_otg_master_for_stream( @@ -3503,10 +3552,23 @@ static void commit_planes_for_stream(struct dc *dc,  	// dc->current_state anymore, so we have to cache it before we apply  	// the new SubVP context  	subvp_prev_use = false; +	dc_exit_ips_for_hw_access(dc); +  	
dc_z10_restore(dc);  	if (update_type == UPDATE_TYPE_FULL)  		wait_for_outstanding_hw_updates(dc, context); +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->stream && pipe->plane_state) { +			set_p_state_switch_method(dc, context, pipe); + +			if (dc->debug.visual_confirm) +				dc_update_visual_confirm_color(dc, context, pipe); +		} +	} +  	if (update_type == UPDATE_TYPE_FULL) {  		dc_allow_idle_optimizations(dc, false); @@ -3541,17 +3603,6 @@ static void commit_planes_for_stream(struct dc *dc,  		}  	} -	for (i = 0; i < dc->res_pool->pipe_count; i++) { -		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - -		if (pipe->stream && pipe->plane_state) { -			set_p_state_switch_method(dc, context, pipe); - -			if (dc->debug.visual_confirm) -				dc_update_visual_confirm_color(dc, context, pipe); -		} -	} -  	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {  		struct pipe_ctx *mpcc_pipe;  		struct pipe_ctx *odm_pipe; @@ -4012,7 +4063,23 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,  	return minimal_transition_context;  } -static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, + +/** + * commit_minimal_transition_state - Commit a minimal state based on current or new context + * + * @dc: DC structure, used to get the current state + * @context: New context + * @stream: Stream getting the update for the flip + * + * The function takes in current state and new state and determine a minimal transition state + * as the intermediate step which could make the transition between current and new states + * seamless. If found, it will commit the minimal transition state and update current state to + * this minimal transition state and return true, if not, it will return false. 
+ * + * Return: + * Return True if the minimal transition succeeded, false otherwise + */ +static bool commit_minimal_transition_state(struct dc *dc,  		struct dc_state *context,  		struct dc_stream_state *stream)  { @@ -4021,12 +4088,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,  	struct pipe_split_policy_backup policy;  	/* commit based on new context */ -	/* Since all phantom pipes are removed in full validation, -	 * we have to save and restore the subvp/mall config when -	 * we do a minimal transition since the flags marking the -	 * pipe as subvp/phantom will be cleared (dc copy constructor -	 * creates a shallow copy). -	 */  	minimal_transition_context = create_minimal_transition_state(dc,  			context, &policy);  	if (minimal_transition_context) { @@ -4043,7 +4104,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,  	if (!success) {  		/* commit based on current context */ -		restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); +		restore_planes_and_stream_state(&dc->current_state->scratch, stream);  		minimal_transition_context = create_minimal_transition_state(dc,  				dc->current_state, &policy);  		if (minimal_transition_context) { @@ -4056,7 +4117,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,  			}  			release_minimal_transition_state(dc, minimal_transition_context, &policy);  		} -		restore_plane_states_for_stream(context->scratch.plane_states, stream); +		restore_planes_and_stream_state(&context->scratch, stream);  	}  	ASSERT(success); @@ -4064,7 +4125,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,  }  /** - * commit_minimal_transition_state - Create a transition pipe split state + * commit_minimal_transition_state_legacy - Create a transition pipe split state   *   * @dc: Used to get the current state status   * @transition_base_context: New transition state @@ -4081,7 
+4142,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,   * Return:   * Return false if something is wrong in the transition state.   */ -static bool commit_minimal_transition_state(struct dc *dc, +static bool commit_minimal_transition_state_legacy(struct dc *dc,  		struct dc_state *transition_base_context)  {  	struct dc_state *transition_context; @@ -4319,53 +4380,6 @@ static bool fast_update_only(struct dc *dc,  			&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);  } -static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc, -		struct dc_stream_state *stream, -		struct dc_state *context) -{ -	struct pipe_ctx *cur_pipe, *new_pipe; -	bool cur_is_odm_in_use, new_is_odm_in_use; -	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); -	struct dc_stream_status *new_stream_status = stream_get_status(context, stream); - -	if (!dc->debug.enable_single_display_2to1_odm_policy || -			!dc->config.enable_windowed_mpo_odm) -		/* skip the check if windowed MPO ODM or dynamic ODM is turned -		 * off. 
-		 */ -		return false; - -	if (context == dc->current_state) -		/* skip the check for fast update */ -		return false; - -	if (new_stream_status->plane_count != cur_stream_status->plane_count) -		/* plane count changed, not a plane scaling update so not the -		 * case we are looking for -		 */ -		return false; - -	cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream); -	new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream); -	if (!cur_pipe || !new_pipe) -		return false; -	cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1; -	new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1; -	if (cur_is_odm_in_use == new_is_odm_in_use) -		/* ODM state isn't changed, not the case we are looking for */ -		return false; - -	if (dc->hwss.is_pipe_topology_transition_seamless && -			dc->hwss.is_pipe_topology_transition_seamless( -					dc, dc->current_state, context)) -		/* transition can be achieved without the need for committing -		 * minimal transition state first -		 */ -		return false; - -	return true; -} -  bool dc_update_planes_and_stream(struct dc *dc,  		struct dc_surface_update *srf_updates, int surface_count,  		struct dc_stream_state *stream, @@ -4384,6 +4398,8 @@ bool dc_update_planes_and_stream(struct dc *dc,  	bool is_plane_addition = 0;  	bool is_fast_update_only; +	dc_exit_ips_for_hw_access(dc); +  	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);  	is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,  			surface_count, stream_update, stream); @@ -4396,7 +4412,7 @@ bool dc_update_planes_and_stream(struct dc *dc,  	/* on plane addition, minimal state is the current one */  	if (force_minimal_pipe_splitting && is_plane_addition && -		!commit_minimal_transition_state(dc, dc->current_state)) +		!commit_minimal_transition_state_legacy(dc, dc->current_state))  				return false;  	if (!update_planes_and_stream_state( @@ -4411,32 +4427,19 @@ bool 
dc_update_planes_and_stream(struct dc *dc,  	/* on plane removal, minimal state is the new one */  	if (force_minimal_pipe_splitting && !is_plane_addition) { -		/* Since all phantom pipes are removed in full validation, -		 * we have to save and restore the subvp/mall config when -		 * we do a minimal transition since the flags marking the -		 * pipe as subvp/phantom will be cleared (dc copy constructor -		 * creates a shallow copy). -		 */ -		if (!commit_minimal_transition_state(dc, context)) { +		if (!commit_minimal_transition_state_legacy(dc, context)) {  			dc_state_release(context);  			return false;  		}  		update_type = UPDATE_TYPE_FULL;  	} -	/* when windowed MPO ODM is supported, we need to handle a special case -	 * where we can transition between ODM combine and MPC combine due to -	 * plane scaling update. This transition will require us to commit -	 * minimal transition state. The condition to trigger this update can't -	 * be predicted by could_mpcc_tree_change_for_active_pipes because we -	 * can only determine it after DML validation. Therefore we can't rely -	 * on the existing commit minimal transition state sequence. Instead -	 * we have to add additional handling here to handle this transition -	 * with its own special sequence. 
-	 */ -	if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context)) -		commit_minimal_transition_state_for_windowed_mpo_odm(dc, +	if (dc->hwss.is_pipe_topology_transition_seamless && +			!dc->hwss.is_pipe_topology_transition_seamless( +					dc, dc->current_state, context)) { +		commit_minimal_transition_state(dc,  				context, stream); +	}  	update_seamless_boot_flags(dc, context, surface_count, stream);  	if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {  		commit_planes_for_stream_fast(dc, @@ -4504,6 +4507,8 @@ void dc_commit_updates_for_stream(struct dc *dc,  	int i, j;  	struct dc_fast_update fast_update[MAX_SURFACES] = {0}; +	dc_exit_ips_for_hw_access(dc); +  	populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);  	stream_status = dc_stream_get_status(stream);  	context = dc->current_state; @@ -4688,6 +4693,8 @@ void dc_set_power_state(  	case DC_ACPI_CM_POWER_STATE_D0:  		dc_state_construct(dc, dc->current_state); +		dc_exit_ips_for_hw_access(dc); +  		dc_z10_restore(dc);  		dc->hwss.init_hw(dc); @@ -4829,6 +4836,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)  		dc->idle_optimizations_allowed = allow;  } +void dc_exit_ips_for_hw_access(struct dc *dc) +{ +	if (dc->caps.ips_support) +		dc_allow_idle_optimizations(dc, false); +} +  bool dc_dmub_is_ips_idle_state(struct dc *dc)  {  	uint32_t idle_state = 0; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 9fbdb09697fd..ec4bf9432bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -73,6 +73,7 @@  #include "dcn32/dcn32_resource.h"  #include "dcn321/dcn321_resource.h"  #include "dcn35/dcn35_resource.h" +#include "dcn351/dcn351_resource.h"  #define VISUAL_CONFIRM_BASE_DEFAULT 3  #define VISUAL_CONFIRM_BASE_MIN 1 @@ -195,6 +196,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id 
asic_id)  		break;  	case AMDGPU_FAMILY_GC_11_5_0:  		dc_version = DCN_VERSION_3_5; +		if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev)) +			dc_version = DCN_VERSION_3_51;  		break;  	default:  		dc_version = DCE_VERSION_UNKNOWN; @@ -303,6 +306,9 @@ struct resource_pool *dc_create_resource_pool(struct dc  *dc,  	case DCN_VERSION_3_5:  		res_pool = dcn35_create_resource_pool(init_data, dc);  		break; +	case DCN_VERSION_3_51: +		res_pool = dcn351_create_resource_pool(init_data, dc); +		break;  #endif /* CONFIG_DRM_AMD_DC_FP */  	default:  		break; @@ -1834,23 +1840,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx,  bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)  { -#ifdef DBG -	if (pipe_ctx->stream == NULL) { -		/* a free pipe with dangling states */ -		ASSERT(!pipe_ctx->plane_state); -		ASSERT(!pipe_ctx->prev_odm_pipe); -		ASSERT(!pipe_ctx->next_odm_pipe); -		ASSERT(!pipe_ctx->top_pipe); -		ASSERT(!pipe_ctx->bottom_pipe); -	} else if (pipe_ctx->top_pipe) { -		/* a secondary DPP pipe must be signed to a plane */ -		ASSERT(pipe_ctx->plane_state) -	} -	/* Add more checks here to prevent corrupted pipe ctx. It is very hard -	 * to debug this issue afterwards because we can't pinpoint the code -	 * location causing inconsistent pipe context states. 
-	 */ -#endif  	switch (type) {  	case OTG_MASTER:  		return !pipe_ctx->prev_odm_pipe && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 54670e0b1518..51a970fcb5d0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,  		return false;  	} +	dc_exit_ips_for_hw_access(dc); +  	wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;  	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; @@ -493,6 +495,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,  		return false;  	} +	dc_exit_ips_for_hw_access(dc); +  	if (dwb->funcs->set_fc_enable)  		dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); @@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,  		return false;  	} +	dc_exit_ips_for_hw_access(dc); +  	/* disable writeback */  	if (dc->hwss.disable_writeback) {  		struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; @@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,  		int num_dwb,  		struct dc_writeback_info *wb_info)  { +	dc_exit_ips_for_hw_access(dc); +  	if (dc->hwss.mmhubbub_warmup)  		return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);  	else @@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)  	struct resource_context *res_ctx =  		&dc->current_state->res_ctx; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,  	dc = stream->ctx->dc;  	res_ctx = &dc->current_state->res_ctx; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; @@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,  	struct 
resource_context *res_ctx =  		&dc->current_state->res_ctx; +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < MAX_PIPES; i++) {  		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)  	if (i == MAX_PIPES)  		return true; +	dc_exit_ips_for_hw_access(dc); +  	return dc->hwss.dmdata_status_done(pipe);  } @@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,  	pipe_ctx->stream->dmdata_address = attr->address; +	dc_exit_ips_for_hw_access(dc); +  	dc->hwss.program_dmdata_engine(pipe_ctx);  	if (hubp->funcs->dmdata_set_attributes != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 19a2c7140ae8..19140fb65787 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(  		break;  	} +	dc_exit_ips_for_hw_access(dc); +  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		struct pipe_ctx *pipe_ctx =  				&dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c9317ea0258e..ee8453bf958f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload;  struct set_config_cmd_payload;  struct dmub_notification; -#define DC_VER "3.2.266" +#define DC_VER "3.2.273"  #define MAX_SURFACES 3  #define MAX_PLANES 6 @@ -429,12 +429,12 @@ struct dc_config {  	bool force_bios_enable_lttpr;  	uint8_t force_bios_fixed_vs;  	int sdpif_request_limit_words_per_umc; -	bool use_old_fixed_vs_sequence;  	bool dc_mode_clk_limit_support;  	bool EnableMinDispClkODM;  	bool enable_auto_dpm_test_logs;  	unsigned int disable_ips;  	unsigned int disable_ips_in_vpb; +	bool usb4_bw_alloc_support;  };  enum visual_confirm { @@ -987,9 
+987,11 @@ struct dc_debug_options {  	bool psp_disabled_wa;  	unsigned int ips2_eval_delay_us;  	unsigned int ips2_entry_delay_us; +	bool disable_dmub_reallow_idle;  	bool disable_timeout;  	bool disable_extblankadj;  	unsigned int static_screen_wait_frames; +	bool force_chroma_subsampling_1tap;  };  struct gpu_info_soc_bounding_box_v1_0; @@ -1068,6 +1070,7 @@ struct dc {  	} scratch;  	struct dml2_configuration_options dml2_options; +	enum dc_acpi_cm_power_state power_state;  };  enum frame_buffer_mode { @@ -1249,6 +1252,7 @@ union surface_update_flags {  		uint32_t rotation_change:1;  		uint32_t swizzle_change:1;  		uint32_t scaling_change:1; +		uint32_t clip_size_change: 1;  		uint32_t position_change:1;  		uint32_t in_transfer_func_change:1;  		uint32_t input_csc_change:1; @@ -1568,7 +1572,19 @@ struct dc_link {  	enum engine_id dpia_preferred_eng_id;  	bool test_pattern_enabled; +	/* Pending/Current test pattern are only used to perform and track +	 * FIXED_VS retimer test pattern/lane adjustment override state. +	 * Pending allows link HWSS to differentiate PHY vs non-PHY pattern, +	 * to perform specific lane adjust overrides before setting certain +	 * PHY test patterns. In cases when lane adjust and set test pattern +	 * calls are not performed atomically (i.e. performing link training), +	 * pending_test_pattern will be invalid or contain a non-PHY test pattern +	 * and current_test_pattern will contain required context for any future +	 * set pattern/set lane adjust to transition between override state(s). 
+	 * */  	enum dp_test_pattern current_test_pattern; +	enum dp_test_pattern pending_test_pattern; +  	union compliance_test_state compliance_test_state;  	void *priv; @@ -2219,11 +2235,9 @@ struct dc_sink_dsc_caps {  	// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),  	// 'false' if they are sink's DSC caps  	bool is_virtual_dpcd_dsc; -#if defined(CONFIG_DRM_AMD_DC_FP)  	// 'true' if MST topology supports DSC passthrough for sink  	// 'false' if MST topology does not support DSC passthrough  	bool is_dsc_passthrough_supported; -#endif  	struct dsc_dec_dpcd_caps dsc_dec_caps;  }; @@ -2325,6 +2339,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_  				struct dc_cursor_attributes *cursor_attr);  void dc_allow_idle_optimizations(struct dc *dc, bool allow); +void dc_exit_ips_for_hw_access(struct dc *dc);  bool dc_dmub_is_ips_idle_state(struct dc *dc);  /* set min and max memory clock to lowest and highest DPM level, respectively */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 2b79a0e5638e..6083b1dcf050 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -74,7 +74,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)  	struct dc_context *dc_ctx = dc_dmub_srv->ctx;  	enum dmub_status status; -	status = dmub_srv_wait_for_idle(dmub, 100000); +	do { +		status = dmub_srv_wait_for_idle(dmub, 100000); +	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); +  	if (status != DMUB_STATUS_OK) {  		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);  		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); @@ -125,7 +128,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,  		unsigned int count,  		union dmub_rb_cmd *cmd_list)  { -	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	struct dc_context *dc_ctx;  	
struct dmub_srv *dmub;  	enum dmub_status status;  	int i; @@ -133,6 +136,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,  	if (!dc_dmub_srv || !dc_dmub_srv->dmub)  		return false; +	dc_ctx = dc_dmub_srv->ctx;  	dmub = dc_dmub_srv->dmub;  	for (i = 0 ; i < count; i++) { @@ -145,7 +149,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,  			if (status == DMUB_STATUS_POWER_STATE_D3)  				return false; -			dmub_srv_wait_for_idle(dmub, 100000); +			do { +				status = dmub_srv_wait_for_idle(dmub, 100000); +			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);  			/* Requeue the command. */  			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); @@ -186,7 +192,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,  	// Wait for DMUB to process command  	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { -		status = dmub_srv_wait_for_idle(dmub, 100000); +		do { +			status = dmub_srv_wait_for_idle(dmub, 100000); +		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);  		if (status != DMUB_STATUS_OK) {  			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); @@ -780,21 +788,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,  	} else if (subvp_pipe->next_odm_pipe) {  		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;  	} else { -		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0; +		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;  	}  	// Find phantom pipe index based on phantom stream  	for (j = 0; j < dc->res_pool->pipe_count; j++) {  		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; -		if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { +		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) && +				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {  			
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;  			if (phantom_pipe->bottom_pipe) {  				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;  			} else if (phantom_pipe->next_odm_pipe) {  				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;  			} else { -				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0; +				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;  			}  			break;  		} @@ -1161,7 +1170,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con  bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)  { -	struct dc_context *dc_ctx = dc_dmub_srv->ctx; +	struct dc_context *dc_ctx;  	enum dmub_status status;  	if (!dc_dmub_srv || !dc_dmub_srv->dmub) @@ -1170,6 +1179,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)  	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)  		return true; +	dc_ctx = dc_dmub_srv->ctx; +  	if (wait) {  		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {  			do { @@ -1190,11 +1201,17 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)  static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)  { +	struct dc_dmub_srv *dc_dmub_srv;  	union dmub_rb_cmd cmd = {0};  	if (dc->debug.dmcub_emulation)  		return; +	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) +		return; + +	dc_dmub_srv = dc->ctx->dmub_srv; +  	memset(&cmd, 0, sizeof(cmd));  	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;  	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE; @@ -1205,19 +1222,42 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)  	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;  	if (allow_idle) { -		if (dc->hwss.set_idle_state) -			dc->hwss.set_idle_state(dc, true); +		
volatile struct dmub_shared_state_ips_driver *ips_driver = +			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; +		union dmub_shared_state_ips_driver_signals new_signals; + +		dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + +		memset(&new_signals, 0, sizeof(new_signals)); + +		if (dc->config.disable_ips == DMUB_IPS_ENABLE || +		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { +			new_signals.bits.allow_pg = 1; +			new_signals.bits.allow_ips1 = 1; +			new_signals.bits.allow_ips2 = 1; +			new_signals.bits.allow_z10 = 1; +		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { +			new_signals.bits.allow_ips1 = 1; +		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { +			new_signals.bits.allow_pg = 1; +			new_signals.bits.allow_ips1 = 1; +		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { +			new_signals.bits.allow_pg = 1; +			new_signals.bits.allow_ips1 = 1; +			new_signals.bits.allow_ips2 = 1; +		} + +		ips_driver->signals = new_signals;  	}  	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */  	/* We also do not perform a wait since DMCUB could enter idle after the notification. */ -	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); +	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? 
DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);  }  static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)  { -	uint32_t allow_state = 0; -	uint32_t commit_state = 0; +	struct dc_dmub_srv *dc_dmub_srv;  	if (dc->debug.dmcub_emulation)  		return; @@ -1225,61 +1265,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)  	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)  		return; -	if (dc->hwss.get_idle_state && -		dc->hwss.set_idle_state && -		dc->clk_mgr->funcs->exit_low_power_state) { +	dc_dmub_srv = dc->ctx->dmub_srv; -		allow_state = dc->hwss.get_idle_state(dc); -		dc->hwss.set_idle_state(dc, false); +	if (dc->clk_mgr->funcs->exit_low_power_state) { +		volatile const struct dmub_shared_state_ips_fw *ips_fw = +			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; +		volatile struct dmub_shared_state_ips_driver *ips_driver = +			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; +		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; -		if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) { -			// Wait for evaluation time -			for (;;) { -				udelay(dc->debug.ips2_eval_delay_us); -				commit_state = dc->hwss.get_idle_state(dc); -				if (commit_state & DMUB_IPS2_ALLOW_MASK) -					break; +		ips_driver->signals.all = 0; -				/* allow was still set, retry eval delay */ -				dc->hwss.set_idle_state(dc, false); -			} +		if (prev_driver_signals.bits.allow_ips2) { +			udelay(dc->debug.ips2_eval_delay_us); -			if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) { +			if (ips_fw->signals.bits.ips2_commit) {  				// Tell PMFW to exit low power state  				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);  				// Wait for IPS2 entry upper bound  				udelay(dc->debug.ips2_entry_delay_us); -				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); -				for (;;) { -					commit_state = dc->hwss.get_idle_state(dc); -					if (commit_state & 
DMUB_IPS2_COMMIT_MASK) -						break; +				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); +				while (ips_fw->signals.bits.ips2_commit)  					udelay(1); -				}  				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))  					ASSERT(0); -				/* TODO: See if we can return early here - IPS2 should go -				 * back directly to IPS0 and clear the flags, but it will -				 * be safer to directly notify DMCUB of this. -				 */ -				allow_state = dc->hwss.get_idle_state(dc); +				dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);  			}  		}  		dc_dmub_srv_notify_idle(dc, false); -		if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) { -			for (;;) { -				commit_state = dc->hwss.get_idle_state(dc); -				if (commit_state & DMUB_IPS1_COMMIT_MASK) -					break; - +		if (prev_driver_signals.bits.allow_ips1) { +			while (ips_fw->signals.bits.ips1_commit)  				udelay(1); -			} +  		}  	} @@ -1361,7 +1384,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in  	else  		result = dm_execute_dmub_cmd(ctx, cmd, wait_type); -	if (result && reallow_idle) +	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)  		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);  	return result; @@ -1410,7 +1433,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com  	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); -	if (result && reallow_idle) +	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)  		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);  	return result; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 811474f4419b..aae2f3a2660d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -827,9 +827,7 @@ struct dc_dsc_config {  	uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. 
*/  	bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */  	int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ -#if defined(CONFIG_DRM_AMD_DC_FP)  	bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ -#endif  	bool is_dp; /* indicate if DSC is applied based on DP's capability */  	uint32_t mst_pbn; /* pbn of display on dsc mst hub */  	const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */ @@ -942,6 +940,7 @@ struct dc_crtc_timing {  	uint32_t hdmi_vic;  	uint32_t rid;  	uint32_t fr_index; +	uint32_t frl_uncompressed_video_bandwidth_in_kbps;  	enum dc_timing_3d_format timing_3d_format;  	enum dc_color_depth display_color_depth;  	enum dc_pixel_encoding pixel_encoding; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index f0458b8f00af..12f3c35b3a34 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi(  		}  	}  } +static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate) +{ +	switch (link_rate) { +	case LINK_RATE_LOW: +		return dc_fixpt_from_int(162); /* 162 MHz */ +	case LINK_RATE_HIGH: +		return dc_fixpt_from_int(270); /* 270 MHz */ +	case LINK_RATE_HIGH2: +		return dc_fixpt_from_int(540); /* 540 MHz */ +	case LINK_RATE_HIGH3: +		return dc_fixpt_from_int(810); /* 810 MHz */ +	case LINK_RATE_UHBR10: +		return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */ +	case LINK_RATE_UHBR13_5: +		return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */ +	case LINK_RATE_UHBR20: +		return dc_fixpt_from_int(625); /* 625 MHz */ +	default: +		/* Unexpected case, this requires debug if encountered. 
*/ +		ASSERT(0); +		return dc_fixpt_from_int(0); +	} +} + +struct dp_audio_layout_config { +	uint8_t layouts_per_sample_denom; +	uint8_t symbols_per_layout; +	uint8_t max_layouts_per_audio_sdp; +}; + +static void get_audio_layout_config( +	uint32_t channel_count, +	enum dp_link_encoding encoding, +	struct dp_audio_layout_config *output) +{ +	/* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP, +	 * with each layout being the same size (8ch layout). +	 */ +	if (encoding == DP_8b_10b_ENCODING) { +		if (channel_count == 2) { +			output->layouts_per_sample_denom = 4; +			output->symbols_per_layout = 40; +			output->max_layouts_per_audio_sdp = 1; +		} else if (channel_count == 8 || channel_count == 6) { +			output->layouts_per_sample_denom = 1; +			output->symbols_per_layout = 40; +			output->max_layouts_per_audio_sdp = 1; +		} +	} else if (encoding == DP_128b_132b_ENCODING) { +		if (channel_count == 2) { +			output->layouts_per_sample_denom = 4; +			output->symbols_per_layout = 10; +			output->max_layouts_per_audio_sdp = 1; +		} else if (channel_count == 8 || channel_count == 6) { +			output->layouts_per_sample_denom = 1; +			output->symbols_per_layout = 10; +			output->max_layouts_per_audio_sdp = 1; +		} +	} +} -/*For DP SST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpsst( +static uint32_t get_av_stream_map_lane_count( +	enum dp_link_encoding encoding, +	enum dc_lane_count lane_count, +	bool is_mst) +{ +	uint32_t av_stream_map_lane_count = 0; + +	if (encoding == DP_8b_10b_ENCODING) { +		if (!is_mst) +			av_stream_map_lane_count = lane_count; +		else +			av_stream_map_lane_count = 4; +	} else if (encoding == DP_128b_132b_ENCODING) { +		av_stream_map_lane_count = 4; +	} + +	ASSERT(av_stream_map_lane_count != 0); + +	return av_stream_map_lane_count; +} + +static uint32_t get_audio_sdp_overhead( +	enum dp_link_encoding encoding, +	enum dc_lane_count lane_count, +	bool is_mst) +{ +	uint32_t 
audio_sdp_overhead = 0; + +	if (encoding == DP_8b_10b_ENCODING) { +		if (is_mst) +			audio_sdp_overhead = 16; /* 4 * 2 + 8 */ +		else +			audio_sdp_overhead = lane_count * 2 + 8; +	} else if (encoding == DP_128b_132b_ENCODING) { +		audio_sdp_overhead = 10; /* 4 x 2.5 */ +	} + +	ASSERT(audio_sdp_overhead != 0); + +	return audio_sdp_overhead; +} + +static uint32_t calculate_required_audio_bw_in_symbols(  	const struct audio_crtc_info *crtc_info, +	const struct dp_audio_layout_config *layout_config,  	uint32_t channel_count, -	union audio_sample_rates *sample_rates) +	uint32_t sample_rate_hz, +	uint32_t av_stream_map_lane_count, +	uint32_t audio_sdp_overhead) +{ +	/* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */ +	struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100); +	struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction( +			crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10); +	struct fixed31_32 samples_per_line; +	struct fixed31_32 layouts_per_line; +	struct fixed31_32 symbols_per_sdp_max_layout; +	struct fixed31_32 remainder; +	uint32_t num_sdp_with_max_layouts; +	uint32_t required_symbols_per_hblank; + +	samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000); +	samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz); +	layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom); + +	num_sdp_with_max_layouts = dc_fixpt_floor( +			dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp)); +	symbols_per_sdp_max_layout = dc_fixpt_from_int( +			layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout); +	symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead); +	symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin); +	required_symbols_per_hblank = num_sdp_with_max_layouts; +	required_symbols_per_hblank *= 
((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) / +			av_stream_map_lane_count) *	av_stream_map_lane_count; + +	if (num_sdp_with_max_layouts !=	dc_fixpt_ceil( +			dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) { +		remainder = dc_fixpt_sub_int(layouts_per_line, +				num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp); +		remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout); +		remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead); +		remainder = dc_fixpt_mul(remainder, audio_sdp_margin); +		required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) / +				av_stream_map_lane_count) * av_stream_map_lane_count; +	} + +	return required_symbols_per_hblank; +} + +/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST. + */ +static uint32_t calculate_available_hblank_bw_in_symbols( +	const struct audio_crtc_info *crtc_info, +	const struct audio_dp_link_info *dp_link_info)  { -	/* do nothing */ +	uint64_t hblank = crtc_info->h_total - crtc_info->h_active; +	struct fixed31_32 hblank_time_msec = +			dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz); +	struct fixed31_32 lsclkfreq_mhz = +			get_link_symbol_clk_freq_mhz(dp_link_info->link_rate); +	struct fixed31_32 average_stream_sym_bw_frac; +	struct fixed31_32 peak_stream_bw_kbps; +	struct fixed31_32 bits_per_pixel; +	struct fixed31_32 link_bw_kbps; +	struct fixed31_32 available_stream_sym_count; +	uint32_t available_hblank_bw = 0; /* in stream symbols */ + +	if (crtc_info->dsc_bits_per_pixel) { +		bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16); +	} else { +		switch (crtc_info->color_depth) { +		case COLOR_DEPTH_666: +			bits_per_pixel = dc_fixpt_from_int(6); +			break; +		case COLOR_DEPTH_888: +			bits_per_pixel = dc_fixpt_from_int(8); +			break; +		case COLOR_DEPTH_101010: +			bits_per_pixel = dc_fixpt_from_int(10); +			
break; +		case COLOR_DEPTH_121212: +			bits_per_pixel = dc_fixpt_from_int(12); +			break; +		default: +			/* Default to commonly supported color depth. */ +			bits_per_pixel = dc_fixpt_from_int(8); +			break; +		} + +		bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3); + +		if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) { +			bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3); +			bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2); +		} else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) { +			bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2); +		} +	} + +	/* Use simple stream BW calculation because mainlink overhead is +	 * accounted for separately in the audio BW calculations. +	 */ +	peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10); +	peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel); +	link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps); +	average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps); + +	available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000); +	available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz); +	available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac); +	available_hblank_bw = dc_fixpt_floor(available_stream_sym_count); +	available_hblank_bw *= dp_link_info->lane_count; +	available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */ + +	if (available_hblank_bw < dp_link_info->hblank_min_symbol_width) +		available_hblank_bw = dp_link_info->hblank_min_symbol_width; + +	if (available_hblank_bw < 12) +		available_hblank_bw = 0; +	else +		available_hblank_bw -= 12; /* Main link overhead */ + +	return available_hblank_bw;  } -/*For DP MST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpmst( +static void check_audio_bandwidth_dp(  	const struct audio_crtc_info *crtc_info, 
+	const struct audio_dp_link_info *dp_link_info,  	uint32_t channel_count,  	union audio_sample_rates *sample_rates)  { -	/* do nothing  */ +	struct dp_audio_layout_config layout_config = {0}; +	uint32_t available_hblank_bw; +	uint32_t av_stream_map_lane_count; +	uint32_t audio_sdp_overhead; + +	/* TODO: Add validation for SST 8b/10 case  */ +	if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING) +		return; + +	available_hblank_bw = calculate_available_hblank_bw_in_symbols( +			crtc_info, dp_link_info); +	av_stream_map_lane_count = get_av_stream_map_lane_count( +			dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); +	audio_sdp_overhead = get_audio_sdp_overhead( +			dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); +	get_audio_layout_config( +			channel_count, dp_link_info->encoding, &layout_config); + +	if (layout_config.max_layouts_per_audio_sdp == 0 || +		layout_config.symbols_per_layout == 0 || +		layout_config.layouts_per_sample_denom == 0) { +		return; +	} +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 192000, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_192 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 176400, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_176_4 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 96000, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_96 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 88200, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_88_2 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 48000, 
+			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_48 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 44100, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_44_1 = 0; +	if (available_hblank_bw < calculate_required_audio_bw_in_symbols( +			crtc_info, &layout_config, channel_count, 32000, +			av_stream_map_lane_count, audio_sdp_overhead)) +		sample_rates->rate.RATE_32 = 0;  }  static void check_audio_bandwidth(  	const struct audio_crtc_info *crtc_info, +	const struct audio_dp_link_info *dp_link_info,  	uint32_t channel_count,  	enum signal_type signal,  	union audio_sample_rates *sample_rates) @@ -271,12 +538,9 @@ static void check_audio_bandwidth(  		break;  	case SIGNAL_TYPE_EDP:  	case SIGNAL_TYPE_DISPLAY_PORT: -		check_audio_bandwidth_dpsst( -			crtc_info, channel_count, sample_rates); -		break;  	case SIGNAL_TYPE_DISPLAY_PORT_MST: -		check_audio_bandwidth_dpmst( -			crtc_info, channel_count, sample_rates); +		check_audio_bandwidth_dp( +			crtc_info, dp_link_info, channel_count, sample_rates);  		break;  	default:  		break; @@ -394,7 +658,8 @@ void dce_aud_az_configure(  	struct audio *audio,  	enum signal_type signal,  	const struct audio_crtc_info *crtc_info, -	const struct audio_info *audio_info) +	const struct audio_info *audio_info, +	const struct audio_dp_link_info *dp_link_info)  {  	struct dce_audio *aud = DCE_AUD(audio); @@ -529,6 +794,7 @@ void dce_aud_az_configure(  				check_audio_bandwidth(  					crtc_info, +					dp_link_info,  					channel_count,  					signal,  					&sample_rates); @@ -588,6 +854,7 @@ void dce_aud_az_configure(  	check_audio_bandwidth(  		crtc_info, +		dp_link_info,  		8,  		signal,  		&sample_rate); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index dbd2cfed0603..539f881928d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio);  void dce_aud_az_configure(struct audio *audio,  	enum signal_type signal,  	const struct audio_crtc_info *crtc_info, -	const struct audio_info *audio_info); +	const struct audio_info *audio_info, +	const struct audio_dp_link_info *dp_link_info);  void dce_aud_wall_dto_setup(struct audio *audio,  	enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e8570060d007..5bca67407c5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -290,4 +290,5 @@ void dce_panel_cntl_construct(  	dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;  	dce_panel_cntl->base.ctx = init_data->ctx;  	dce_panel_cntl->base.inst = init_data->inst; +	dce_panel_cntl->base.pwrseq_inst = 0;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index ba1fec3016d5..bf636b28e3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link)  {  	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)  		return true; + +	if (link->replay_settings.replay_feature_enabled) +		return true; +  	return false;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 38e4797e9476..b010814706fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,  		*residency = 0;  } -/** +/*   * Set REPLAY power optimization flags and coasting vtotal.   
*/  static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, @@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm  	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);  } -/** +/*   * send Replay general cmd to DMUB.   */  static void dmub_replay_send_cmd(struct dmub_replay *dmub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 3538973bd0c6..b7e57aa27361 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(  } +void cm_helper_read_color_matrices(struct dc_context *ctx, +				   uint16_t *regval, +				   const struct color_matrices_reg *reg) +{ +	uint32_t cur_csc_reg, regval0, regval1; +	unsigned int i = 0; + +	for (cur_csc_reg = reg->csc_c11_c12; +	     cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) { +		REG_GET_2(cur_csc_reg, +				csc_c11, ®val0, +				csc_c12, ®val1); + +		regval[2 * i] = regval0; +		regval[(2 * i) + 1] = regval1; + +		i++; +	} +} +  void cm_helper_program_xfer_func(  		struct dc_context *ctx,  		const struct pwl_params *params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 0a68b63d6126..decc50b1ac53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h @@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(  				const struct dc_transfer_func *output_tf,  				struct pwl_params *lut_params); - +void cm_helper_read_color_matrices(struct dc_context *ctx, +				   uint16_t *regval, +				   const struct color_matrices_reg *reg);  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index ef52e6b6eccf..4e391fd1d71c 
100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {  		.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,  		.dpp_program_blnd_lut = NULL,  		.dpp_program_shaper_lut = NULL, -		.dpp_program_3dlut = NULL +		.dpp_program_3dlut = NULL, +		.dpp_get_gamut_remap = dpp1_cm_get_gamut_remap,  };  static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index c9e045666dcc..a039eedc7c24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1521,4 +1521,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1,  	const struct dcn_dpp_registers *tf_regs,  	const struct dcn_dpp_shift *tf_shift,  	const struct dcn_dpp_mask *tf_mask); + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust);  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 904c2d278998..2f994a3a0b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -98,7 +98,7 @@ static void program_gamut_remap(  	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {  		REG_SET(CM_GAMUT_REMAP_CONTROL, 0, -				CM_GAMUT_REMAP_MODE, 0); +			CM_GAMUT_REMAP_MODE, 0);  		return;  	}  	switch (select) { @@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap(  	}  } +static void read_gamut_remap(struct dcn10_dpp *dpp, +			     uint16_t *regval, +			     enum gamut_remap_select *select) +{ +	struct color_matrices_reg gam_regs; +	uint32_t selection; + +	REG_GET(CM_GAMUT_REMAP_CONTROL, +					CM_GAMUT_REMAP_MODE, &selection); + +	*select = selection; + +	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; +	gam_regs.masks.csc_c11  = 
dpp->tf_mask->CM_GAMUT_REMAP_C11; +	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; +	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + +	if (*select == GAMUT_REMAP_COEFF) { + +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + +		cm_helper_read_color_matrices( +				dpp->base.ctx, +				regval, +				&gam_regs); + +	} else if (*select == GAMUT_REMAP_COMA_COEFF) { + +		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + +		cm_helper_read_color_matrices( +				dpp->base.ctx, +				regval, +				&gam_regs); + +	} else if (*select == GAMUT_REMAP_COMB_COEFF) { + +		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + +		cm_helper_read_color_matrices( +				dpp->base.ctx, +				regval, +				&gam_regs); +	} +} + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust) +{ +	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); +	uint16_t arr_reg_val[12]; +	enum gamut_remap_select select; + +	read_gamut_remap(dpp, arr_reg_val, &select); + +	if (select == GAMUT_REMAP_BYPASS) { +		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; +		return; +	} + +	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; +	convert_hw_matrix(adjust->temperature_matrix, +			  arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} +  static void dpp1_cm_program_color_matrix(  		struct dcn10_dpp *dpp,  		const uint16_t *regval) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0dec57679269..48a40dcc7050 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -23,6 +23,7 @@   *   */ +#include "core_types.h"  #include "dm_services.h"  #include "dcn10_opp.h"  #include "reg_helper.h" @@ -160,6 +161,9 @@ static void opp1_set_pixel_encoding(  	struct dcn10_opp *oppn10,  	
const struct clamping_and_pixel_encoding_params *params)  { +	bool force_chroma_subsampling_1tap = +			oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap; +  	switch (params->pixel_encoding)	{  	case PIXEL_ENCODING_RGB: @@ -178,6 +182,9 @@ static void opp1_set_pixel_encoding(  	default:  		break;  	} + +	if (force_chroma_subsampling_1tap) +		REG_UPDATE(FMT_CONTROL,	FMT_SUBSAMPLING_MODE, 0);  }  /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index eaa7032f0f1a..1516c0a48726 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base,  	REG_GET(DPP_CONTROL,  			DPP_CLOCK_ENABLE, &s->is_enabled); + +	// Degamma LUT (RAM)  	REG_GET(CM_DGAM_CONTROL, -			CM_DGAM_LUT_MODE, &s->dgam_lut_mode); -	// BGAM has no ROM, and definition is different, can't reuse same dump -	//REG_GET(CM_BLNDGAM_CONTROL, -	//		CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode); -	REG_GET(CM_GAMUT_REMAP_CONTROL, -			CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); -	if (s->gamut_remap_mode) { -		s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); -		s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); -		s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); -		s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); -		s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); -		s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); -	} +		CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + +	// Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) +	REG_GET(CM_SHAPER_CONTROL, +		CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); +	REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, +		  CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, +		  CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); +	REG_GET(CM_3DLUT_MODE, +		CM_3DLUT_SIZE, &s->lut3d_size); + +	// Blend/Out Gamma (RAM) +	REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, +		CM_BLNDGAM_CONFIG_STATUS, 
&s->rgam_lut_mode);  }  void dpp2_power_on_obuf( @@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {  	.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,  	.dpp_dppclk_control = dpp1_dppclk_control,  	.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, +	.dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,  };  static struct dpp_caps dcn20_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index e735363d0051..672cde46c4b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,  void dpp2_power_on_obuf(  		struct dpp *dpp_base,  	bool power_on); + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust);  #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 598caa508d43..58dc69926e8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap(  	}  } +static void read_gamut_remap(struct dcn20_dpp *dpp, +			     uint16_t *regval, +			     enum dcn20_gamut_remap_select *select) +{ +	struct color_matrices_reg gam_regs; +	uint32_t selection; + +	IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, +		   CM_TEST_DEBUG_DATA_STATUS_IDX, +		   CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); + +	*select = selection; + +	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; +	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11; +	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; +	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + +	if (*select == DCN2_GAMUT_REMAP_COEF_A) { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); +		gam_regs.csc_c33_c34 = 
REG(CM_GAMUT_REMAP_C33_C34); + +		cm_helper_read_color_matrices(dpp->base.ctx, +					      regval, +					      &gam_regs); + +	} else if (*select == DCN2_GAMUT_REMAP_COEF_B) { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + +		cm_helper_read_color_matrices(dpp->base.ctx, +					      regval, +					      &gam_regs); +	} +} + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust) +{ +	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); +	uint16_t arr_reg_val[12]; +	enum dcn20_gamut_remap_select select; + +	read_gamut_remap(dpp, arr_reg_val, &select); + +	if (select == DCN2_GAMUT_REMAP_BYPASS) { +		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; +		return; +	} + +	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; +	convert_hw_matrix(adjust->temperature_matrix, +			  arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} +  void dpp2_program_input_csc(  		struct dpp *dpp_base,  		enum dc_color_space color_space, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5da6e44f284a..16b5ff208d14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -542,8 +542,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)  	return NULL;  } +static void mpc2_read_mpcc_state( +		struct mpc *mpc, +		int mpcc_inst, +		struct mpcc_state *s) +{ +	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + +	REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); +	REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); +	REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); +	REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, +			MPCC_ALPHA_BLND_MODE, &s->alpha_mode, +			MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, +			MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); +	
REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, +			MPCC_BUSY, &s->busy); + +	/* Gamma block state */ +	REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst], +		MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode); +} +  static const struct mpc_funcs dcn20_mpc_funcs = { -	.read_mpcc_state = mpc1_read_mpcc_state, +	.read_mpcc_state = mpc2_read_mpcc_state,  	.insert_plane = mpc1_insert_plane,  	.remove_mpcc = mpc1_remove_mpcc,  	.mpc_init = mpc1_mpc_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c index a7268027a472..f809a7d21033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c @@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = {  	.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,  	.dpp_dppclk_control = dpp1_dppclk_control,  	.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, +	.dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,  };  static struct dpp_caps dcn201_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 11f7746f3a65..a3a769aad042 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -44,12 +44,45 @@  void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)  {  	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); +	uint32_t gamcor_lut_mode, rgam_lut_mode;  	REG_GET(DPP_CONTROL, -			DPP_CLOCK_ENABLE, &s->is_enabled); +		DPP_CLOCK_ENABLE, &s->is_enabled); + +	// Pre-degamma (ROM) +	REG_GET_2(PRE_DEGAM, +		  PRE_DEGAM_MODE, &s->pre_dgam_mode, +		  PRE_DEGAM_SELECT, &s->pre_dgam_select); + +	// Gamma Correction (RAM) +	REG_GET(CM_GAMCOR_CONTROL, +		CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); +	if (s->gamcor_mode) { +		REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); +		if (!gamcor_lut_mode) +			s->gamcor_mode = LUT_RAM_A; // 
Otherwise, LUT_RAM_B +	} -	// TODO: Implement for DCN3 +	// Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) +	REG_GET(CM_SHAPER_CONTROL, +		CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); +	REG_GET(CM_3DLUT_MODE, +		CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); +	REG_GET(CM_3DLUT_READ_WRITE_CONTROL, +		CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); +	REG_GET(CM_3DLUT_MODE, +		CM_3DLUT_SIZE, &s->lut3d_size); + +	// Blend/Out Gamma (RAM) +	REG_GET(CM_BLNDGAM_CONTROL, +		CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); +	if (s->rgam_lut_mode){ +		REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); +		if (!rgam_lut_mode) +			s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B +	}  } +  /*program post scaler scs block in dpp CM*/  void dpp3_program_post_csc(  		struct dpp *dpp_base, @@ -1462,6 +1495,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {  	.set_optional_cursor_attributes	= dpp1_cnv_set_optional_cursor_attributes,  	.dpp_dppclk_control		= dpp1_dppclk_control,  	.dpp_set_hdr_multiplier		= dpp3_set_hdr_multiplier, +	.dpp_get_gamut_remap		= dpp3_cm_get_gamut_remap,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h index cea3208e4ab1..2ac8045a87a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h @@ -637,4 +637,6 @@ void dpp3_program_cm_dealpha(  		struct dpp *dpp_base,  	uint32_t enable, uint32_t additive_blending); +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust);  #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index 5f97a868ada3..2f5b3fbd3507 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -405,3 +405,57 @@ void dpp3_cm_set_gamut_remap(  		program_gamut_remap(dpp, arr_reg_val, gamut_mode);  	}  } + 
+static void read_gamut_remap(struct dcn3_dpp *dpp, +			     uint16_t *regval, +			     int *select) +{ +	struct color_matrices_reg gam_regs; +	uint32_t selection; + +	//current coefficient set in use +	REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); + +	*select = selection; + +	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; +	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11; +	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; +	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + +	if (*select == GAMUT_REMAP_COEFF) { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + +		cm_helper_read_color_matrices(dpp->base.ctx, +					      regval, +					      &gam_regs); + +	} else if (*select == GAMUT_REMAP_COMA_COEFF) { +		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); +		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + +		cm_helper_read_color_matrices(dpp->base.ctx, +					      regval, +					      &gam_regs); +	} +} + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, +			     struct dpp_grph_csc_adjustment *adjust) +{ +	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); +	uint16_t arr_reg_val[12]; +	int select; + +	read_gamut_remap(dpp, arr_reg_val, &select); + +	if (select == GAMUT_REMAP_BYPASS) { +		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; +		return; +	} + +	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; +	convert_hw_matrix(adjust->temperature_matrix, +			  arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index d1500b223858..bf3386cd444d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1129,6 +1129,64 @@ void mpc3_set_gamut_remap(  	}  } +static void read_gamut_remap(struct dcn30_mpc *mpc30, +			     int mpcc_id, +			     
uint16_t *regval,
+			     uint32_t *select)
+{
+	struct color_matrices_reg gam_regs;
+
+	//current coefficient set in use
+	REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select);
+
+	gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+	gam_regs.masks.csc_c11  = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+	gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+	gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+	if (*select == GAMUT_REMAP_COEFF) {
+		gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+		gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+		cm_helper_read_color_matrices(
+				mpc30->base.ctx,
+				regval,
+				&gam_regs);
+
+	} else  if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+		gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+		gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+		cm_helper_read_color_matrices(
+				mpc30->base.ctx,
+				regval,
+				&gam_regs);
+
+	}
+
+}
+
+void mpc3_get_gamut_remap(struct mpc *mpc,
+			  int mpcc_id,
+			  struct mpc_grph_gamut_adjustment *adjust)
+{
+	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+	uint16_t arr_reg_val[12];
+	uint32_t select;
+
+	read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
+
+	if (select == GAMUT_REMAP_BYPASS) {
+		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+		return;
+	}
+
+	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+	convert_hw_matrix(adjust->temperature_matrix,
+			  arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
 bool mpc3_program_3dlut(
 		struct mpc *mpc,
 		const struct tetrahedral_params *params,
@@ -1382,8 +1440,54 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
 	}
 }
 
+static void mpc3_read_mpcc_state(
+		struct mpc *mpc,
+		int mpcc_inst,
+		struct mpcc_state *s)
+{
+	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+	uint32_t rmu_status = 0xf;
+
+	REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+	
REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+	REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+	REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+			MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+			MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+			MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+	REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+			MPCC_BUSY, &s->busy);
+
+	/* Color blocks state */
+	REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status);
+
+	if (rmu_status == mpcc_inst) {
+		REG_GET(SHAPER_CONTROL[0],
+			MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+		REG_GET(RMU_3DLUT_MODE[0],
+			MPC_RMU_3DLUT_MODE_CURRENT,  &s->lut3d_mode);
+		REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0],
+			MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+		REG_GET(RMU_3DLUT_MODE[0],
+			MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+	} else {
+		REG_GET(SHAPER_CONTROL[1],
+			MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+		REG_GET(RMU_3DLUT_MODE[1],
+			MPC_RMU_3DLUT_MODE_CURRENT,  &s->lut3d_mode);
+		REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1],
+			MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+		REG_GET(RMU_3DLUT_MODE[1],
+			MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+	}
+
+	REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+		  MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
+		  MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
+}
+
 static const struct mpc_funcs dcn30_mpc_funcs = {
-	.read_mpcc_state = mpc1_read_mpcc_state,
+	.read_mpcc_state = mpc3_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
 	.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 5198f2167c7c..9cb96ae95a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1056,6 +1056,10 @@ void mpc3_set_gamut_remap(
 	int mpcc_id,
 	const struct mpc_grph_gamut_adjustment *adjust);
 
+void 
mpc3_get_gamut_remap(struct mpc *mpc, +			  int mpcc_id, +			  struct mpc_grph_gamut_adjustment *adjust); +  void mpc3_set_rmu_mux(  	struct mpc *mpc,  	int rmu_idx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index ad0df1a72a90..9e96a3ace207 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(  	dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;  	dcn301_panel_cntl->base.ctx = init_data->ctx;  	dcn301_panel_cntl->base.inst = init_data->inst; +	dcn301_panel_cntl->base.pwrseq_inst = 0;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 03248422d6ff..281be20b1a10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(  	struct dcn31_panel_cntl *dcn31_panel_cntl,  	const struct panel_cntl_init_data *init_data)  { +	uint8_t pwrseq_inst = 0xF; +  	dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;  	dcn31_panel_cntl->base.ctx = init_data->ctx;  	dcn31_panel_cntl->base.inst = init_data->inst; -	dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst; + +	switch (init_data->eng_id) { +	case ENGINE_ID_DIGA: +		pwrseq_inst = 0; +		break; +	case ENGINE_ID_DIGB: +		pwrseq_inst = 1; +		break; +	default: +		DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id); +		ASSERT(false); +		break; +	} + +	dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index d761b0df2878..e224a028d68a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -34,6 +34,7 @@  #include "dc_bios_types.h"  #include "link_enc_cfg.h" +#include "dc_dmub_srv.h"  #include "gpio_service_interface.h"  #ifndef MIN @@ -61,6 +62,38 @@  #define AUX_REG_WRITE(reg_name, val) \  			dm_write_reg(CTX, AUX_REG(reg_name), val) +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ +	uint8_t phy_id; + +	switch (t) { +	case TRANSMITTER_UNIPHY_A: +		phy_id = 0; +		break; +	case TRANSMITTER_UNIPHY_B: +		phy_id = 1; +		break; +	case TRANSMITTER_UNIPHY_C: +		phy_id = 2; +		break; +	case TRANSMITTER_UNIPHY_D: +		phy_id = 3; +		break; +	case TRANSMITTER_UNIPHY_E: +		phy_id = 4; +		break; +	case TRANSMITTER_UNIPHY_F: +		phy_id = 5; +		break; +	case TRANSMITTER_UNIPHY_G: +		phy_id = 6; +		break; +	default: +		phy_id = 0; +		break; +	} +	return phy_id; +}  void enc32_hw_init(struct link_encoder *enc)  { @@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output(  	}  } -static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool query_dp_alt_from_dmub(struct link_encoder *enc, +	union dmub_rb_cmd *cmd)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); -	uint32_t dp_alt_mode_disable = 0; -	bool is_usb_c_alt_mode = false; -	if (enc->features.flags.bits.DP_IS_USB_C) { -		/* if value == 1 alt mode is disabled, otherwise it is enabled */ -		REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); -		is_usb_c_alt_mode = (dp_alt_mode_disable == 0); -	} +	memset(cmd, 0, sizeof(*cmd)); +	cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; +	cmd->query_dp_alt.header.sub_type = +		DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; +	cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); +	cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + +	if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) +		return false; -	return is_usb_c_alt_mode; +	return true;  } 
-static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+	union dmub_rb_cmd cmd;
+
+	if (!query_dp_alt_from_dmub(enc, &cmd))
+		return false;
+
+	return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+}
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
 	struct dc_link_settings *link_settings)
 {
-	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-	uint32_t is_in_usb_c_dp4_mode = 0;
+	union dmub_rb_cmd cmd;
 
 	dcn10_link_encoder_get_max_link_cap(enc, link_settings);
 
-	/* in usb c dp2 mode, max lane count is 2 */
-	if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
-		REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
-		if (!is_in_usb_c_dp4_mode)
-			link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
-	}
+	if (!query_dp_alt_from_dmub(enc, &cmd))
+		return;
+	if (cmd.query_dp_alt.data.is_usb &&
+			cmd.query_dp_alt.data.is_dp4 == 0)
+		link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
 }
+
 static const struct link_encoder_funcs dcn32_link_enc_funcs = {
 	.read_state = link_enc2_read_state,
 	.validate_output_with_stream =
@@ -203,13 +248,12 @@ void dcn32_link_encoder_construct(
 	enc10->base.hpd_source = init_data->hpd_source;
 	enc10->base.connector = init_data->connector;
-
 	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 	enc10->base.features = *enc_features;
 	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
 		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 	enc10->base.transmitter = init_data->transmitter;
 	/* set the flag to indicate whether driver poll the I2C data pin
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index bbcfce06bec0..2d5f25290ed1 
100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output(  	const struct dc_link_settings *link_settings,  	enum clock_source_id clock_source); +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc); + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, +	struct dc_link_settings *link_settings); +  #endif /* __DC_LINK_ENCODER__DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c index dcf12a0b031c..681e75c6dbaf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c @@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {  	.set_optional_cursor_attributes	= dpp1_cnv_set_optional_cursor_attributes,  	.dpp_dppclk_control			= dpp1_dppclk_control,  	.dpp_set_hdr_multiplier		= dpp3_set_hdr_multiplier, +	.dpp_get_gamut_remap		= dpp3_cm_get_gamut_remap,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h index 1212fcee38f2..499052329ebb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h @@ -28,6 +28,7 @@  #include "dcn30/dcn30_vpg.h"  #include "dcn30/dcn30_afmt.h"  #include "stream_encoder.h" +#include "dcn10/dcn10_link_encoder.h"  #include "dcn20/dcn20_stream_encoder.h"  /* Register bit field name change */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 4229369c57f4..f4d3f04ec857 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -26,6 +26,9 @@  #ifndef DM_CP_PSP_IF__H  #define DM_CP_PSP_IF__H +/* + * Interface to CPLIB/PSP to enable ASSR + */  struct dc_link;  
struct cp_psp_stream_config { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 59ade76ffb18..c4a5efd2dda5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -92,6 +92,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags) @@ -126,6 +127,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_rcflags) @@ -157,6 +159,7 @@ DML += dcn302/dcn302_fpu.o  DML += dcn303/dcn303_fpu.o  DML += dcn314/dcn314_fpu.o  DML += dcn35/dcn35_fpu.o +DML += dcn351/dcn351_fpu.o  DML += dsc/rc_calc_fpu.o  DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o  endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 63c48c29ba49..e7f4a2d491cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	//Calculate Swath, DET Configuration, DCFCLKDeepSleep  	// -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			for (k = 0; k < v->NumberOfActivePlanes; ++k) {  				v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k]; @@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	//Calculate Return BW -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {  				if (v->BlendingAndTiming[k] == k) { @@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					v->UrgentOutOfOrderReturnPerChannelVMDataOnly);  	v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? 
v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency); -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			v->DCFCLKState[i][j] = v->DCFCLKPerState[i];  		} @@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		if (v->ClampMinDCFCLK) {  			/* Clamp calculated values to actual minimum */ -			for (i = 0; i < mode_lib->soc.num_states; ++i) { +			for (i = start_state; i < mode_lib->soc.num_states; ++i) {  				for (j = 0; j <= 1; ++j) {  					if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {  						v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk; @@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(  					v->ReturnBusWidth * v->DCFCLKState[i][j], @@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	//Re-ordering Buffer Support Check -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]  					> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) { @@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];  	} -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(  					
v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100, @@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	//Prefetch Check -	for (i = 0; i < mode_lib->soc.num_states; ++i) { +	for (i = start_state; i < mode_lib->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			int NextPrefetchModeState = MinPrefetchMode; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c index 3eb3a021ab7d..3f02bb806d42 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c @@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p  					optimal_uclk_for_dcfclk_sta_targets[i] =  							bw_params->clk_table.entries[j].memclk_mhz * 16;  					break; +				} else { +					/* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): +					 * This is required for dcn303 because it just so happens that the memory +					 * bandwidth is low enough such that all the optimal DCFCLK for each UCLK +					 * is lower than the smallest DCFCLK STA target. In this case we need to +					 * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK. 
+					 */ +					if (j == num_uclk_states - 1) { +						optimal_uclk_for_dcfclk_sta_targets[i] = +								bw_params->clk_table.entries[j].memclk_mhz * 16; +					}  				}  			}  		} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a0a65e099104..b49e1dc9d8ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -623,7 +623,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,  		 * - Not TMZ surface  		 */  		if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && -				!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&  				(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&  				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&  				(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 7ea2bd5374d5..80bebfc268db 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -583,12 +583,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)  			plane_count++;  	} -	if (plane_count == 0) { +	if (context->stream_count == 0 || plane_count == 0) {  		support = DCN_ZSTATE_SUPPORT_ALLOW; -	} else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { +	} else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {  		struct dc_link *link = context->streams[0]->sink->link;  		bool is_pwrseq0 = link && link->link_index == 0; -		bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr; +		bool is_psr = (link && (link->psr_settings.psr_version == 
DC_PSR_VERSION_1 || +								link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); +		bool is_replay = link && link->replay_settings.replay_feature_enabled;  		int minmum_z8_residency =  			dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;  		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; @@ -596,12 +598,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)  			dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;  		bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency; +		/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/  		if (is_pwrseq0 && allow_z10)  			support = DCN_ZSTATE_SUPPORT_ALLOW; -		else if (is_pwrseq0 && is_psr1) +		else if (is_pwrseq0 && (is_psr || is_replay))  			support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;  		else if (allow_z8)  			support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; +  	}  	context->bw_ctx.bw.dcn.clk.zstate_support = support; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c new file mode 100644 index 000000000000..dc9e1b758ed6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -0,0 +1,574 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ +#include "resource.h" +#include "dcn351_fpu.h" +#include "dcn31/dcn31_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn35/dcn35_resource.h" +#include "dcn351/dcn351_resource.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dml/dcn35/dcn35_fpu.h" +#include "dml/dml_inline_defs.h" + +#include "link.h" + +#define DC_LOGGER_INIT(logger) + +struct _vcs_dpi_ip_params_st dcn3_51_ip = { +	.VBlankNomDefaultUS = 668, +	.gpuvm_enable = 1, +	.gpuvm_max_page_table_levels = 1, +	.hostvm_enable = 1, +	.hostvm_max_page_table_levels = 2, +	.rob_buffer_size_kbytes = 64, +	.det_buffer_size_kbytes = 1536, +	.config_return_buffer_size_in_kbytes = 1792, +	.compressed_buffer_segment_size_in_kbytes = 64, +	.meta_fifo_size_in_kentries = 32, +	.zero_size_buffer_entries = 512, +	.compbuf_reserved_space_64b = 256, +	.compbuf_reserved_space_zs = 64, +	.dpp_output_buffer_pixels = 2560,/*not used*/ +	.opp_output_buffer_lines = 1,/*not used*/ +	.pixel_chunk_size_kbytes = 8, +	//.alpha_pixel_chunk_size_kbytes = 4;/*new*/ +	//.min_pixel_chunk_size_bytes = 1024;/*new*/ +	.meta_chunk_size_kbytes = 2, +	.min_meta_chunk_size_bytes = 256, +	.writeback_chunk_size_kbytes = 8, +	.ptoi_supported = false, +	.num_dsc = 4, +	.maximum_dsc_bits_per_component = 12,/*delta from 10*/ +	.dsc422_native_support = true,/*delta from false*/ +	.is_line_buffer_bpp_fixed = true,/*new*/ +	.line_buffer_fixed_bpp = 32,/*delta from 48*/ +	.line_buffer_size_bits = 986880,/*delta from 789504*/ +	.max_line_buffer_lines = 32,/*delta from 12*/ +	.writeback_interface_buffer_size_kbytes = 90, +	.max_num_dpp = 4, +	.max_num_otg = 4, +	.max_num_hdmi_frl_outputs = 1, +	.max_num_wb = 1, +	/*.max_num_hdmi_frl_outputs = 1; new in dml2*/ +	/*.max_num_dp2p0_outputs = 2; new in dml2*/ +	/*.max_num_dp2p0_streams = 4; new in dml2*/ +	.max_dchub_pscl_bw_pix_per_clk = 4, +	.max_pscl_lb_bw_pix_per_clk = 2, +	.max_lb_vscl_bw_pix_per_clk = 4, +	.max_vscl_hscl_bw_pix_per_clk = 4, +	.max_hscl_ratio = 6, +	.max_vscl_ratio = 6, +	
.max_hscl_taps = 8, +	.max_vscl_taps = 8, +	.dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/ +	.dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/ +	/*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/ +	.dispclk_ramp_margin_percent = 1.11,/*delta from 1*/ +	/*.dppclk_delay_subtotal = 47; +	.dppclk_delay_scl = 50; +	.dppclk_delay_scl_lb_only = 16; +	.dppclk_delay_cnvc_formatter = 28; +	.dppclk_delay_cnvc_cursor = 6; +	.dispclk_delay_subtotal = 125;*/ /*new to dml2*/ +	.max_inter_dcn_tile_repeaters = 8, +	.cursor_buffer_size = 16, +	.cursor_chunk_size = 2, +	.writeback_line_buffer_buffer_size = 0, +	.writeback_min_hscl_ratio = 1, +	.writeback_min_vscl_ratio = 1, +	.writeback_max_hscl_ratio = 1, +	.writeback_max_vscl_ratio = 1, +	.writeback_max_hscl_taps = 1, +	.writeback_max_vscl_taps = 1, +	.dppclk_delay_subtotal = 47, /* changed from 46,*/ +	.dppclk_delay_scl = 50, +	.dppclk_delay_scl_lb_only = 16, +	.dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/ +	.dppclk_delay_cnvc_cursor = 6, +	.dispclk_delay_subtotal = 125, /*changed from 119,*/ +	.dynamic_metadata_vm_enabled = false, +	.odm_combine_4to1_supported = false, +	.dcc_supported = true, +//	.config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/ + +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { +		/*TODO: correct dispclk/dppclk voltage level determination*/ +	.clock_limits = { +		{ +			.state = 0, +			.dispclk_mhz = 1200.0, +			.dppclk_mhz = 1200.0, +			.phyclk_mhz = 600.0, +			.phyclk_d18_mhz = 667.0, +			.dscclk_mhz = 186.0, +			.dtbclk_mhz = 600.0, +		}, +		{ +			.state = 1, +			.dispclk_mhz = 1200.0, +			.dppclk_mhz = 1200.0, +			.phyclk_mhz = 810.0, +			.phyclk_d18_mhz = 667.0, +			.dscclk_mhz = 209.0, +			.dtbclk_mhz = 600.0, +		}, +		{ +			.state = 2, +			.dispclk_mhz = 1200.0, +			.dppclk_mhz = 1200.0, +			.phyclk_mhz = 810.0, +			.phyclk_d18_mhz = 667.0, +			.dscclk_mhz = 209.0, +			.dtbclk_mhz = 600.0, +		}, +		{ +			
.state = 3, +			.dispclk_mhz = 1200.0, +			.dppclk_mhz = 1200.0, +			.phyclk_mhz = 810.0, +			.phyclk_d18_mhz = 667.0, +			.dscclk_mhz = 371.0, +			.dtbclk_mhz = 600.0, +		}, +		{ +			.state = 4, +			.dispclk_mhz = 1200.0, +			.dppclk_mhz = 1200.0, +			.phyclk_mhz = 810.0, +			.phyclk_d18_mhz = 667.0, +			.dscclk_mhz = 417.0, +			.dtbclk_mhz = 600.0, +		}, +	}, +	.num_states = 5, +	.sr_exit_time_us = 28.0, +	.sr_enter_plus_exit_time_us = 30.0, +	.sr_exit_z8_time_us = 210.0, +	.sr_enter_plus_exit_z8_time_us = 320.0, +	.fclk_change_latency_us = 24.0, +	.usr_retraining_latency_us = 2, +	.writeback_latency_us = 12.0, + +	.dram_channel_width_bytes = 4,/*not exist in dml2*/ +	.round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/ +	.urgent_latency_pixel_data_only_us = 4.0, +	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0, +	.urgent_latency_vm_data_only_us = 4.0, +	.dram_clock_change_latency_us = 11.72, +	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, +	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, +	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + +	.pct_ideal_sdp_bw_after_urgent = 80.0, +	.pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/ +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0, +	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, +	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, +	.max_avg_sdp_bw_use_normal_percent = 60.0, +	.max_avg_dram_bw_use_normal_percent = 60.0, +	.fabric_datapath_to_dcn_data_return_bytes = 32, +	.return_bus_width_bytes = 64, +	.downspread_percent = 0.38, +	.dcn_downspread_percent = 0.5, +	.gpuvm_min_page_size_bytes = 4096, +	.hostvm_min_page_size_bytes = 4096, +	.do_urgent_latency_adjustment = 0, +	.urgent_latency_adjustment_fabric_clock_component_us = 0, +	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0, +}; + +/* + * dcn351_update_bw_bounding_box + * + * This would override some dcn3_51 ip_or_soc initial parameters hardcoded from + * 
spreadsheet with actual values as per dGPU SKU: + * - with passed few options from dc->config + * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might + *   need to get it from PM FW) + * - with passed latency values (passed in ns units) in dc-> bb override for + *   debugging purposes + * - with passed latencies from VBIOS (in 100_ns units) if available for + *   certain dGPU SKU + * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU + *   of the same ASIC) + * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM + *   FW for different clocks (which might differ for certain dGPU SKU of the + *   same ASIC) + */ +void dcn351_update_bw_bounding_box_fpu(struct dc *dc, +				      struct clk_bw_params *bw_params) +{ +	unsigned int i, closest_clk_lvl; +	int j; +	struct clk_limit_table *clk_table = &bw_params->clk_table; +	struct _vcs_dpi_voltage_scaling_st *clock_limits = +		dc->scratch.update_bw_bounding_box.clock_limits; +	int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + +	dc_assert_fp_enabled(); + +	dcn3_51_ip.max_num_otg = +		dc->res_pool->res_cap->num_timing_generator; +	dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count; +	dcn3_51_soc.num_chans = bw_params->num_channels; + +	ASSERT(clk_table->num_entries); + +	/* Prepass to find max clocks independent of voltage level. 
*/ +	for (i = 0; i < clk_table->num_entries; ++i) { +		if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) +			max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; +		if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) +			max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; +	} + +	for (i = 0; i < clk_table->num_entries; i++) { +		/* loop backwards*/ +		for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1; +			j >= 0; j--) { +			if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <= +				clk_table->entries[i].dcfclk_mhz) { +				closest_clk_lvl = j; +				break; +			} +		} +		if (clk_table->num_entries == 1) { +			/*smu gives one DPM level, let's take the highest one*/ +			closest_clk_lvl = dcn3_51_soc.num_states - 1; +		} + +		clock_limits[i].state = i; + +		/* Clocks dependent on voltage level. */ +		clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; +		if (clk_table->num_entries == 1 && +			clock_limits[i].dcfclk_mhz < +			dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { +			/*SMU fix not released yet*/ +			clock_limits[i].dcfclk_mhz = +				dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; +		} + +		clock_limits[i].fabricclk_mhz = +			clk_table->entries[i].fclk_mhz; +		clock_limits[i].socclk_mhz = +			clk_table->entries[i].socclk_mhz; + +		if (clk_table->entries[i].memclk_mhz && +			clk_table->entries[i].wck_ratio) +			clock_limits[i].dram_speed_mts = +				clk_table->entries[i].memclk_mhz * 2 * +				clk_table->entries[i].wck_ratio; + +		/* Clocks independent of voltage level. */ +		clock_limits[i].dispclk_mhz = max_dispclk_mhz ? +			max_dispclk_mhz : +			dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + +		clock_limits[i].dppclk_mhz = max_dppclk_mhz ? 
+			max_dppclk_mhz : +			dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + +		clock_limits[i].dram_bw_per_chan_gbps = +			dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; +		clock_limits[i].dscclk_mhz = +			dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz; +		clock_limits[i].dtbclk_mhz = +			dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; +		clock_limits[i].phyclk_d18_mhz = +			dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; +		clock_limits[i].phyclk_mhz = +			dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz; +	} + +	memcpy(dcn3_51_soc.clock_limits, clock_limits, +		sizeof(dcn3_51_soc.clock_limits)); + +	if (clk_table->num_entries) +		dcn3_51_soc.num_states = clk_table->num_entries; + +	if (max_dispclk_mhz) { +		dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; +		dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; +	} +	if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000) +				!= dc->debug.dram_clock_change_latency_ns +			&& dc->debug.dram_clock_change_latency_ns) { +		dcn3_51_soc.dram_clock_change_latency_us = +			dc->debug.dram_clock_change_latency_ns / 1000.0; +	} + +	if (dc->bb_overrides.dram_clock_change_latency_ns > 0) +		dcn3_51_soc.dram_clock_change_latency_us = +			dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + +	if (dc->bb_overrides.sr_exit_time_ns > 0) +		dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + +	if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) +		dcn3_51_soc.sr_enter_plus_exit_time_us = +			dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + +	if (dc->bb_overrides.sr_exit_z8_time_ns > 0) +		dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + +	if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) +		dcn3_51_soc.sr_enter_plus_exit_z8_time_us = +			dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + +	/*temp till dml2 fully work without dml1*/ +	dml_init_instance(&dc->dml, &dcn3_51_soc, 
&dcn3_51_ip, +			  DML_PROJECT_DCN31); + +	/*copy to dml2, before dml2_create*/ +	if (clk_table->num_entries > 2) { + +		for (i = 0; i < clk_table->num_entries; i++) { +			dc->dml2_options.bbox_overrides.clks_table.num_states = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = +				clock_limits[i].dcfclk_mhz; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = +				clock_limits[i].fabricclk_mhz; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = +				clock_limits[i].dispclk_mhz; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = +				clock_limits[i].dppclk_mhz; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = +				clock_limits[i].socclk_mhz; +			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = +				clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = +				clk_table->num_entries; +			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = +				clk_table->num_entries; +		} +	} + +	/* Update latency values */ +	dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us; + +	dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us; +	dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us; + +	
dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us; +	dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ +	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || +		format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. If exact number of vertical lines is not found then + *          it will round up to next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ +	unsigned int num_lines = 0; +	unsigned int lines_time_in_ns = 1000.0 * +			(((float)timing->h_total * 1000.0) / +			 ((float)timing->pix_clk_100hz / 10.0)); + +	num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + +	return num_lines; +} + +static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) +{ +	unsigned int v_active = 0, v_blank = 0, v_back_porch = 0; + +	v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom; +	v_blank = timing->v_total - v_active; +	v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width; + +	return v_back_porch; +} + +int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, +					      struct dc_state *context, +					      display_e2e_pipe_params_st *pipes, +					      bool fast_validate) +{ +	int i, pipe_cnt; +	struct resource_context *res_ctx = &context->res_ctx; +	struct pipe_ctx *pipe; +	bool upscaled = false; +	const unsigned int max_allowed_vblank_nom = 1023; + +	dcn31_populate_dml_pipes_from_context(dc, context, pipes, +					      fast_validate); + +	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { +		struct dc_crtc_timing *timing; +		unsigned int num_lines = 0; +		
unsigned int v_back_porch = 0; + +		if (!res_ctx->pipe_ctx[i].stream) +			continue; + +		pipe = &res_ctx->pipe_ctx[i]; +		timing = &pipe->stream->timing; + +		num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing); +		v_back_porch  = get_vertical_back_porch(timing); + +		if (pipe->stream->adjust.v_total_max == +		    pipe->stream->adjust.v_total_min && +		    pipe->stream->adjust.v_total_min > timing->v_total) { +			pipes[pipe_cnt].pipe.dest.vtotal = +				pipe->stream->adjust.v_total_min; +			pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - +				pipes[pipe_cnt].pipe.dest.vactive; +		} + +		pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; +		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); +		// vblank_nom should not smaller than (VSync (timing->v_sync_width + v_back_porch) + 2) +		// + 2 is because +		// 1 -> VStartup_start should be 1 line before VSync +		// 1 -> always reserve 1 line between start of vblank to vstartup signal +		pipes[pipe_cnt].pipe.dest.vblank_nom = +			max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2); +		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); + +		if (pipe->plane_state && +		    (pipe->plane_state->src_rect.height < +		     pipe->plane_state->dst_rect.height || +		     pipe->plane_state->src_rect.width < +		     pipe->plane_state->dst_rect.width)) +			upscaled = true; + +		/* +		 * Immediate flip can be set dynamically after enabling the +		 * plane. We need to require support for immediate flip or +		 * underflow can be intermittently experienced depending on peak +		 * b/w requirements. 
+		 */ +		pipes[pipe_cnt].pipe.src.immediate_flip = true; + +		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + +		DC_FP_START(); +		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); +		DC_FP_END(); + +		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; +		pipes[pipe_cnt].pipe.src.dcc_rate = 3; +		pipes[pipe_cnt].dout.dsc_input_bpc = 0; +		pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; + +		if (pipes[pipe_cnt].dout.dsc_enable) { +			switch (timing->display_color_depth) { +			case COLOR_DEPTH_888: +				pipes[pipe_cnt].dout.dsc_input_bpc = 8; +				break; +			case COLOR_DEPTH_101010: +				pipes[pipe_cnt].dout.dsc_input_bpc = 10; +				break; +			case COLOR_DEPTH_121212: +				pipes[pipe_cnt].dout.dsc_input_bpc = 12; +				break; +			default: +				ASSERT(0); +				break; +			} +		} + +		pipe_cnt++; +	} + +	context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/ +	dc->config.enable_4to1MPC = false; + +	if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { +		if (is_dual_plane(pipe->plane_state->format) +				&& pipe->plane_state->src_rect.width <= 1920 && +				pipe->plane_state->src_rect.height <= 1080) { +			dc->config.enable_4to1MPC = true; +		} else if (!is_dual_plane(pipe->plane_state->format) && +			   pipe->plane_state->src_rect.width <= 5120) { +			/* +			 * Limit to 5k max to avoid forced pipe split when there +			 * is not enough detile for swath +			 */ +			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; +			pipes[0].pipe.src.unbounded_req_mode = true; +		} +	} else if (context->stream_count >= +		   dc->debug.crb_alloc_policy_min_disp_count && +		   dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { +		context->bw_ctx.dml.ip.det_buffer_size_kbytes = +			dc->debug.crb_alloc_policy * 64; +	} else if (context->stream_count >= 3 && upscaled) { +		context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; +	} + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = 
&context->res_ctx.pipe_ctx[i]; + +		if (!pipe->stream) +			continue; + +		if (pipe->stream->signal == SIGNAL_TYPE_EDP && +		    dc->debug.seamless_boot_odm_combine && +		    pipe->stream->apply_seamless_boot_optimization) { + +			if (pipe->stream->apply_boot_odm_mode == +			    dm_odm_combine_policy_2to1) { +				context->bw_ctx.dml.vba.ODMCombinePolicy = +					dm_odm_combine_policy_2to1; +				break; +			} +		} +	} + +	return pipe_cnt; +} + +void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context) +{ +	enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW; +	unsigned int i, plane_count = 0; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		if (context->res_ctx.pipe_ctx[i].plane_state) +			plane_count++; +	} +	/*dcn351 does not support z9/z10*/ +	if (context->stream_count == 0 || plane_count == 0) { +		support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; +	} else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { +		struct dc_link *link = context->streams[0]->sink->link; +		bool is_pwrseq0 = link && link->link_index == 0; +		bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || +								link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); +		bool is_replay = link && link->replay_settings.replay_feature_enabled; +		int minmum_z8_residency = +			dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; +		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; + + +		/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/ +		 if (is_pwrseq0 && (is_psr || is_replay)) +			support = allow_z8 ? 
allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW; + +	} +	context->bw_ctx.bw.dcn.clk.zstate_support = support; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h new file mode 100644 index 000000000000..f93efab9a668 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef __DCN351_FPU_H__ +#define __DCN351_FPU_H__ + +#include "clk_mgr.h" + +void dcn351_update_bw_bounding_box_fpu(struct dc *dc, +				      struct clk_bw_params *bw_params); + +int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, +					      struct dc_state *context, +					      display_e2e_pipe_params_st *pipes, +					      bool fast_validate); + +void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 0baf39d64a2d..a52c594e1ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -141,14 +141,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,  {  	int i;  	unsigned int num_found = 0; -	unsigned int plane_id_assigned_to_pipe; +	unsigned int plane_id_assigned_to_pipe = -1;  	for (i = 0; i < ctx->config.dcn_pipe_count; i++) { -		if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state, -			state->res_ctx.pipe_ctx[i].stream->stream_id, -			ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) { -			if (plane_id_assigned_to_pipe == plane_id) -				pipes[num_found++] = i; +		struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + +		if (!pipe->plane_state || !pipe->stream) +			continue; 
+ +		get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id, +					ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx], +					&plane_id_assigned_to_pipe); +		if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe +				&& (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) { +			while (pipe) { +				struct pipe_ctx *mpc_pipe = pipe; + +				while (mpc_pipe) { +					pipes[num_found++] = mpc_pipe->pipe_idx; +					mpc_pipe = mpc_pipe->bottom_pipe; +					if (!mpc_pipe) +						break; +					if (mpc_pipe->plane_state != pipe->plane_state) +						mpc_pipe = NULL; +				} +				pipe = pipe->next_odm_pipe; +			} +			break;  		}  	} @@ -566,8 +585,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru  	unsigned int num_found = 0;  	for (i = 0; i < ctx->config.dcn_pipe_count; i++) { -		if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) { -			pipes[num_found++] = i; +		struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + +		if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) { +			while (pipe) { +				pipes[num_found++] = pipe->pipe_idx; +				pipe = pipe->next_odm_pipe; +			} +			break;  		}  	} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 23a608274096..1ba6933d2b36 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,  	/* Copy clocks tables entries, if available */  	if (dml2->config.bbox_overrides.clks_table.num_states) {  		p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; -  		for (i = 0; i < 
dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {  			p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;  		} @@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,  	}  	dml2_policy_build_synthetic_soc_states(s, p); +	if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 || +		dml2->v20.dml_core_ctx.project == dml_project_dcn351) { +		// Override last out_state with data from last in_state +		// This will ensure that out_state contains max fclk +		memcpy(&p->out_states->state_array[p->out_states->num_states - 1], +				&p->in_states->state_array[p->in_states->num_states - 1], +				sizeof(struct soc_state_bounding_box_st)); +	}  }  void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 1068b962d1c1..f15d1dbad6a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state  		if (state->streams[i]->stream_id == stream_id) {  			for (j = 0; j < state->stream_status[i].plane_count; j++) {  				if (state->stream_status[i].plane_states[j] == plane && -					(!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { +					(!is_plane_duplicate || (j == plane_index))) {  					*plane_id = (i << 16) | j;  					return true;  				} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 26307e599614..2a58a7687bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,  			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;  		}  		for (j = 0; 
j < mode_support_info->DPPPerSurface[i]; j++) { +			if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) { +				dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n", +					  __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__); +				break; +			}  			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];  			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;  			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0df6c55eb326..ac41f9c0a283 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(  	if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)  		kbps = apply_128b_132b_stream_overhead(timing, kbps); +	if (link_encoding == DC_LINK_ENCODING_HDMI_FRL && +			timing->vic == 0 && timing->hdmi_vic == 0 && +			timing->frl_uncompressed_video_bandwidth_in_kbps != 0) +		kbps = timing->frl_uncompressed_video_bandwidth_in_kbps; +  	return kbps;  } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 279020535af7..8f1a95b77830 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -110,6 +110,7 @@ bool dal_hw_factory_init(  	case DCN_VERSION_3_2:  	case DCN_VERSION_3_21:  	case DCN_VERSION_3_5: +	case DCN_VERSION_3_51:  		dal_hw_factory_dcn32_init(factory);  		return true;  	default: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index d6b0a1af7d3e..37166b2b3fee 100644 --- 
a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -111,6 +111,7 @@ bool dal_hw_translate_init(  	case DCN_VERSION_3_2:  	case DCN_VERSION_3_21:  	case DCN_VERSION_3_5: +	case DCN_VERSION_3_51:  		dal_hw_translate_dcn32_init(translate);  		return true;  	default: diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 25ffc052d53b..99e17c164ce7 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "dm_services.h"  #include "dm_helpers.h"  #include "include/hdcp_msg_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index 254136f8e3f9..9e8e9de51a92 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -180,6 +180,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)  ############################################################################### +HWSS_DCN351 = dcn351_init.o + +AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN351) + +############################################################################### +  ###############################################################################  endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 01493c49bd7a..9d5df4c0da59 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1291,6 +1291,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)  	}  } +static void populate_audio_dp_link_info( +	const struct pipe_ctx *pipe_ctx, +	struct audio_dp_link_info *dp_link_info) +{ +	const struct 
dc_stream_state *stream = pipe_ctx->stream; +	const struct dc_link *link = stream->link; +	struct fixed31_32 link_bw_kbps; + +	dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format( +				&pipe_ctx->link_config.dp_link_settings); +	dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST); +	dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count; +	dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate; + +	link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link, +			&pipe_ctx->link_config.dp_link_settings)); + +	/* For audio stream calculations, the video stream should not include FEC or SSC +	 * in order to get the most pessimistic values. +	 */ +	if (dp_link_info->encoding == DP_8b_10b_ENCODING && +			link->dc->link_srv->dp_is_fec_supported(link)) { +		link_bw_kbps = dc_fixpt_mul(link_bw_kbps, +				dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100)); +	} else if (dp_link_info->encoding == DP_128b_132b_ENCODING) { +		link_bw_kbps = dc_fixpt_mul(link_bw_kbps, +				dc_fixpt_from_fraction(10000, 9975)); /* 99.75% SSC overhead*/ +	} + +	dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps); + +	/* HW minimum for 128b/132b HBlank is 4 frame symbols. +	 * TODO: Plumb the actual programmed HBlank min symbol width to here. 
+	 */ +	if (dp_link_info->encoding == DP_128b_132b_ENCODING) +		dp_link_info->hblank_min_symbol_width = 4; +	else +		dp_link_info->hblank_min_symbol_width = 0; +} +  static void build_audio_output(  	struct dc_state *state,  	const struct pipe_ctx *pipe_ctx, @@ -1338,6 +1378,15 @@ static void build_audio_output(  	audio_output->crtc_info.calculated_pixel_clock_100Hz =  			pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz; +	audio_output->crtc_info.pixel_encoding = +		stream->timing.pixel_encoding; + +	audio_output->crtc_info.dsc_bits_per_pixel = +			stream->timing.dsc_cfg.bits_per_pixel; + +	audio_output->crtc_info.dsc_num_slices = +			stream->timing.dsc_cfg.num_slices_h; +  /*for HDMI, audio ACR is with deep color ratio factor*/  	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&  		audio_output->crtc_info.requested_pixel_clock_100Hz == @@ -1371,6 +1420,10 @@ static void build_audio_output(  	audio_output->pll_info.ss_percentage =  			pipe_ctx->pll_settings.ss_percentage; + +	if (dc_is_dp_signal(pipe_ctx->stream->signal)) { +		populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info); +	}  }  static void program_scaler(const struct dc *dc, @@ -1507,7 +1560,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(  				pipe_ctx->stream_res.audio,  				pipe_ctx->stream->signal,  				&audio_output.crtc_info, -				&pipe_ctx->stream->audio_info); +				&pipe_ctx->stream->audio_info, +				&audio_output.dp_link_info);  	}  	/* make sure no pipes syncd to the pipe being enabled */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 6dd479e8a348..314798400b16 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)  	DTN_INFO("\n");  } -void dcn10_log_hw_state(struct dc *dc, -	struct dc_log_buffer_ctx 
*log_ctx) +static void dcn10_log_color_state(struct dc *dc, +				  struct dc_log_buffer_ctx *log_ctx)  {  	struct dc_context *dc_ctx = dc->ctx;  	struct resource_pool *pool = dc->res_pool;  	int i; -	DTN_INFO_BEGIN(); - -	dcn10_log_hubbub_state(dc, log_ctx); - -	dcn10_log_hubp_states(dc, log_ctx); - -	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode" -			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   " -			"C31 C32   C33 C34\n"); +	DTN_INFO("DPP:    IGAM format    IGAM mode    DGAM mode    RGAM mode" +		 "  GAMUT adjust  " +		 "C11        C12        C13        C14        " +		 "C21        C22        C23        C24        " +		 "C31        C32        C33        C34        \n");  	for (i = 0; i < pool->pipe_count; i++) {  		struct dpp *dpp = pool->dpps[i];  		struct dcn_dpp_state s = {0};  		dpp->funcs->dpp_read_state(dpp, &s); +		dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);  		if (!s.is_enabled)  			continue; -		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s" -				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh", +		DTN_INFO("[%2d]:  %11xh  %11s    %9s    %9s" +			 "  %12s  " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld",  				dpp->inst,  				s.igam_input_format,  				(s.igam_lut_mode == 0) ? "BypassFixed" : @@ -329,16 +329,42 @@ void dcn10_log_hw_state(struct dc *dc,  					((s.rgam_lut_mode == 3) ? "RAM" :  					((s.rgam_lut_mode == 4) ? "RAM" :  								 "Unknown")))), -				s.gamut_remap_mode, -				s.gamut_remap_c11_c12, -				s.gamut_remap_c13_c14, -				s.gamut_remap_c21_c22, -				s.gamut_remap_c23_c24, -				s.gamut_remap_c31_c32, -				s.gamut_remap_c33_c34); +				(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : +					((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : +										  "SW"), +				s.gamut_remap.temperature_matrix[0].value, +				s.gamut_remap.temperature_matrix[1].value, +				s.gamut_remap.temperature_matrix[2].value, +				s.gamut_remap.temperature_matrix[3].value, +				s.gamut_remap.temperature_matrix[4].value, +				s.gamut_remap.temperature_matrix[5].value, +				s.gamut_remap.temperature_matrix[6].value, +				s.gamut_remap.temperature_matrix[7].value, +				s.gamut_remap.temperature_matrix[8].value, +				s.gamut_remap.temperature_matrix[9].value, +				s.gamut_remap.temperature_matrix[10].value, +				s.gamut_remap.temperature_matrix[11].value);  		DTN_INFO("\n");  	}  	DTN_INFO("\n"); +	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d" +		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" +		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d" +		 "  blnd_lut:%d  oscs:%d\n\n", +		 dc->caps.color.dpp.input_lut_shared, +		 dc->caps.color.dpp.icsc, +		 dc->caps.color.dpp.dgam_ram, +		 dc->caps.color.dpp.dgam_rom_caps.srgb, +		 dc->caps.color.dpp.dgam_rom_caps.bt2020, +		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2, +		 dc->caps.color.dpp.dgam_rom_caps.pq, +		 dc->caps.color.dpp.dgam_rom_caps.hlg, +		 dc->caps.color.dpp.post_csc, +		 dc->caps.color.dpp.gamma_corr, +		 dc->caps.color.dpp.dgam_rom_for_yuv, +		 dc->caps.color.dpp.hw_3d_lut, +		 dc->caps.color.dpp.ogam_ram, +		 dc->caps.color.dpp.ocsc);  	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");  	for (i = 0; i < pool->pipe_count; i++) { @@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc,  				s.idle);  	}  	DTN_INFO("\n"); +	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", +		 dc->caps.color.mpc.gamut_remap, +		 dc->caps.color.mpc.num_3dluts, +		 dc->caps.color.mpc.ogam_ram, +		 dc->caps.color.mpc.ocsc); +} + +void dcn10_log_hw_state(struct dc *dc, +			struct dc_log_buffer_ctx *log_ctx) +{ +	struct dc_context *dc_ctx = dc->ctx; +	struct resource_pool 
*pool = dc->res_pool; +	int i; + +	DTN_INFO_BEGIN(); + +	dcn10_log_hubbub_state(dc, log_ctx); + +	dcn10_log_hubp_states(dc, log_ctx); + +	if (dc->hwss.log_color_state) +		dc->hwss.log_color_state(dc, log_ctx); +	else +		dcn10_log_color_state(dc, log_ctx);  	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n"); @@ -1840,6 +1890,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,  {  	struct dpp *dpp = pipe_ctx->plane_res.dpp; +	if (!stream) +		return false; +  	if (dpp == NULL)  		return false; @@ -1862,8 +1915,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,  	} else  		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); -	if (stream != NULL && stream->ctx != NULL && -			stream->out_transfer_func != NULL) { +	if (stream->ctx && +	    stream->out_transfer_func) {  		log_tf(stream->ctx,  				stream->out_transfer_func,  				dpp->regamma_params.hw_points_num); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 931ac8ed7069..c55d5155ecb9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -71,6 +71,112 @@  #define FN(reg_name, field_name) \  	hws->shifts->field_name, hws->masks->field_name +void dcn20_log_color_state(struct dc *dc, +			   struct dc_log_buffer_ctx *log_ctx) +{ +	struct dc_context *dc_ctx = dc->ctx; +	struct resource_pool *pool = dc->res_pool; +	int i; + +	DTN_INFO("DPP:  DGAM mode  SHAPER mode  3DLUT mode  3DLUT bit depth" +		 "  3DLUT size  RGAM mode  GAMUT adjust  " +		 "C11        C12        C13        C14        " +		 "C21        C22        C23        C24        " +		 "C31        C32        C33        C34        \n"); + +	for (i = 0; i < pool->pipe_count; i++) { +		struct dpp *dpp = pool->dpps[i]; +		struct dcn_dpp_state 
s = {0}; + +		dpp->funcs->dpp_read_state(dpp, &s); +		dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + +		if (!s.is_enabled) +			continue; + +		DTN_INFO("[%2d]:  %8s  %11s  %10s  %15s  %10s  %9s  %12s  " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld", +			dpp->inst, +			(s.dgam_lut_mode == 0) ? "Bypass" : +			 ((s.dgam_lut_mode == 1) ? "sRGB" : +			 ((s.dgam_lut_mode == 2) ? "Ycc" : +			 ((s.dgam_lut_mode == 3) ? "RAM" : +			 ((s.dgam_lut_mode == 4) ? "RAM" : +						   "Unknown")))), +			(s.shaper_lut_mode == 1) ? "RAM A" : +			 ((s.shaper_lut_mode == 2) ? "RAM B" : +						     "Bypass"), +			(s.lut3d_mode == 1) ? "RAM A" : +			 ((s.lut3d_mode == 2) ? "RAM B" : +						"Bypass"), +			(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", +			(s.lut3d_size == 0) ? "17x17x17" : "9x9x9", +			(s.rgam_lut_mode == 1) ? "RAM A" : +			 ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"), +			(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : +			 ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : +								   "SW"), +			s.gamut_remap.temperature_matrix[0].value, +			s.gamut_remap.temperature_matrix[1].value, +			s.gamut_remap.temperature_matrix[2].value, +			s.gamut_remap.temperature_matrix[3].value, +			s.gamut_remap.temperature_matrix[4].value, +			s.gamut_remap.temperature_matrix[5].value, +			s.gamut_remap.temperature_matrix[6].value, +			s.gamut_remap.temperature_matrix[7].value, +			s.gamut_remap.temperature_matrix[8].value, +			s.gamut_remap.temperature_matrix[9].value, +			s.gamut_remap.temperature_matrix[10].value, +			s.gamut_remap.temperature_matrix[11].value); +		DTN_INFO("\n"); +	} +	DTN_INFO("\n"); +	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d" +		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" +		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d" +		 "  blnd_lut:%d  oscs:%d\n\n", +		 dc->caps.color.dpp.input_lut_shared, +		 dc->caps.color.dpp.icsc, +		 dc->caps.color.dpp.dgam_ram, +		 dc->caps.color.dpp.dgam_rom_caps.srgb, +		 dc->caps.color.dpp.dgam_rom_caps.bt2020, +		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2, +		 dc->caps.color.dpp.dgam_rom_caps.pq, +		 dc->caps.color.dpp.dgam_rom_caps.hlg, +		 dc->caps.color.dpp.post_csc, +		 dc->caps.color.dpp.gamma_corr, +		 dc->caps.color.dpp.dgam_rom_for_yuv, +		 dc->caps.color.dpp.hw_3d_lut, +		 dc->caps.color.dpp.ogam_ram, +		 dc->caps.color.dpp.ocsc); + +	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE" +		 "  OGAM mode\n"); + +	for (i = 0; i < pool->pipe_count; i++) { +		struct mpcc_state s = {0}; + +		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); +		if (s.opp_id != 0xf) +			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d  %9s\n", +				i, s.opp_id, s.dpp_id, s.bot_mpcc_id, +				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, +				s.idle, +				(s.rgam_mode == 1) ? "RAM A" : +				 ((s.rgam_mode == 2) ? 
"RAM B" : +						       "Bypass")); +	} +	DTN_INFO("\n"); +	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", +		 dc->caps.color.mpc.gamut_remap, +		 dc->caps.color.mpc.num_3dluts, +		 dc->caps.color.mpc.ogam_ram, +		 dc->caps.color.mpc.ocsc); +} + +  static int find_free_gsl_group(const struct dc *dc)  {  	if (dc->res_pool->gsl_groups.gsl_0 == 0) @@ -1633,6 +1739,7 @@ static void dcn20_update_dchubp_dpp(  	if (pipe_ctx->update_flags.bits.scaler ||  			plane_state->update_flags.bits.scaling_change ||  			plane_state->update_flags.bits.position_change || +			plane_state->update_flags.bits.clip_size_change ||  			plane_state->update_flags.bits.per_pixel_alpha_change ||  			pipe_ctx->stream->update_flags.bits.scaling) {  		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1645,6 +1752,7 @@ static void dcn20_update_dchubp_dpp(  	if (pipe_ctx->update_flags.bits.viewport ||  			(context == dc->current_state && plane_state->update_flags.bits.position_change) ||  			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) || +			(context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||  			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {  		hubp->funcs->mem_program_viewport( @@ -1958,7 +2066,6 @@ void dcn20_program_front_end_for_ctx(  				&& context->res_ctx.pipe_ctx[i].stream)  			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); -  	/* Disconnect mpcc */  	for (i = 0; i < dc->res_pool->pipe_count; i++)  		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index d950b3e54ec2..5c874f7b0683 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -28,6 +28,8 @@  #include 
"hw_sequencer_private.h" +void dcn20_log_color_state(struct dc *dc, +			   struct dc_log_buffer_ctx *log_ctx);  bool dcn20_set_blend_lut(  	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);  bool dcn20_set_shaper_3dlut( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index c34c13e1e0a4..7e6b7f2a6dc9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -69,6 +69,155 @@  #define FN(reg_name, field_name) \  	hws->shifts->field_name, hws->masks->field_name +void dcn30_log_color_state(struct dc *dc, +			   struct dc_log_buffer_ctx *log_ctx) +{ +	struct dc_context *dc_ctx = dc->ctx; +	struct resource_pool *pool = dc->res_pool; +	int i; + +	DTN_INFO("DPP:  DGAM ROM  DGAM ROM type  DGAM LUT  SHAPER mode" +		 "  3DLUT mode  3DLUT bit depth  3DLUT size  RGAM mode" +		 "  GAMUT adjust  " +		 "C11        C12        C13        C14        " +		 "C21        C22        C23        C24        " +		 "C31        C32        C33        C34        \n"); + +	for (i = 0; i < pool->pipe_count; i++) { +		struct dpp *dpp = pool->dpps[i]; +		struct dcn_dpp_state s = {0}; + +		dpp->funcs->dpp_read_state(dpp, &s); +		dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + +		if (!s.is_enabled) +			continue; + +		DTN_INFO("[%2d]:  %7x  %13s  %8s  %11s  %10s  %15s  %10s  %9s" +			 "  %12s  " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld " +			 "%010lld %010lld %010lld %010lld", +			dpp->inst, +			s.pre_dgam_mode, +			(s.pre_dgam_select == 0) ? "sRGB" : +			 ((s.pre_dgam_select == 1) ? "Gamma 2.2" : +			 ((s.pre_dgam_select == 2) ? "Gamma 2.4" : +			 ((s.pre_dgam_select == 3) ? "Gamma 2.6" : +			 ((s.pre_dgam_select == 4) ? "BT.709" : +			 ((s.pre_dgam_select == 5) ? "PQ" : +			 ((s.pre_dgam_select == 6) ? "HLG" : +						     "Unknown")))))), +			(s.gamcor_mode == 0) ? 
"Bypass" : +			 ((s.gamcor_mode == 1) ? "RAM A" : +						 "RAM B"), +			(s.shaper_lut_mode == 1) ? "RAM A" : +			 ((s.shaper_lut_mode == 2) ? "RAM B" : +						     "Bypass"), +			(s.lut3d_mode == 1) ? "RAM A" : +			 ((s.lut3d_mode == 2) ? "RAM B" : +						"Bypass"), +			(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", +			(s.lut3d_size == 0) ? "17x17x17" : "9x9x9", +			(s.rgam_lut_mode == 0) ? "Bypass" : +			 ((s.rgam_lut_mode == 1) ? "RAM A" : +						   "RAM B"), +			(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : +				((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : +									  "SW"), +			s.gamut_remap.temperature_matrix[0].value, +			s.gamut_remap.temperature_matrix[1].value, +			s.gamut_remap.temperature_matrix[2].value, +			s.gamut_remap.temperature_matrix[3].value, +			s.gamut_remap.temperature_matrix[4].value, +			s.gamut_remap.temperature_matrix[5].value, +			s.gamut_remap.temperature_matrix[6].value, +			s.gamut_remap.temperature_matrix[7].value, +			s.gamut_remap.temperature_matrix[8].value, +			s.gamut_remap.temperature_matrix[9].value, +			s.gamut_remap.temperature_matrix[10].value, +			s.gamut_remap.temperature_matrix[11].value); +		DTN_INFO("\n"); +	} +	DTN_INFO("\n"); +	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d" +		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" +		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d" +		 "  blnd_lut:%d  oscs:%d\n\n", +		 dc->caps.color.dpp.input_lut_shared, +		 dc->caps.color.dpp.icsc, +		 dc->caps.color.dpp.dgam_ram, +		 dc->caps.color.dpp.dgam_rom_caps.srgb, +		 dc->caps.color.dpp.dgam_rom_caps.bt2020, +		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2, +		 dc->caps.color.dpp.dgam_rom_caps.pq, +		 dc->caps.color.dpp.dgam_rom_caps.hlg, +		 dc->caps.color.dpp.post_csc, +		 dc->caps.color.dpp.gamma_corr, +		 dc->caps.color.dpp.dgam_rom_for_yuv, +		 dc->caps.color.dpp.hw_3d_lut, +		 dc->caps.color.dpp.ogam_ram, +		 dc->caps.color.dpp.ocsc); + +	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT 
 MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE" +		 "  SHAPER mode  3DLUT mode  3DLUT bit-depth  3DLUT size  OGAM mode  OGAM LUT" +		 "  GAMUT adjust  " +		 "C11        C12        C13        C14        " +		 "C21        C22        C23        C24        " +		 "C31        C32        C33        C34        \n"); + +	for (i = 0; i < pool->pipe_count; i++) { +		struct mpcc_state s = {0}; + +		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); +		mpc3_get_gamut_remap(pool->mpc, i,  &s.gamut_remap); + +		if (s.opp_id != 0xf) +			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d  %11s %11s %16s %11s %10s %9s" +				 "  %-12s  " +				 "%010lld %010lld %010lld %010lld " +				 "%010lld %010lld %010lld %010lld " +				 "%010lld %010lld %010lld %010lld\n", +				i, s.opp_id, s.dpp_id, s.bot_mpcc_id, +				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, +				s.idle, +				(s.shaper_lut_mode == 1) ? "RAM A" : +				 ((s.shaper_lut_mode == 2) ? "RAM B" : +							     "Bypass"), +				(s.lut3d_mode == 1) ? "RAM A" : +				 ((s.lut3d_mode == 2) ? "RAM B" : +							"Bypass"), +				(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", +				(s.lut3d_size == 0) ? "17x17x17" : "9x9x9", +				(s.rgam_mode == 0) ? "Bypass" : +				 ((s.rgam_mode == 2) ? "RAM" : +						       "Unknown"), +				(s.rgam_mode == 1) ? "B" : "A", +				(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : +					((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : +										  "SW"), +				s.gamut_remap.temperature_matrix[0].value, +				s.gamut_remap.temperature_matrix[1].value, +				s.gamut_remap.temperature_matrix[2].value, +				s.gamut_remap.temperature_matrix[3].value, +				s.gamut_remap.temperature_matrix[4].value, +				s.gamut_remap.temperature_matrix[5].value, +				s.gamut_remap.temperature_matrix[6].value, +				s.gamut_remap.temperature_matrix[7].value, +				s.gamut_remap.temperature_matrix[8].value, +				s.gamut_remap.temperature_matrix[9].value, +				s.gamut_remap.temperature_matrix[10].value, +				s.gamut_remap.temperature_matrix[11].value); + +	} +	DTN_INFO("\n"); +	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", +		 dc->caps.color.mpc.gamut_remap, +		 dc->caps.color.mpc.num_3dluts, +		 dc->caps.color.mpc.ogam_ram, +		 dc->caps.color.mpc.ocsc); +} +  bool dcn30_set_blend_lut(  	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)  { @@ -1015,21 +1164,3 @@ void dcn30_prepare_bandwidth(struct dc *dc,  	if (!dc->clk_mgr->clks.fw_based_mclk_switching)  		dc_dmub_srv_p_state_delegate(dc, false, context);  } - -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, -		int num_pipes, const struct dc_static_screen_params *params) -{ -	unsigned int i; -	unsigned int triggers = 0; - -	if (params->triggers.surface_update) -		triggers |= 0x100; -	if (params->triggers.cursor_update) -		triggers |= 0x8; -	if (params->triggers.force_trigger) -		triggers |= 0x1; - -	for (i = 0; i < num_pipes; i++) -		pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, -					triggers, params->num_frames); -} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index e557e2b98618..638f018a3cb5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup(  	
unsigned int num_dwb,  	struct dc_writeback_info *wb_info); +void dcn30_log_color_state(struct dc *dc, +			   struct dc_log_buffer_ctx *log_ctx); +  bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,  		const struct dc_plane_state *plane_state); @@ -90,7 +93,4 @@ void dcn30_set_hubp_blank(const struct dc *dc,  void dcn30_prepare_bandwidth(struct dc *dc,  	struct dc_state *context); -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, -		int num_pipes, const struct dc_static_screen_params *params); -  #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 9894caedffed..ef913445a795 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn10_set_drr,  	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn10_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 7423880fabb6..a760f0c6fe98 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc)  		for (i = 0; i < dc->res_pool->stream_enc_count; i++)  			if (dc->res_pool->stream_enc[i]->vpg)  				dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); -#if defined(CONFIG_DRM_AMD_DC_FP)  		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)  			
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); -#endif  	}  } @@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)  	if (hws->ctx->dc->debug.hpo_optimization)  		REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);  } + +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct dc_static_screen_params *params) +{ +	unsigned int i; +	unsigned int triggers = 0; + +	if (params->triggers.surface_update) +		triggers |= 0x100; +	if (params->triggers.cursor_update) +		triggers |= 0x8; +	if (params->triggers.force_trigger) +		triggers |= 0x1; + +	for (i = 0; i < num_pipes; i++) +		pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, +					triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index edfc01d6ad73..b8bc939da155 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc,  void dcn31_init_pipes(struct dc *dc, struct dc_state *context);  void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct dc_static_screen_params *params); + +  #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 669f524bd064..c06cc2c5da92 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn10_set_drr,  	.get_position = dcn10_get_position, -	
.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn31_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index ccb7e317e86a..542ce3b7f9e4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn10_set_drr,  	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn31_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index e8ac94a005b8..2b073123d3ed 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn10_set_drr,  	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn31_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 8b6c49622f3b..4b92df23ff0d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1342,8 +1342,8 @@ void 
dcn35_set_drr(struct pipe_ctx **pipe_ctx,  {  	int i = 0;  	struct drr_params params = {0}; -	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow -	unsigned int event_triggers = 0x800; +	// DRR set trigger event mapped to OTG_TRIG_A +	unsigned int event_triggers = 0x2;//Bit[1]: OTG_TRIG_A  	// Note DRR trigger events are generated regardless of whether num frames met.  	unsigned int num_frames = 2; @@ -1377,3 +1377,20 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,  		}  	}  } +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct dc_static_screen_params *params) +{ +	unsigned int i; +	unsigned int triggers = 0; + +	if (params->triggers.surface_update) +		triggers |= 0x200;/*bit 9  : 10 0000 0000*/ +	if (params->triggers.cursor_update) +		triggers |= 0x8;/*bit3*/ +	if (params->triggers.force_trigger) +		triggers |= 0x1; +	for (i = 0; i < num_pipes; i++) +		pipe_ctx[i]->stream_res.tg->funcs-> +			set_static_screen_control(pipe_ctx[i]->stream_res.tg, +					triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index fd66316e33de..c354efa6c1b2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -90,4 +90,7 @@ uint32_t dcn35_get_idle_state(const struct dc *dc);  void dcn35_set_drr(struct pipe_ctx **pipe_ctx,  		int num_pipes, struct dc_crtc_timing_adjust adjust); +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, +		int num_pipes, const struct dc_static_screen_params *params); +  #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index a630aa77dcec..a93073055e7b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ 
-70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn35_set_drr,  	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn35_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt deleted file mode 100644 index 951ca2da4486..000000000000 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -dal3_subdirectory_sources( -  dcn351_init.c -  dcn351_init.h -) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 143d3fc0221c..ab17fa1c64e8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {  	.update_bandwidth = dcn20_update_bandwidth,  	.set_drr = dcn10_set_drr,  	.get_position = dcn10_get_position, -	.set_static_screen_control = dcn30_set_static_screen_control, +	.set_static_screen_control = dcn35_set_static_screen_control,  	.setup_stereo = dcn10_setup_stereo,  	.set_avmute = dcn30_set_avmute,  	.log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 64ca7c66509b..f89f205e42a1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -339,6 +339,8 @@ struct hw_sequencer_funcs {  	/* HW State Logging Related */  	void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); +	void (*log_color_state)(struct dc *dc, +				struct dc_log_buffer_ctx *log_ctx);  	
void (*get_hw_state)(struct dc *dc, char *pBuf,  			unsigned int bufSize, unsigned int mask);  	void (*clear_status_bits)(struct dc *dc, unsigned int mask); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index b3c62a82cb1c..554cfab5ab24 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -155,7 +155,6 @@ struct hwseq_private_funcs {  	void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);  	void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,  			       struct dc_state *context); -#ifdef CONFIG_DRM_AMD_DC_FP  	void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);  	void (*update_force_pstate)(struct dc *dc, struct dc_state *context);  	void (*update_mall_sel)(struct dc *dc, struct dc_state *context); @@ -170,7 +169,6 @@ struct hwseq_private_funcs {  			struct dc_state *context,  			struct dc *dc);  	bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); -#endif  	void (*reset_back_end_for_pipe)(struct dc *dc,  			struct pipe_ctx *pipe_ctx,  			struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 3a6bf77a6873..b1b72e688f74 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -522,6 +522,25 @@ struct dc_dmub_cmd {  	enum dm_dmub_wait_type wait_type;  }; +struct dc_scratch_space { +	/* used to temporarily backup plane states of a stream during +	 * dc update. The reason is that plane states are overwritten +	 * with surface updates in dc update. Once they are overwritten +	 * current state is no longer valid. We want to temporarily +	 * store current value in plane states so we can still recover +	 * a valid current state during dc update. 
+	 */ +	struct dc_plane_state plane_states[MAX_SURFACE_NUM]; +	struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; +	struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; +	struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; +	struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; +	struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; + +	struct dc_stream_state stream_state; +	struct dc_transfer_func out_transfer_func; +}; +  /**   * struct dc_state - The full description of a state requested by users   */ @@ -604,16 +623,8 @@ struct dc_state {  		unsigned int stutter_period_us;  	} perf_params; -	struct { -		/* used to temporarily backup plane states of a stream during -		 * dc update. The reason is that plane states are overwritten -		 * with surface updates in dc update. Once they are overwritten -		 * current state is no longer valid. We want to temporarily -		 * store current value in plane states so we can still recover -		 * a valid current state during dc update. -		 */ -		struct dc_plane_state plane_states[MAX_SURFACE_NUM]; -	} scratch; + +	struct dc_scratch_space scratch;  };  struct replay_context { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h index 6ed1fb8c9300..b6203253111c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h @@ -43,7 +43,8 @@ struct audio_funcs {  	void (*az_configure)(struct audio *audio,  		enum signal_type signal,  		const struct audio_crtc_info *crtc_info, -		const struct audio_info *audio_info); +		const struct audio_info *audio_info, +		const struct audio_dp_link_info *dp_link_info);  	void (*wall_dto_setup)(struct audio *audio,  		enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 6f4c97543c14..f4d4a68c91dc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -356,6 +356,7 @@ struct clk_mgr_internal {  	long long wm_range_table_addr;  	bool dpm_present; +	bool pme_trigger_pending;  };  struct clk_mgr_internal_funcs { @@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz)  	return (khz + 999) / 1000;  } +static inline int khz_to_mhz_floor(int khz) +{ +	return khz / 1000; +} +  int clk_mgr_helper_get_active_display_cnt(  		struct dc *dc,  		struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 901891316dfb..2ae7484d18af 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -26,6 +26,12 @@  #ifndef __DAL_DCHUBBUB_H__  #define __DAL_DCHUBBUB_H__ +/** + * DOC: overview + * + * There is only one common DCHUBBUB. It contains the common request and return + * blocks for the Data Fabric Interface that are not clock/power gated. + */  enum dcc_control {  	dcc_control__256_256_xxx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index f4aa76e02518..0f24afbf4388 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -27,6 +27,31 @@  #ifndef __DAL_DPP_H__  #define __DAL_DPP_H__ +/** + * DOC: overview + * + * The DPP (Display Pipe and Plane) block is the unified display data + * processing engine in DCN for processing graphic or video data on per DPP + * rectangle base. This rectangle can be a part of SLS (Single Large Surface), + * or a layer to be blended with other DPP, or a rectangle associated with a + * display tile. 
+ * + * It provides various functions including: + * - graphic color keyer + * - graphic cursor compositing + * - graphic or video image source to destination scaling + * - image sharping + * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4 + * - Color Space Conversion + * - Host LUT gamma adjustment + * - Color Gamut Remap + * - brightness and contrast adjustment. + * + * DPP pipe consists of Converter and Cursor (CNVC), Scaler (DSCL), Color + * Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) module + * connected in a video/graphics pipeline. + */ +  #include "transform.h"  #include "cursor_reg_cache.h" @@ -141,6 +166,7 @@ struct dcn_dpp_state {  	uint32_t igam_input_format;  	uint32_t dgam_lut_mode;  	uint32_t rgam_lut_mode; +	// gamut_remap data for dcn10_get_cm_states()  	uint32_t gamut_remap_mode;  	uint32_t gamut_remap_c11_c12;  	uint32_t gamut_remap_c13_c14; @@ -148,6 +174,16 @@ struct dcn_dpp_state {  	uint32_t gamut_remap_c23_c24;  	uint32_t gamut_remap_c31_c32;  	uint32_t gamut_remap_c33_c34; +	// gamut_remap data for dcn*_log_color_state() +	struct dpp_grph_csc_adjustment gamut_remap; +	uint32_t shaper_lut_mode; +	uint32_t lut3d_mode; +	uint32_t lut3d_bit_depth; +	uint32_t lut3d_size; +	uint32_t blnd_lut_mode; +	uint32_t pre_dgam_mode; +	uint32_t pre_dgam_select; +	uint32_t gamcor_mode;  };  struct CM_bias_params { @@ -290,6 +326,9 @@ struct dpp_funcs {  	void (*dpp_cnv_set_alpha_keyer)(  			struct dpp *dpp_base,  			struct cnv_color_keyer_params *color_keyer); + +	void (*dpp_get_gamut_remap)(struct dpp *dpp_base, +				    struct dpp_grph_csc_adjustment *adjust);  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 7f3f9b69e903..72610cd7eae0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -26,13 +26,24 @@  #ifndef __DAL_HUBP_H__  #define __DAL_HUBP_H__ +/** + * DOC: overview + * + * Display Controller Hub 
(DCHUB) is the gateway between the Scalable Data Port + * (SDP) and DCN. This component has multiple features, such as memory + * arbitration, rotation, and cursor manipulation. + * + * There is one HUBP allocated per pipe, which fetches data and converts + * different pixel formats (i.e. ARGB8888, NV12, etc) into linear, interleaved + * and fixed-depth streams of pixel data. + */ +  #include "mem_input.h"  #include "cursor_reg_cache.h"  #define OPP_ID_INVALID 0xf  #define MAX_TTU 0xffffff -  enum cursor_pitch {  	CURSOR_PITCH_64_PIXELS = 0,  	CURSOR_PITCH_128_PIXELS, @@ -146,9 +157,7 @@ struct hubp_funcs {  	void (*set_blank)(struct hubp *hubp, bool blank);  	void (*set_blank_regs)(struct hubp *hubp, bool blank); -#ifdef CONFIG_DRM_AMD_DC_FP  	void (*phantom_hubp_post_enable)(struct hubp *hubp); -#endif  	void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);  	void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 61a2406dcc53..34a398f23fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -23,13 +23,28 @@   */  /** - * DOC: mpc-overview + * DOC: overview   * - * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline + * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline   * that performs blending of multiple planes, using global and per-pixel alpha.   * It also performs post-blending color correction operations according to the   * hardware capabilities, such as color transformation matrix and gamma 1D and   * 3D LUT. + * + * MPC receives output from all DPP pipes and combines them to multiple outputs + * supporting "M MPC inputs -> N MPC outputs" flexible composition + * architecture. 
It features: + * + * - Programmable blending structure to allow software controlled blending and + *   cascading; + * - Programmable window location of each DPP in active region of display; + * - Combining multiple DPP pipes in one active region when a single DPP pipe + *   cannot process very large surface; + * - Combining multiple DPP from different SLS with blending; + * - Stereo formats from single DPP in top-bottom or side-by-side modes; + * - Stereo formats from 2 DPPs; + * - Alpha blending of multiple layers from different DPP pipes; + * - Programmable background color;   */  #ifndef __DC_MPCC_H__ @@ -83,34 +98,65 @@ enum mpcc_alpha_blend_mode {  /**   * struct mpcc_blnd_cfg - MPCC blending configuration - * - * @black_color: background color - * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE) - * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the - * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE) - * @global_gain: used when blend mode considers both pixel alpha and plane - * alpha value and assumes the global alpha value. - * @global_alpha: plane alpha value - * @overlap_only: whether overlapping of different planes is allowed - * @bottom_gain_mode: blend mode for bottom gain setting - * @background_color_bpc: background color for bpc - * @top_gain: top gain setting - * @bottom_inside_gain: blend mode for bottom inside - * @bottom_outside_gain:  blend mode for bottom outside   */  struct mpcc_blnd_cfg { -	struct tg_color black_color;	/* background color */ -	enum mpcc_alpha_blend_mode alpha_mode;	/* alpha blend mode */ -	bool pre_multiplied_alpha;	/* alpha pre-multiplied mode flag */ +	/** +	 * @black_color: background color. +	 */ +	struct tg_color black_color; + +	/** +	 * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE). +	 */ +	enum mpcc_alpha_blend_mode alpha_mode; + +	/** +	 * @pre_multiplied_alpha: +	 * Whether pixel color values were pre-multiplied by the alpha channel +	 * (MPCC_ALPHA_MULTIPLIED_MODE). 
+	 */ +	bool pre_multiplied_alpha; + +	/** +	 * @global_gain: Used when blend mode considers both pixel alpha and plane. +	 */  	int global_gain; + +	/** +	 * @global_alpha: Plane alpha value. +	 */  	int global_alpha; + +	/** +	 * @overlap_only: Whether overlapping of different planes is allowed. +	 */  	bool overlap_only;  	/* MPCC top/bottom gain settings */ + +	/** +	 * @bottom_gain_mode: Blend mode for bottom gain setting. +	 */  	int bottom_gain_mode; + +	/** +	 * @background_color_bpc: Background color for bpc. +	 */  	int background_color_bpc; + +	/** +	 * @top_gain: Top gain setting. +	 */  	int top_gain; + +	/** +	 * @bottom_inside_gain: Blend mode for bottom inside. +	 */  	int bottom_inside_gain; + +	/** +	 * @bottom_outside_gain: Blend mode for bottom outside. +	 */  	int bottom_outside_gain;  }; @@ -150,34 +196,58 @@ struct mpc_dwb_flow_control {  /**   * struct mpcc - MPCC connection and blending configuration for a single MPCC instance. - * @mpcc_id: MPCC physical instance - * @dpp_id: DPP input to this MPCC - * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected. - * @blnd_cfg: the blending configuration for this MPCC - * @sm_cfg: stereo mix setting for this MPCC - * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false.   *   * This struct is used as a node in an MPC tree.   */  struct mpcc { -	int mpcc_id;			/* MPCC physical instance */ -	int dpp_id;			/* DPP input to this MPCC */ -	struct mpcc *mpcc_bot;		/* pointer to bottom layer MPCC.  NULL when not connected */ -	struct mpcc_blnd_cfg blnd_cfg;	/* The blending configuration for this MPCC */ -	struct mpcc_sm_cfg sm_cfg;	/* stereo mix setting for this MPCC */ -	bool shared_bottom;		/* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */ +	/** +	 * @mpcc_id: MPCC physical instance. +	 */ +	int mpcc_id; + +	/** +	 * @dpp_id: DPP input to this MPCC +	 */ +	int dpp_id; + +	/** +	 * @mpcc_bot: Pointer to bottom layer MPCC. 
NULL when not connected. +	 */ +	struct mpcc *mpcc_bot; + +	/** +	 * @blnd_cfg: The blending configuration for this MPCC. +	 */ +	struct mpcc_blnd_cfg blnd_cfg; + +	/** +	 * @sm_cfg: stereo mix setting for this MPCC +	 */ +	struct mpcc_sm_cfg sm_cfg; + +	/** +	 * @shared_bottom: +	 * +	 * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false. +	 */ +	bool shared_bottom;  };  /**   * struct mpc_tree - MPC tree represents all MPCC connections for a pipe.   * - * @opp_id: the OPP instance that owns this MPC tree - * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint   *   */  struct mpc_tree { -	int opp_id;			/* The OPP instance that owns this MPC tree */ -	struct mpcc *opp_list;		/* The top MPCC layer of the MPC tree that outputs to OPP endpoint */ +	/** +	 * @opp_id: The OPP instance that owns this MPC tree. +	 */ +	int opp_id; + +	/** +	 * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint +	 */ +	struct mpcc *opp_list;  };  struct mpc { @@ -199,6 +269,13 @@ struct mpcc_state {  	uint32_t overlap_only;  	uint32_t idle;  	uint32_t busy; +	uint32_t shaper_lut_mode; +	uint32_t lut3d_mode; +	uint32_t lut3d_bit_depth; +	uint32_t lut3d_size; +	uint32_t rgam_mode; +	uint32_t rgam_lut; +	struct mpc_grph_gamut_adjustment gamut_remap;  };  /** @@ -217,16 +294,20 @@ struct mpc_funcs {  	 * Only used for planes that are part of blending chain for OPP output  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context. -	 * [in/out] tree	- MPC tree structure that plane will be added to. -	 * [in]	blnd_cfg	- MPCC blending configuration for the new blending layer. -	 * [in]	sm_cfg		- MPCC stereo mix configuration for the new blending layer. -	 *			  stereo mix must disable for the very bottom layer of the tree config. -	 * [in]	insert_above_mpcc - Insert new plane above this MPCC.  If NULL, insert as bottom plane. -	 * [in]	dpp_id		 - DPP instance for the plane to be added. 
-	 * [in]	mpcc_id		 - The MPCC physical instance to use for blending. -	 * -	 * Return:  struct mpcc* - MPCC that was added. +	 * +	 * - [in/out] mpc  - MPC context. +	 * - [in/out] tree - MPC tree structure that plane will be added to. +	 * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. +	 * - [in] sm_cfg   - MPCC stereo mix configuration for the new blending layer. +	 *                   stereo mix must disable for the very bottom layer of the tree config. +	 * - [in] insert_above_mpcc - Insert new plane above this MPCC. +	 *                          If NULL, insert as bottom plane. +	 * - [in] dpp_id  - DPP instance for the plane to be added. +	 * - [in] mpcc_id - The MPCC physical instance to use for blending. +	 * +	 * Return: +	 * +	 * struct mpcc* - MPCC that was added.  	 */  	struct mpcc* (*insert_plane)(  			struct mpc *mpc, @@ -243,11 +324,14 @@ struct mpc_funcs {  	 * Remove a specified MPCC from the MPC tree.  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context. -	 * [in/out] tree	- MPC tree structure that plane will be removed from. -	 * [in/out] mpcc	- MPCC to be removed from tree.  	 * -	 * Return:  void +	 * - [in/out] mpc   - MPC context. +	 * - [in/out] tree  - MPC tree structure that plane will be removed from. +	 * - [in/out] mpcc  - MPCC to be removed from tree. +	 * +	 * Return: +	 * +	 * void  	 */  	void (*remove_mpcc)(  			struct mpc *mpc, @@ -260,9 +344,12 @@ struct mpc_funcs {  	 * Reset the MPCC HW status by disconnecting all muxes.  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context.  	 * -	 * Return:  void +	 * - [in/out] mpc - MPC context. +	 * +	 * Return: +	 * +	 * void  	 */  	void (*mpc_init)(struct mpc *mpc);  	void (*mpc_init_single_inst)( @@ -275,11 +362,14 @@ struct mpc_funcs {  	 * Update the blending configuration for a specified MPCC.  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context. -	 * [in]     blnd_cfg	- MPCC blending configuration. 
-	 * [in]     mpcc_id	- The MPCC physical instance.  	 * -	 * Return:  void +	 * - [in/out] mpc - MPC context. +	 * - [in] blnd_cfg - MPCC blending configuration. +	 * - [in] mpcc_id  - The MPCC physical instance. +	 * +	 * Return: +	 * +	 * void  	 */  	void (*update_blending)(  		struct mpc *mpc, @@ -289,15 +379,18 @@ struct mpc_funcs {  	/**  	 * @cursor_lock:  	 * -	 * Lock cursor updates for the specified OPP. -	 * OPP defines the set of MPCC that are locked together for cursor. +	 * Lock cursor updates for the specified OPP. OPP defines the set of +	 * MPCC that are locked together for cursor.  	 *  	 * Parameters: -	 * [in] 	mpc		- MPC context. -	 * [in]     opp_id	- The OPP to lock cursor updates on -	 * [in]		lock	- lock/unlock the OPP  	 * -	 * Return:  void +	 * - [in] mpc - MPC context. +	 * - [in] opp_id  - The OPP to lock cursor updates on +	 * - [in] lock - lock/unlock the OPP +	 * +	 * Return: +	 * +	 * void  	 */  	void (*cursor_lock)(  			struct mpc *mpc, @@ -307,20 +400,25 @@ struct mpc_funcs {  	/**  	 * @insert_plane_to_secondary:  	 * -	 * Add DPP into secondary MPC tree based on specified blending position. -	 * Only used for planes that are part of blending chain for DWB output +	 * Add DPP into secondary MPC tree based on specified blending +	 * position.  Only used for planes that are part of blending chain for +	 * DWB output  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context. -	 * [in/out] tree		- MPC tree structure that plane will be added to. -	 * [in]	blnd_cfg	- MPCC blending configuration for the new blending layer. -	 * [in]	sm_cfg		- MPCC stereo mix configuration for the new blending layer. -	 *			  stereo mix must disable for the very bottom layer of the tree config. -	 * [in]	insert_above_mpcc - Insert new plane above this MPCC.  If NULL, insert as bottom plane. -	 * [in]	dpp_id		- DPP instance for the plane to be added. -	 * [in]	mpcc_id		- The MPCC physical instance to use for blending. 
-	 * -	 * Return:  struct mpcc* - MPCC that was added. +	 * +	 * - [in/out] mpc  - MPC context. +	 * - [in/out] tree - MPC tree structure that plane will be added to. +	 * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. +	 * - [in] sm_cfg   - MPCC stereo mix configuration for the new blending layer. +	 *	    stereo mix must disable for the very bottom layer of the tree config. +	 * - [in] insert_above_mpcc - Insert new plane above this MPCC.  If +	 *          NULL, insert as bottom plane. +	 * - [in] dpp_id - DPP instance for the plane to be added. +	 * - [in] mpcc_id - The MPCC physical instance to use for blending. +	 * +	 * Return: +	 * +	 * struct mpcc* - MPCC that was added.  	 */  	struct mpcc* (*insert_plane_to_secondary)(  			struct mpc *mpc, @@ -337,10 +435,14 @@ struct mpc_funcs {  	 * Remove a specified DPP from the 'secondary' MPC tree.  	 *  	 * Parameters: -	 * [in/out] mpc		- MPC context. -	 * [in/out] tree	- MPC tree structure that plane will be removed from. -	 * [in]     mpcc	- MPCC to be removed from tree. -	 * Return:  void +	 * +	 * - [in/out] mpc  - MPC context. +	 * - [in/out] tree - MPC tree structure that plane will be removed from. +	 * - [in]     mpcc - MPCC to be removed from tree. +	 * +	 * Return: +	 * +	 * void  	 */  	void (*remove_mpcc_from_secondary)(  			struct mpc *mpc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7617fabbd16e..aee5372e292c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -23,6 +23,22 @@   *   */ +/** + * DOC: overview + * + * The Output Plane Processor (OPP) block groups have functions that format + * pixel streams such that they are suitable for display at the display device. 
+ * The key functions contained in the OPP are: + * + * - Adaptive Backlight Modulation (ABM) + * - Formatter (FMT) which provide pixel-by-pixel operations for format the + *   incoming pixel stream. + * - Output Buffer that provide pixel replication, and overlapping. + * - Interface between MPC and OPTC. + * - Clock and reset generation. + * - CRC generation. + */ +  #ifndef __DAL_OPP_H__  #define __DAL_OPP_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 5dcbaa2db964..e97d964a1791 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -57,7 +57,7 @@ struct panel_cntl_funcs {  struct panel_cntl_init_data {  	struct dc_context *ctx;  	uint32_t inst; -	uint32_t pwrseq_inst; +	uint32_t eng_id;  };  struct panel_cntl { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 9a00a99317b2..d98d72f35be5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -182,9 +182,7 @@ struct timing_generator_funcs {  	bool (*enable_crtc)(struct timing_generator *tg);  	bool (*disable_crtc)(struct timing_generator *tg); -#ifdef CONFIG_DRM_AMD_DC_FP  	void (*phantom_crtc_post_enable)(struct timing_generator *tg); -#endif  	void (*disable_phantom_crtc)(struct timing_generator *tg);  	bool (*immediate_disable_crtc)(struct timing_generator *tg);  	bool (*is_counter_moving)(struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index 076f667a82f6..2d4378780c1a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -170,4 +170,13 @@ IRQ_DCN35 = irq_service_dcn35.o  AMD_DAL_IRQ_DCN35= $(addprefix $(AMDDALPATH)/dc/irq/dcn35/,$(IRQ_DCN35)) -AMD_DISPLAY_FILES 
+= $(AMD_DAL_IRQ_DCN35)
\ No newline at end of file +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35) + +############################################################################### +# DCN 351 +############################################################################### +IRQ_DCN351 = irq_service_dcn351.o + +AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index e8baafa02443..916f0c974637 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "dm_services.h"  #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 03c5e8ff8cbd..42cdfe6c3538 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -23,8 +23,6 @@   *   */ -#include <linux/slab.h> -  #include "dm_services.h"  #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c new file mode 100644 index 000000000000..7ec8e0de2f01 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c @@ -0,0 +1,409 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ + +#include "dm_services.h" +#include "include/logger_interface.h" +#include "../dce110/irq_service_dce110.h" + + +#include "dcn/dcn_3_5_1_offset.h" +#include "dcn/dcn_3_5_1_sh_mask.h" + +#include "irq_service_dcn351.h" + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +static enum dc_irq_source to_dal_irq_source_dcn351( +		struct irq_service *irq_service, +		uint32_t src_id, +		uint32_t ext_id) +{ +	switch (src_id) { +	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK1; +	case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK2; +	case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK3; +	case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK4; +	case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK5; +	case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: +		return DC_IRQ_SOURCE_VBLANK6; +	case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC1_VLINE0; +	case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC2_VLINE0; +	case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC3_VLINE0; +	case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC4_VLINE0; +	case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC5_VLINE0; +	case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: +		return DC_IRQ_SOURCE_DC6_VLINE0; +	case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP1; +	case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP2; +	case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP3; +	case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP4; +	case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP5; +	case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT: +		return DC_IRQ_SOURCE_PFLIP6; +	case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE1; +	case 
DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE2; +	case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE3; +	case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE4; +	case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE5; +	case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT: +		return DC_IRQ_SOURCE_VUPDATE6; +	case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: +		return DC_IRQ_SOURCE_DMCUB_OUTBOX; +	case DCN_1_0__SRCID__DC_HPD1_INT: +		/* generic src_id for all HPD and HPDRX interrupts */ +		switch (ext_id) { +		case DCN_1_0__CTXID__DC_HPD1_INT: +			return DC_IRQ_SOURCE_HPD1; +		case DCN_1_0__CTXID__DC_HPD2_INT: +			return DC_IRQ_SOURCE_HPD2; +		case DCN_1_0__CTXID__DC_HPD3_INT: +			return DC_IRQ_SOURCE_HPD3; +		case DCN_1_0__CTXID__DC_HPD4_INT: +			return DC_IRQ_SOURCE_HPD4; +		case DCN_1_0__CTXID__DC_HPD5_INT: +			return DC_IRQ_SOURCE_HPD5; +		case DCN_1_0__CTXID__DC_HPD6_INT: +			return DC_IRQ_SOURCE_HPD6; +		case DCN_1_0__CTXID__DC_HPD1_RX_INT: +			return DC_IRQ_SOURCE_HPD1RX; +		case DCN_1_0__CTXID__DC_HPD2_RX_INT: +			return DC_IRQ_SOURCE_HPD2RX; +		case DCN_1_0__CTXID__DC_HPD3_RX_INT: +			return DC_IRQ_SOURCE_HPD3RX; +		case DCN_1_0__CTXID__DC_HPD4_RX_INT: +			return DC_IRQ_SOURCE_HPD4RX; +		case DCN_1_0__CTXID__DC_HPD5_RX_INT: +			return DC_IRQ_SOURCE_HPD5RX; +		case DCN_1_0__CTXID__DC_HPD6_RX_INT: +			return DC_IRQ_SOURCE_HPD6RX; +		default: +			return DC_IRQ_SOURCE_INVALID; +		} +		break; + +	default: +		return DC_IRQ_SOURCE_INVALID; +	} +} + +static bool hpd_ack( +	struct irq_service *irq_service, +	const struct irq_source_info *info) +{ +	uint32_t addr = info->status_reg; +	uint32_t value = dm_read_reg(irq_service->ctx, addr); +	uint32_t current_status = +		get_reg_field_value( +			value, +			HPD0_DC_HPD_INT_STATUS, +			DC_HPD_SENSE_DELAYED); + +	dal_irq_service_ack_generic(irq_service, info); + 
+	value = dm_read_reg(irq_service->ctx, info->enable_reg); + +	set_reg_field_value( +		value, +		current_status ? 0 : 1, +		HPD0_DC_HPD_INT_CONTROL, +		DC_HPD_INT_POLARITY); + +	dm_write_reg(irq_service->ctx, info->enable_reg, value); + +	return true; +} + +static struct irq_source_info_funcs hpd_irq_info_funcs = { +	.set = NULL, +	.ack = hpd_ack +}; + +static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +static struct irq_source_info_funcs pflip_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +static struct irq_source_info_funcs vblank_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +static struct irq_source_info_funcs outbox_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +static struct irq_source_info_funcs vline0_irq_info_funcs = { +	.set = NULL, +	.ack = NULL +}; + +#undef BASE_INNER +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +/* compile time expand base address. 
*/ +#define BASE(seg) \ +	BASE_INNER(seg) + +#define SRI(reg_name, block, id)\ +	BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +			reg ## block ## id ## _ ## reg_name + +#define SRI_DMUB(reg_name)\ +	BASE(reg ## reg_name ## _BASE_IDX) + \ +			reg ## reg_name + +#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\ +	REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\ +	REG_STRUCT[base + reg_num].enable_mask = \ +		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ +	REG_STRUCT[base + reg_num].enable_value[0] = \ +		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ +	REG_STRUCT[base + reg_num].enable_value[1] = \ +		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ +	REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\ +	REG_STRUCT[base + reg_num].ack_mask = \ +		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ +	REG_STRUCT[base + reg_num].ack_value = \ +		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ + +#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\ +	REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\ +	REG_STRUCT[base].enable_mask = \ +		reg1 ## __ ## mask1 ## _MASK,\ +	REG_STRUCT[base].enable_value[0] = \ +		reg1 ## __ ## mask1 ## _MASK,\ +	REG_STRUCT[base].enable_value[1] = \ +		~reg1 ## __ ## mask1 ## _MASK, \ +	REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\ +	REG_STRUCT[base].ack_mask = \ +		reg2 ## __ ## mask2 ## _MASK,\ +	REG_STRUCT[base].ack_value = \ +		reg2 ## __ ## mask2 ## _MASK \ + +#define hpd_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\ +			DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ +			DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ +		REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\ +		REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\ + +#define hpd_rx_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\ +			DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ +			
DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ +		REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\ +		REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\ + +#define pflip_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\ +			DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ +			DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ +		REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\ + +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. + */ +#define vupdate_no_lock_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\ +			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ +			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ +		REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\ + +#define vblank_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\ +			OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ +			OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ +		REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\ + +#define vline0_int_entry(reg_num)\ +		IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\ +			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ +			OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ +		REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\ + +#define dmub_outbox_int_entry()\ +		IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \ +			DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ +			DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ +		REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs + +#define dummy_irq_entry(irqno) \ +	REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\ + +#define i2c_int_entry(reg_num) \ +	dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num) + +#define 
dp_sink_int_entry(reg_num) \ +	dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num) + +#define gpio_pad_int_entry(reg_num) \ +	dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num) + +#define dc_underflow_int_entry(reg_num) \ +	dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW) + +static struct irq_source_info_funcs dummy_irq_info_funcs = { +	.set = dal_irq_service_dummy_set, +	.ack = dal_irq_service_dummy_ack +}; + +#define dcn351_irq_init_part_1() {\ +	dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \ +	hpd_int_entry(0); \ +	hpd_int_entry(1); \ +	hpd_int_entry(2); \ +	hpd_int_entry(3); \ +	hpd_int_entry(4); \ +	hpd_rx_int_entry(0); \ +	hpd_rx_int_entry(1); \ +	hpd_rx_int_entry(2); \ +	hpd_rx_int_entry(3); \ +	hpd_rx_int_entry(4); \ +	i2c_int_entry(1); \ +	i2c_int_entry(2); \ +	i2c_int_entry(3); \ +	i2c_int_entry(4); \ +	i2c_int_entry(5); \ +	i2c_int_entry(6); \ +	dp_sink_int_entry(1); \ +	dp_sink_int_entry(2); \ +	dp_sink_int_entry(3); \ +	dp_sink_int_entry(4); \ +	dp_sink_int_entry(5); \ +	dp_sink_int_entry(6); \ +	dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \ +	pflip_int_entry(0); \ +	pflip_int_entry(1); \ +	pflip_int_entry(2); \ +	pflip_int_entry(3); \ +	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \ +	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \ +	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \ +	gpio_pad_int_entry(0); \ +	gpio_pad_int_entry(1); \ +	gpio_pad_int_entry(2); \ +	gpio_pad_int_entry(3); \ +	gpio_pad_int_entry(4); \ +	gpio_pad_int_entry(5); \ +	gpio_pad_int_entry(6); \ +	gpio_pad_int_entry(7); \ +	gpio_pad_int_entry(8); \ +	gpio_pad_int_entry(9); \ +	gpio_pad_int_entry(10); \ +	gpio_pad_int_entry(11); \ +	gpio_pad_int_entry(12); \ +	gpio_pad_int_entry(13); \ +	gpio_pad_int_entry(14); \ +	gpio_pad_int_entry(15); \ +	gpio_pad_int_entry(16); \ +	gpio_pad_int_entry(17); \ +	gpio_pad_int_entry(18); \ +	gpio_pad_int_entry(19); \ +	gpio_pad_int_entry(20); \ +	gpio_pad_int_entry(21); \ +	gpio_pad_int_entry(22); \ +	gpio_pad_int_entry(23); \ +	gpio_pad_int_entry(24); \ +	
gpio_pad_int_entry(25); \ +	gpio_pad_int_entry(26); \ +	gpio_pad_int_entry(27); \ +	gpio_pad_int_entry(28); \ +	gpio_pad_int_entry(29); \ +	gpio_pad_int_entry(30); \ +	dc_underflow_int_entry(1); \ +	dc_underflow_int_entry(2); \ +	dc_underflow_int_entry(3); \ +	dc_underflow_int_entry(4); \ +	dc_underflow_int_entry(5); \ +	dc_underflow_int_entry(6); \ +	dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \ +	dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \ +} + +#define dcn351_irq_init_part_2() {\ +	vupdate_no_lock_int_entry(0); \ +	vupdate_no_lock_int_entry(1); \ +	vupdate_no_lock_int_entry(2); \ +	vupdate_no_lock_int_entry(3); \ +	vblank_int_entry(0); \ +	vblank_int_entry(1); \ +	vblank_int_entry(2); \ +	vblank_int_entry(3); \ +	vline0_int_entry(0); \ +	vline0_int_entry(1); \ +	vline0_int_entry(2); \ +	vline0_int_entry(3); \ +	dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \ +	dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \ +	dmub_outbox_int_entry(); \ +} + +#define dcn351_irq_init() {\ +	dcn351_irq_init_part_1(); \ +	dcn351_irq_init_part_2(); \ +} + +static struct irq_source_info irq_source_info_dcn351[DAL_IRQ_SOURCES_NUMBER] = {0}; + +static struct irq_service_funcs irq_service_funcs_dcn351 = { +		.to_dal_irq_source = to_dal_irq_source_dcn351 +}; + +static void dcn351_irq_construct( +	struct irq_service *irq_service, +	struct irq_service_init_data *init_data) +{ +	struct dc_context *ctx = init_data->ctx; + +#define REG_STRUCT irq_source_info_dcn351 +	dcn351_irq_init(); + +	dal_irq_service_construct(irq_service, init_data); + +	irq_service->info = irq_source_info_dcn351; +	irq_service->funcs = &irq_service_funcs_dcn351; +} + +struct irq_service *dal_irq_service_dcn351_create( +	struct irq_service_init_data *init_data) +{ +	struct irq_service *irq_service = kzalloc(sizeof(*irq_service), +						  GFP_KERNEL); + +	if (!irq_service) +		return NULL; + +	dcn351_irq_construct(irq_service, init_data); +	return irq_service; +} diff --git 
a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h new file mode 100644 index 000000000000..4094631ffec6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2021 Advanced Micro Devices, Inc. */ + +#ifndef __DAL_IRQ_SERVICE_DCN351_H__ +#define __DAL_IRQ_SERVICE_DCN351_H__ + +#include "../irq_service.h" + +struct irq_service *dal_irq_service_dcn351_create( +	struct irq_service_init_data *init_data); + +#endif /* __DAL_IRQ_SERVICE_DCN351_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 2d152b68a501..22b24749c9d2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)  	}  } -static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) -{ -	return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && -			test_pattern <= DP_TEST_PATTERN_SQUARE_END); -} - -static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) -{ -	if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && -			test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || -			test_pattern == DP_TEST_PATTERN_VIDEO_MODE) -		return true; -	else -		return false; -} -  static void dp_retrain_link_dp_test(struct dc_link *link,  			struct dc_link_settings *link_setting,  			bool skip_video_pattern) @@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)  				test_pattern_size);  	} -	if (is_dp_phy_sqaure_pattern(test_pattern)) { +	if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) {  		test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)  		core_link_read_dpcd(  				link, @@ -623,6 +607,8 @@ bool dp_set_test_pattern(  	
if (pipe_ctx == NULL)  		return false; +	link->pending_test_pattern = test_pattern; +  	/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */  	if (link->test_pattern_enabled && test_pattern ==  			DP_TEST_PATTERN_VIDEO_MODE) { @@ -643,12 +629,13 @@ bool dp_set_test_pattern(  		/* Reset Test Pattern state */  		link->test_pattern_enabled = false;  		link->current_test_pattern = test_pattern; +		link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;  		return true;  	}  	/* Check for PHY Test Patterns */ -	if (is_dp_phy_pattern(test_pattern)) { +	if (IS_DP_PHY_PATTERN(test_pattern)) {  		/* Set DPCD Lane Settings before running test pattern */  		if (p_link_settings != NULL) {  			if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && @@ -681,6 +668,7 @@ bool dp_set_test_pattern(  			/* Set Test Pattern state */  			link->test_pattern_enabled = true;  			link->current_test_pattern = test_pattern; +			link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;  			if (p_link_settings != NULL)  				dpcd_set_link_settings(link,  						p_link_settings); @@ -756,7 +744,7 @@ bool dp_set_test_pattern(  			return false;  		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { -			if (is_dp_phy_sqaure_pattern(test_pattern)) +			if (IS_DP_PHY_SQUARE_PATTERN(test_pattern))  				core_link_write_dpcd(link,  						DP_LINK_SQUARE_PATTERN,  						p_custom_pattern, @@ -884,6 +872,7 @@ bool dp_set_test_pattern(  		/* Set Test Pattern state */  		link->test_pattern_enabled = true;  		link->current_test_pattern = test_pattern; +		link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;  	}  	return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index f4633d3cf9b9..a1f72fe378ee 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -22,6 +22,16 @@   * Authors: AMD   *   */ + +/** + * 
DOC: overview + * + * Display Input Output (DIO), is the display input and output unit in DCN. It + * includes output encoders to support different display output, like + * DisplayPort, HDMI, DVI interface, and others. It also includes the control + * and status channels for these interfaces. + */ +  #ifndef __LINK_HWSS_DIO_H__  #define __LINK_HWSS_DIO_H__ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c index b659baa23147..348ea4cb832d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_  	const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};  	const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; +	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) +		return false;  	if (tp_params == NULL)  		return false; -	if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && -			link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { +	if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))  		// Deprogram overrides from previous test pattern  		dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); -	}  	switch (tp_params->dp_phy_pattern) {  	case DP_TEST_PATTERN_80BIT_CUSTOM:  		if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,  				pltpat_custom, tp_params->custom_pattern_size) != 0)  			return false; +		hw_tp_params.custom_pattern = tp_params->custom_pattern; +		hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size;  		break;  	case DP_TEST_PATTERN_D102:  		break; @@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {  bool 
requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)  { -	if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) -		return false; - -	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) -		return false; - -	return true; +	return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);  }  const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index b621b97711b6..3e6c7be7e278 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,  static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,  		struct encoder_set_dp_phy_pattern_param *tp_params)  { +	uint8_t clk_src = 0x4C; +	uint8_t pattern = 0x4F; /* SQ128 */ +  	const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; -	const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; -	const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; +	const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src}; +	const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src};  	const uint8_t vendor_lttpr_write_data_pg3[4]  = {0x1, 0x10, 0x58, 0x21};  	const uint8_t vendor_lttpr_write_data_pg4[4]  = {0x1, 0x10, 0x59, 0x21}; -	const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; -	const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; +	const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern}; +	const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern};  	const uint8_t vendor_lttpr_write_data_pg7[4]  = {0x1, 
0x30, 0x51, 0x20};  	const uint8_t vendor_lttpr_write_data_pg8[4]  = {0x1, 0x30, 0x52, 0x20};  	const uint8_t vendor_lttpr_write_data_pg9[4]  = {0x1, 0x30, 0x54, 0x20}; @@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link  	struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };  	const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; +	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) +		return false; +  	if (tp_params == NULL)  		return false; -	if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || -			tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { +	if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) {  		// Deprogram overrides from previously set square wave override  		if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||  				link->current_test_pattern == DP_TEST_PATTERN_D102)  			link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,  					&vendor_lttpr_exit_manual_automation_0[0],  					sizeof(vendor_lttpr_exit_manual_automation_0)); -		else +		else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))  			dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);  		return false; @@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link  	dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); -	dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); -  	return true;  } @@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,  		const struct dc_link_settings *link_settings,  		const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])  { -	link_res->hpo_dp_link_enc->funcs->set_ffe( -			link_res->hpo_dp_link_enc, -			link_settings, -			lane_settings[0].FFE_PRESET.raw); - -	// FFE is programmed when retimer is programmed for SQ128, but explicit -	// programming needed here 
as well in case FFE-only update is requested -	if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && -			link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) -		dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); +	// Don't update our HW FFE when outputting phy test patterns +	if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) { +		// Directly program FIXED_VS retimer FFE for SQ128 override +		if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) { +			dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); +		} +	} else { +		link_res->hpo_dp_link_enc->funcs->set_ffe( +				link_res->hpo_dp_link_enc, +				link_settings, +				lane_settings[0].FFE_PRESET.raw); +	}  }  static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, @@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {  bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)  { -	if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) -		return false; - -	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) -		return false; - -	return true; +	return requires_fixed_vs_pe_retimer_dio_link_hwss(link);  }  const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 24153b0df503..b8c4a04dd175 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -41,6 +41,7 @@  #include "protocols/link_dp_dpia.h"  #include "protocols/link_dp_phy.h"  #include "protocols/link_dp_training.h" +#include "protocols/link_dp_dpia_bw.h"  #include "accessories/link_dp_trace.h"  #include "link_enc_cfg.h" @@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,  			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&  					
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)  				link->reported_link_cap.link_rate = LINK_RATE_HIGH3; + +			/* +			 * If this is DP over USB4 link then we need to: +			 * - Enable BW ALLOC support on DPtx if applicable +			 */ +			if (dc->config.usb4_bw_alloc_support) { +				if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) { +					/* update with non reduced link cap if bw allocation mode is supported */ +					if (link->dpia_bw_alloc_config.nrd_max_link_rate && +						link->dpia_bw_alloc_config.nrd_max_lane_count) { +						link->reported_link_cap.link_rate = +							link->dpia_bw_alloc_config.nrd_max_link_rate; +						link->reported_link_cap.lane_count = +							link->dpia_bw_alloc_config.nrd_max_lane_count; +					} +				} +			}  			break;  		} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 3cbfbf8d107e..a72de44a5747 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2197,6 +2197,64 @@ static enum dc_status enable_link(  static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)  { +	struct dc_link *link = stream->sink->link; +	int req_bw = bw; + +	DC_LOGGER_INIT(link->ctx->logger); + +	if (!link->dpia_bw_alloc_config.bw_alloc_enabled) +		return false; + +	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		int sink_index = 0; +		int i = 0; + +		for (i = 0; i < link->sink_count; i++) { +			if (link->remote_sinks[i] == NULL) +				continue; + +			if (stream->sink->sink_id != link->remote_sinks[i]->sink_id) +				req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i]; +			else +				sink_index = i; +		} + +		link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; +	} + +	/* get dp overhead for dp tunneling */ +	link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); +	req_bw += link->dpia_bw_alloc_config.dp_overhead; + +	if 
(link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) { +		if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) { +			DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", +					__func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, +					link->dpia_bw_alloc_config.dp_overhead); +		} else { +			// Cannot get the required bandwidth. +			DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", +					__func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, +					link->dpia_bw_alloc_config.dp_overhead); +			return false; +		} +	} else { +		DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__); +		return false; +	} + +	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { +		int i = 0; + +		for (i = 0; i < link->sink_count; i++) { +			if (link->remote_sinks[i] == NULL) +				continue; +			DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__, +					(const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]), +					link->dpia_bw_alloc_config.remote_sink_req_bw[i]); +		} +	} +  	return true;  } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 37d3027c32dc..cf22b8f28ba6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(  	}  } -static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link) -{ -	uint8_t pwrseq_inst = 0xF; -	struct dc_context *dc_ctx = link->dc->ctx; - -	DC_LOGGER_INIT(dc_ctx->logger); - -	switch (link->eng_id) { -	case ENGINE_ID_DIGA: -		pwrseq_inst = 0; -		break; -	case ENGINE_ID_DIGB: -		pwrseq_inst = 1; -		break; -	default: -		DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id); -		ASSERT(false); -		break; -	} - -	return pwrseq_inst; -} - -  static void link_destruct(struct dc_link *link) 
 {  	int i; @@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,  			link->link_id.id == CONNECTOR_ID_LVDS)) {  		panel_cntl_init_data.ctx = dc_ctx;  		panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; -		panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link); +		panel_cntl_init_data.eng_id = link->eng_id;  		link->panel_cntl =  			link->dc->res_pool->funcs->panel_cntl_create(  								&panel_cntl_init_data); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 5b0bc7f6a188..1aed55b0ab6a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing(  		if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter  			struct dc_crtc_timing outputTiming = *timing; -#if defined(CONFIG_DRM_AMD_DC_FP)  			if (timing->flags.DSC && !timing->dsc_cfg.is_frl)  				/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */  				outputTiming.flags.DSC = 0; -#endif  			if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >  					dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)  				return false; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c index 0050e0a06cbc..2fa4e64e2430 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -37,6 +37,7 @@  #include "clk_mgr.h"  #include "resource.h"  #include "link_enc_cfg.h" +#include "atomfirmware.h"  #define DC_LOGGER \  	link->ctx->logger @@ -100,8 +101,11 @@ void dp_set_hw_lane_settings(  {  	const struct link_hwss *link_hwss = get_link_hwss(link, link_res); +	// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b 
 	if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && -			!is_immediate_downstream(link, offset)) +			!is_immediate_downstream(link, offset) && +			(!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) || +			link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))  		return;  	if (link_hwss->ext.set_dp_lane_settings) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 16a62e018712..e538c67d3ed9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1508,10 +1508,7 @@ enum link_training_result dp_perform_link_training(  	 * Non-LT AUX transactions inside training mode.  	 */  	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) -		if (link->dc->config.use_old_fixed_vs_sequence) -			status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, <_settings); -		else -			status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); +		status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings);  	else if (encoding == DP_8b_10b_ENCODING)  		status = dp_perform_8b_10b_link_training(link, link_res, <_settings);  	else if (encoding == DP_128b_132b_ENCODING) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 7087cdc9e977..b5cf75975fff 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq  	return status;  } - -enum link_training_result 
dp_perform_fixed_vs_pe_training_sequence_legacy( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings) -{ -	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; -	const uint8_t offset = dp_parse_lttpr_repeater_count( -			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); -	const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; -	const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; -	uint32_t pre_disable_intercept_delay_ms = 0; -	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; -	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; -	const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; -	const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; -	const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; -	const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; -	const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; -	const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; -	enum link_training_result status = LINK_TRAINING_SUCCESS; -	uint8_t lane = 0; -	union down_spread_ctrl downspread = {0}; -	union lane_count_set lane_count_set = {0}; -	uint8_t toggle_rate; -	uint8_t rate; - -	/* Only 8b/10b is supported */ -	ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == -			DP_8b_10b_ENCODING); - -	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { -		status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); -		return status; -	} - -	if (offset != 0xFF) { -		if (offset == 2) { -			pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; - -		/* Certain display and cable configuration require extra delay */ -		} else if (offset > 2) { -			pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; -		} -	} - -	
/* Vendor specific: Reset lane settings */ -	link_configure_fixed_vs_pe_retimer(link->ddc, -			&vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); -	link_configure_fixed_vs_pe_retimer(link->ddc, -			&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); -	link_configure_fixed_vs_pe_retimer(link->ddc, -			&vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - -	/* Vendor specific: Enable intercept */ -	link_configure_fixed_vs_pe_retimer(link->ddc, -			&vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); - - -	/* 1. set link rate, lane count and spread. */ - -	downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - -	lane_count_set.bits.LANE_COUNT_SET = -	lt_settings->link_settings.lane_count; - -	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; -	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - -	if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { -		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = -				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; -	} - -	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, -		&downspread.raw, sizeof(downspread)); - -	core_link_write_dpcd(link, DP_LANE_COUNT_SET, -		&lane_count_set.raw, 1); - -	rate = get_dpcd_link_rate(<_settings->link_settings); - -	/* Vendor specific: Toggle link rate */ -	toggle_rate = (rate == 0x6) ? 
0xA : 0x6; - -	if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { -		core_link_write_dpcd( -				link, -				DP_LINK_BW_SET, -				&toggle_rate, -				1); -	} - -	link->vendor_specific_lttpr_link_rate_wa = rate; - -	core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - -	DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", -		__func__, -		DP_LINK_BW_SET, -		lt_settings->link_settings.link_rate, -		DP_LANE_COUNT_SET, -		lt_settings->link_settings.lane_count, -		lt_settings->enhanced_framing, -		DP_DOWNSPREAD_CTRL, -		lt_settings->link_settings.link_spread); - -	link_configure_fixed_vs_pe_retimer(link->ddc, -			&vendor_lttpr_write_data_dpmf[0], -			sizeof(vendor_lttpr_write_data_dpmf)); - -	if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { -		link_configure_fixed_vs_pe_retimer(link->ddc, -				&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); -		link_configure_fixed_vs_pe_retimer(link->ddc, -				&vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); -		link_configure_fixed_vs_pe_retimer(link->ddc, -				&vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); -		link_configure_fixed_vs_pe_retimer(link->ddc, -				&vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); -		link_configure_fixed_vs_pe_retimer(link->ddc, -				&vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); -	} - -	/* 2. 
Perform link training */ - -	/* Perform Clock Recovery Sequence */ -	if (status == LINK_TRAINING_SUCCESS) { -		const uint8_t max_vendor_dpcd_retries = 10; -		uint32_t retries_cr; -		uint32_t retry_count; -		uint32_t wait_time_microsec; -		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; -		union lane_align_status_updated dpcd_lane_status_updated; -		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; -		uint8_t i = 0; - -		retries_cr = 0; -		retry_count = 0; - -		memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); -		memset(&dpcd_lane_status_updated, '\0', -		sizeof(dpcd_lane_status_updated)); - -		while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && -			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - -			/* 1. call HWSS to set lane settings */ -			dp_set_hw_lane_settings( -					link, -					link_res, -					lt_settings, -					0); - -			/* 2. update DPCD of the receiver */ -			if (!retry_count) { -				/* EPR #361076 - write as a 5-byte burst, -				 * but only for the 1-st iteration. 
-				 */ -				dpcd_set_lt_pattern_and_lane_settings( -						link, -						lt_settings, -						lt_settings->pattern_for_cr, -						0); -				/* Vendor specific: Disable intercept */ -				for (i = 0; i < max_vendor_dpcd_retries; i++) { -					if (pre_disable_intercept_delay_ms != 0) -						msleep(pre_disable_intercept_delay_ms); -					if (link_configure_fixed_vs_pe_retimer(link->ddc, -							&vendor_lttpr_write_data_intercept_dis[0], -							sizeof(vendor_lttpr_write_data_intercept_dis))) -						break; - -					link_configure_fixed_vs_pe_retimer(link->ddc, -							&vendor_lttpr_write_data_intercept_en[0], -							sizeof(vendor_lttpr_write_data_intercept_en)); -				} -			} else { -				vendor_lttpr_write_data_vs[3] = 0; -				vendor_lttpr_write_data_pe[3] = 0; - -				for (lane = 0; lane < lane_count; lane++) { -					vendor_lttpr_write_data_vs[3] |= -							lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); -					vendor_lttpr_write_data_pe[3] |= -							lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); -				} - -				/* Vendor specific: Update VS and PE to DPRX requested value */ -				link_configure_fixed_vs_pe_retimer(link->ddc, -						&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); -				link_configure_fixed_vs_pe_retimer(link->ddc, -						&vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - -				dpcd_set_lane_settings( -						link, -						lt_settings, -						0); -			} - -			/* 3. wait receiver to lock-on*/ -			wait_time_microsec = lt_settings->cr_pattern_time; - -			dp_wait_for_training_aux_rd_interval( -					link, -					wait_time_microsec); - -			/* 4. Read lane status and requested drive -			 * settings as set by the sink -			 */ -			dp_get_lane_status_and_lane_adjust( -					link, -					lt_settings, -					dpcd_lane_status, -					&dpcd_lane_status_updated, -					dpcd_lane_adjust, -					0); - -			/* 5. 
check CR done*/ -			if (dp_is_cr_done(lane_count, dpcd_lane_status)) { -				status = LINK_TRAINING_SUCCESS; -				break; -			} - -			/* 6. max VS reached*/ -			if (dp_is_max_vs_reached(lt_settings)) -				break; - -			/* 7. same lane settings */ -			/* Note: settings are the same for all lanes, -			 * so comparing first lane is sufficient -			 */ -			if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == -					dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) -				retries_cr++; -			else -				retries_cr = 0; - -			/* 8. update VS/PE/PC2 in lt_settings*/ -			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -			retry_count++; -		} - -		if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { -			ASSERT(0); -			DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", -				__func__, -				LINK_TRAINING_MAX_CR_RETRY); - -		} - -		status = dp_get_cr_failure(lane_count, dpcd_lane_status); -	} - -	/* Perform Channel EQ Sequence */ -	if (status == LINK_TRAINING_SUCCESS) { -		enum dc_dp_training_pattern tr_pattern; -		uint32_t retries_ch_eq; -		uint32_t wait_time_microsec; -		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; -		union lane_align_status_updated dpcd_lane_status_updated = {0}; -		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; -		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - -		/* Note: also check that TPS4 is a supported feature*/ -		tr_pattern = lt_settings->pattern_for_eq; - -		dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - -		status = LINK_TRAINING_EQ_FAIL_EQ; - -		for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; -			retries_ch_eq++) { - -			dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - -			vendor_lttpr_write_data_vs[3] = 0; -			vendor_lttpr_write_data_pe[3] = 0; - -			for (lane = 0; lane < lane_count; lane++) { -				
vendor_lttpr_write_data_vs[3] |= -						lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); -				vendor_lttpr_write_data_pe[3] |= -						lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); -			} - -			/* Vendor specific: Update VS and PE to DPRX requested value */ -			link_configure_fixed_vs_pe_retimer(link->ddc, -					&vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); -			link_configure_fixed_vs_pe_retimer(link->ddc, -					&vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - -			/* 2. update DPCD*/ -			if (!retries_ch_eq) -				/* EPR #361076 - write as a 5-byte burst, -				 * but only for the 1-st iteration -				 */ - -				dpcd_set_lt_pattern_and_lane_settings( -					link, -					lt_settings, -					tr_pattern, 0); -			else -				dpcd_set_lane_settings(link, lt_settings, 0); - -			/* 3. wait for receiver to lock-on*/ -			wait_time_microsec = lt_settings->eq_pattern_time; - -			dp_wait_for_training_aux_rd_interval( -					link, -					wait_time_microsec); - -			/* 4. Read lane status and requested -			 * drive settings as set by the sink -			 */ -			dp_get_lane_status_and_lane_adjust( -				link, -				lt_settings, -				dpcd_lane_status, -				&dpcd_lane_status_updated, -				dpcd_lane_adjust, -				0); - -			/* 5. check CR done*/ -			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { -				status = LINK_TRAINING_EQ_FAIL_CR; -				break; -			} - -			/* 6. check CHEQ done*/ -			if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && -					dp_is_symbol_locked(lane_count, dpcd_lane_status) && -					dp_is_interlane_aligned(dpcd_lane_status_updated)) { -				status = LINK_TRAINING_SUCCESS; -				break; -			} - -			/* 7. 
update VS/PE/PC2 in lt_settings*/ -			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, -					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -		} -	} - -	return status; -} -  enum link_training_result dp_perform_fixed_vs_pe_training_sequence(  	struct dc_link *link,  	const struct link_resource *link_res, @@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(  	rate = get_dpcd_link_rate(<_settings->link_settings); -	/* Vendor specific: Toggle link rate */ -	toggle_rate = (rate == 0x6) ? 0xA : 0x6; +	if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) { +		/* Vendor specific: Toggle link rate */ +		toggle_rate = (rate == 0x6) ? 0xA : 0x6; -	if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { -		core_link_write_dpcd( -				link, -				DP_LINK_BW_SET, -				&toggle_rate, -				1); -	} +		if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { +			core_link_write_dpcd( +					link, +					DP_LINK_BW_SET, +					&toggle_rate, +					1); +		} -	link->vendor_specific_lttpr_link_rate_wa = rate; +		link->vendor_specific_lttpr_link_rate_wa = rate; +	}  	core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h index c0d6ea329504..e61970e27661 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -28,11 +28,6 @@  #define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__  #include "link_dp_training.h" -enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( -	struct dc_link *link, -	const struct link_resource *link_res, -	struct link_training_settings *lt_settings); -  enum 
link_training_result dp_perform_fixed_vs_pe_training_sequence(  	struct dc_link *link,  	const struct link_resource *link_res, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index fc50931c2aec..c5de6ed5bf58 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -164,7 +164,7 @@ static void dpcd_extend_address_range(  	if (new_addr_range.start != in_address || new_addr_range.end != end_address) {  		*out_address = new_addr_range.start;  		*out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); -		*out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); +		*out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL);  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 046d3e205415..acfbbc638cc6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -287,7 +287,7 @@ bool set_default_brightness_aux(struct dc_link *link)  	if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {  		if (!read_default_bl_aux(link, &default_backlight))  			default_backlight = 150000; -		// if < 1 nits or > 5000, it might be wrong readback +		// if > 5000, it might be wrong readback. 0 nits is a valid default value for OLED panel.  		
if (default_backlight < 1000 || default_backlight > 5000000)  			default_backlight = 150000; @@ -892,7 +892,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,  	/* Set power optimization flag */  	if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { -		if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { +		if (replay != NULL && link->replay_settings.replay_feature_enabled && +		    replay->funcs->replay_set_power_opt) {  			replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);  			link->replay_settings.replay_power_opt_active = *power_opts;  		} diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile index 0a75ed8962a5..184b1f23aa77 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/Makefile +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -194,6 +194,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35)  ############################################################################### +RESOURCE_DCN351 = dcn351_resource.o + +AMD_DAL_RESOURCE_DCN351 = $(addprefix $(AMDDALPATH)/dc/resource/dcn351/,$(RESOURCE_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351) + +############################################################################### +  ###############################################################################  endif diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index f9c5bc624be3..a2387cea1af9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -24,8 +24,6 @@   *   */ -#include <linux/slab.h> -  #include "dm_services.h"  #include "dc.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c 
index 37a64186f324..ecc477ef8e3b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  					optimal_uclk_for_dcfclk_sta_targets[i] =  							bw_params->clk_table.entries[j].memclk_mhz * 16;  					break; +				} else { +					/* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): +					 * If it just so happens that the memory bandwidth is low enough such that +					 * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA +					 * target, we need to populate the optimal UCLK for each DCFCLK STA target to +					 * be the max UCLK. +					 */ +					if (j == num_uclk_states - 1) { +						optimal_uclk_for_dcfclk_sta_targets[i] = +								bw_params->clk_table.entries[j].memclk_mhz * 16; +					}  				}  			}  		} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 31035fc3d868..04d142f97474 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1941,8 +1941,6 @@ static bool dcn31_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; -	dc->config.use_old_fixed_vs_sequence = true; -  	/* Use pipe context based otg sync logic */  	dc->config.use_pipe_ctx_sync_logic = true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 6f10052caeef..3f3951f3ba98 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2118,6 +2118,7 @@ static bool dcn32_resource_construct(  	dc->config.use_pipe_ctx_sync_logic = true;  	
dc->config.dc_mode_clk_limit_support = true; +	dc->config.enable_windowed_mpo_odm = true;  	/* read VBIOS LTTPR caps */  	{  		if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 74412e5f03fe..b356fed1726d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1760,6 +1760,7 @@ static bool dcn321_resource_construct(  	dc->caps.color.mpc.ocsc = 1;  	dc->config.dc_mode_clk_limit_support = true; +	dc->config.enable_windowed_mpo_odm = true;  	/* read VBIOS LTTPR caps */  	{  		if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 5fdcda8f8602..5d52853cac96 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {  	// 6:1 downscaling ratio: 1000/6 = 166.666  	.max_downscale_factor = { -			.argb8888 = 167, +			.argb8888 = 250,  			.nv12 = 167,  			.fp16 = 167  	}, @@ -764,6 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	},  	.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,  	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ +	.minimum_z8_residency_time = 2100,  	.using_dml2 = true,  	.support_eDP1_5 = true,  	.enable_hpo_pg_support = false, @@ -782,6 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	.psp_disabled_wa = true,  	.ips2_eval_delay_us = 2000,  	.ips2_entry_delay_us = 800, +	.disable_dmub_reallow_idle = true,  	.static_screen_wait_frames = 2,  }; @@ -1905,7 +1907,8 @@ static bool dcn35_resource_construct(  	if (dc->ctx->dce_environment == 
DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv; - +	/*HW default is to have all the FGCG enabled, SW no need to program them*/ +	dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;  	// Init the vm_helper  	if (dc->vm_helper)  		vm_helper_init(dc->vm_helper, 16); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c new file mode 100644 index 000000000000..5b486400dfdb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -0,0 +1,2156 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" +#include "dcn351/dcn351_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn351_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn35/dcn35_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn35/dcn35_hubbub.h" +#include "dcn32/dcn32_mpc.h" +#include "dcn35/dcn35_hubp.h" +#include "irq/dcn351/irq_service_dcn351.h" +#include "dcn35/dcn35_dpp.h" +#include "dcn35/dcn35_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn35/dcn35_opp.h" +#include "dcn35/dcn35_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" + +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn35/dcn35_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn32/dcn32_hpo_dp_link_encoder.h" +#include "link.h" +#include "dcn31/dcn31_apg.h" +#include "dcn32/dcn32_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" 
+#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn35/dcn35_dccg.h" +#include "dcn35/dcn35_pg_cntl.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" +#include "dcn35/dcn35_hwseq.h" +#include "dcn35/dcn35_dio_link_encoder.h" +#include "dml/dcn31/dcn31_fpu.h" /*todo*/ +#include "dml/dcn35/dcn35_fpu.h" +#include "dml/dcn351/dcn351_fpu.h" +#include "dcn35/dcn35_dwb.h" +#include "dcn35/dcn35_mmhubbub.h" + +#include "dcn/dcn_3_5_1_offset.h" +#include "dcn/dcn_3_5_1_sh_mask.h" +#include "nbio/nbio_7_11_0_offset.h" +#include "mmhub/mmhub_3_3_0_offset.h" +#include "mmhub/mmhub_3_3_0_sh_mask.h" + +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT                   0x0 +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK                     0x0000000FL + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dml/dcn31/display_mode_vba_31.h" /*temp*/ +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "dml2/dml2_wrapper.h" + +#include "link_enc_cfg.h" +#define DC_LOGGER_INIT(logger) + +enum dcn351_clk_src_array_id { +	DCN351_CLK_SRC_PLL0, +	DCN351_CLK_SRC_PLL1, +	DCN351_CLK_SRC_PLL2, +	DCN351_CLK_SRC_PLL3, +	DCN351_CLK_SRC_PLL4, +	DCN351_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +/* TODO awful hack. 
fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ +		REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \ +					reg ## reg_name + +#define SR_ARR(reg_name, id) \ +	REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SR_ARR_INIT(reg_name, id, value) \ +	REG_STRUCT[id].reg_name = value + +#define SRI(reg_name, block, id)\ +	REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +					reg ## block ## id ## _ ## reg_name + +#define SRI_ARR(reg_name, block, id)\ +	REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## reg_name + +#define SR_ARR_I2C(reg_name, id) \ +	REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SRI_ARR_I2C(reg_name, block, id)\ +	REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## reg_name + +#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ +	REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ +	.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ +					reg ## reg_name + +#define SRI2_ARR(reg_name, block, id)\ +	REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) +	\ +		reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ +	.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +					reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ +	REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +					reg ## block ## id ## _ ## reg_name + +#define SRII_ARR_2(reg_name, block, id, inst)\ +	REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## reg_name + +#define 
SRII_MPC_RMU(reg_name, block, id)\ +	.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +					reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ +	REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ +	.field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ +	REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ +		reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ +	REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ +		reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] + +#define NBIO_BASE(seg) \ +	NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ +	REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ +				regBIF_BX2_ ## reg_name + +#define NBIO_SR_ARR(reg_name, id)\ +	REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ +		regBIF_BX2_ ## reg_name + +#define bios_regs_init() \ +		( \ +		NBIO_SR(BIOS_SCRATCH_3),\ +		NBIO_SR(BIOS_SCRATCH_6)\ +		) + +static struct bios_registers bios_regs; + +#define clk_src_regs_init(index, pllid)\ +	CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) + +static struct dce110_clk_src_regs clk_src_regs[5]; + +static const struct dce110_clk_src_shift cs_shift = { +		CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { +		CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) +}; + +#define abm_regs_init(id)\ +		ABM_DCN32_REG_LIST_RI(id) + +static struct dce_abm_registers abm_regs[4]; + +static const struct dce_abm_shift abm_shift = { +		ABM_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { +		ABM_MASK_SH_LIST_DCN35(_MASK) +}; + 
+#define audio_regs_init(id)\ +		AUD_COMMON_REG_LIST_RI(id) + +static struct dce_audio_registers audio_regs[7]; + + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ +		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ +		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ +		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { +		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { +		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs_init(id)\ +	VPG_DCN31_REG_LIST_RI(id) + +static struct dcn31_vpg_registers vpg_regs[10]; + +static const struct dcn31_vpg_shift vpg_shift = { +	DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { +	DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs_init(id)\ +	AFMT_DCN31_REG_LIST_RI(id) + +static struct dcn31_afmt_registers afmt_regs[6]; + +static const struct dcn31_afmt_shift afmt_shift = { +	DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { +	DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs_init(id)\ +	APG_DCN31_REG_LIST_RI(id) + +static struct dcn31_apg_registers apg_regs[4]; + +static const struct dcn31_apg_shift apg_shift = { +	DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { +	DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs_init(id)\ +	SE_DCN35_REG_LIST_RI(id) + +static struct dcn10_stream_enc_registers stream_enc_regs[5]; + +static const struct dcn10_stream_encoder_shift se_shift = { +		SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { +		SE_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_regs_init(id)\ +	DCN2_AUX_REG_LIST_RI(id) + +static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; + +#define hpd_regs_init(id)\ +	HPD_REG_LIST_RI(id) + +static struct 
dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; + + +static const struct dce110_aux_registers_shift aux_shift = { +	DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { +	DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define link_regs_init(id, phyid)\ +	( \ +	LE_DCN35_REG_LIST_RI(id), \ +	UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ +	) + +static struct dcn10_link_enc_registers link_enc_regs[5]; + +static const struct dcn10_link_enc_shift le_shift = { +	LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \ +	//DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { +	LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \ +	//DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_init(id)\ +	DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) + +static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { +	DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { +	DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_link_encoder_reg_init(id)\ +	DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) +	/*DCN3_1_RDPCSTX_REG_LIST(0),*/ +	/*DCN3_1_RDPCSTX_REG_LIST(1),*/ +	/*DCN3_1_RDPCSTX_REG_LIST(2),*/ +	/*DCN3_1_RDPCSTX_REG_LIST(3),*/ + +static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { +	DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { +	DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs_init(id)\ +	DPP_REG_LIST_DCN35_RI(id) + +static struct dcn3_dpp_registers dpp_regs[4]; + +static const struct dcn35_dpp_shift tf_shift = { +		DPP_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dpp_mask tf_mask = { +		DPP_REG_LIST_SH_MASK_DCN35(_MASK) +}; + 
+#define opp_regs_init(id)\ +	OPP_REG_LIST_DCN35_RI(id) + +static struct dcn35_opp_registers opp_regs[4]; + +static const struct dcn35_opp_shift opp_shift = { +	OPP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_opp_mask opp_mask = { +	OPP_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_engine_regs_init(id)\ +	( \ +	AUX_COMMON_REG_LIST0_RI(id), \ +	SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ +	SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ +	SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \ +	) + +static struct dce110_aux_registers aux_engine_regs[5]; + +#define dwbc_regs_dcn3_init(id)\ +	DWBC_COMMON_REG_LIST_DCN30_RI(id) + +static struct dcn30_dwbc_registers dwbc35_regs[1]; + +static const struct dcn35_dwbc_shift dwbc35_shift = { +	DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_dwbc_mask dwbc35_mask = { +	DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define mcif_wb_regs_dcn3_init(id)\ +	MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn35_mmhubbub_registers mcif_wb35_regs[1]; + +static const struct dcn35_mmhubbub_shift mcif_wb35_shift = { +	MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn35_mmhubbub_mask mcif_wb35_mask = { +	MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define dsc_regsDCN35_init(id)\ +	DSC_REG_LIST_DCN20_RI(id) + +static struct dcn20_dsc_registers dsc_regs[4]; + +static const struct dcn35_dsc_shift dsc_shift = { +	DSC_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dsc_mask dsc_mask = { +	DSC_REG_LIST_SH_MASK_DCN35(_MASK) +}; + +static struct dcn30_mpc_registers mpc_regs; + +#define dcn_mpc_regs_init() \ +	MPC_REG_LIST_DCN3_2_RI(0),\ +	MPC_REG_LIST_DCN3_2_RI(1),\ +	MPC_REG_LIST_DCN3_2_RI(2),\ +	MPC_REG_LIST_DCN3_2_RI(3),\ +	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ +	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ +	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ +	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ +	MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) + +static const struct dcn30_mpc_shift mpc_shift = 
{ +	MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { +	MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define optc_regs_init(id)\ +	OPTC_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn_optc_registers optc_regs[4]; + +static const struct dcn_optc_shift optc_shift = { +	OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { +	OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define hubp_regs_init(id)\ +	HUBP_REG_LIST_DCN30_RI(id) + +static struct dcn_hubp2_registers hubp_regs[4]; + + +static const struct dcn35_hubp2_shift hubp_shift = { +		HUBP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_hubp2_mask hubp_mask = { +		HUBP_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dcn_hubbub_registers hubbub_reg; + +#define hubbub_reg_init()\ +		HUBBUB_REG_LIST_DCN35(0) + +static const struct dcn_hubbub_shift hubbub_shift = { +		HUBBUB_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { +		HUBBUB_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dccg_registers dccg_regs; + +#define dccg_regs_init()\ +	DCCG_REG_LIST_DCN35() + +static const struct dccg_shift dccg_shift = { +		DCCG_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { +		DCCG_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct pg_cntl_registers pg_cntl_regs; + +#define pg_cntl_dcn35_regs_init() \ +	PG_CNTL_REG_LIST_DCN35() + +static const struct pg_cntl_shift pg_cntl_shift = { +		PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct pg_cntl_mask pg_cntl_mask = { +		PG_CNTL_MASK_SH_LIST_DCN35(_MASK) +}; + +#define SRII2(reg_name_pre, reg_name_post, id)\ +	.reg_name_pre ## _ ##  reg_name_post[id] = BASE(reg ## reg_name_pre \ +			## id ## _ ## reg_name_post ## _BASE_IDX) + \ +			reg ## reg_name_pre ## id ## _ ## reg_name_post + +static struct dce_hwseq_registers hwseq_reg; + +#define hwseq_reg_init()\ +	HWSEQ_DCN35_REG_LIST() + +#define 
HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\ +	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ +	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ +	HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ +	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ +	HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ +	HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN3_PG_STATUS, 
DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ +	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ +	HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ +	HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ +	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ +	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ +	HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ +	HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, 
DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\ +	HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { +		HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) +}; + +static 
const struct dce_hwseq_mask hwseq_mask = { +		HWSEQ_DCN35_MASK_SH_LIST(_MASK) +}; + +#define vmid_regs_init(id)\ +		DCN20_VMID_REG_LIST_RI(id) + +static struct dcn_vmid_registers vmid_regs[16]; + +static const struct dcn20_vmid_shift vmid_shifts = { +		DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { +		DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn351 = { +	.num_timing_generator = 4, +	.num_opp = 4, +	.num_video_plane = 4, +	.num_audio = 5, +	.num_stream_encoder = 5, +	.num_dig_link_enc = 5, +	.num_hpo_dp_stream_encoder = 4, +	.num_hpo_dp_link_encoder = 2, +	.num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/ +	.num_dwb = 1, +	.num_ddc = 5, +	.num_vmid = 16, +	.num_mpc_3dlut = 2, +	.num_dsc = 4, +}; + +static const struct dc_plane_cap plane_cap = { +	.type = DC_PLANE_TYPE_DCN_UNIVERSAL, +	.per_pixel_alpha = true, + +	.pixel_format_support = { +			.argb8888 = true, +			.nv12 = true, +			.fp16 = true, +			.p010 = true, +			.ayuv = false, +	}, + +	.max_upscale_factor = { +			.argb8888 = 16000, +			.nv12 = 16000, +			.fp16 = 16000 +	}, + +	// 6:1 downscaling ratio: 1000/6 = 166.666 +	.max_downscale_factor = { +			.argb8888 = 250, +			.nv12 = 167, +			.fp16 = 167 +	}, +	64, +	64 +}; + +static const struct dc_debug_options debug_defaults_drv = { +	.disable_dmcu = true, +	.force_abm_enable = false, +	.timing_trace = false, +	.clock_trace = true, +	.disable_pplib_clock_request = false, +	.pipe_split_policy = MPC_SPLIT_AVOID, +	.force_single_disp_pipe_split = false, +	.disable_dcc = DCC_ENABLE, +	.disable_dpp_power_gate = true, +	.disable_hubp_power_gate = true, +	.disable_clock_gate = false, +	.disable_dsc_power_gate = true, +	.vsr_support = true, +	.performance_trace = false, +	.max_downscale_src_width = 4096,/*upto true 4k*/ +	.disable_pplib_wm_range = false, +	.scl_reset_length10 = true, +	.sanity_checks = false, +	.underflow_assert_delay_us = 0xFFFFFFFF, +	.dwb_fi_phase = -1, // -1 = disable, +	
.dmub_command_table = true, +	.pstate_enabled = true, +	.use_max_lb = true, +	.enable_mem_low_power = { +		.bits = { +			.vga = false, +			.i2c = true, +			.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled +			.dscl = true, +			.cm = true, +			.mpc = true, +			.optc = true, +			.vpg = true, +			.afmt = true, +		} +	}, +	.root_clock_optimization = { +		.bits = { +			.dpp = true, +			.dsc = true,/*dscclk and dsc pg*/ +			.hdmistream = true, +			.hdmichar = true, +			.dpstream = true, +			.symclk32_se = true, +			.symclk32_le = true, +			.symclk_fe = true, +			.physymclk = true, +			.dpiasymclk = true, +		} +	}, +	.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, +	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ +	.using_dml2 = true, +	.support_eDP1_5 = true, +	.enable_hpo_pg_support = false, +	.enable_legacy_fast_update = true, +	.enable_single_display_2to1_odm_policy = true, +	.disable_idle_power_optimizations = true, +	.dmcub_emulation = false, +	.disable_boot_optimizations = false, +	.disable_unbounded_requesting = false, +	.disable_mem_low_power = false, +	//must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions +	.enable_double_buffered_dsc_pg_support = true, +	.enable_dp_dig_pixel_rate_div_policy = 1, +	.disable_z10 = true, +	.ignore_pg = true, +	.psp_disabled_wa = true, +	.ips2_eval_delay_us = 200, +	.ips2_entry_delay_us = 400 +}; + +static const struct dc_panel_config panel_config_defaults = { +	.psr = { +		.disable_psr = false, +		.disallow_psrsu = false, +		.disallow_replay = false, +	}, +	.ilr = { +		.optimize_edp_link_rate = true, +	}, +}; + +static void dcn35_dpp_destroy(struct dpp **dpp) +{ +	kfree(TO_DCN20_DPP(*dpp)); +	*dpp = NULL; +} + +static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst) +{ +	struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); +	bool success = (dpp != NULL); + +	if (!success) +		
return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dpp_regs +	dpp_regs_init(0), +	dpp_regs_init(1), +	dpp_regs_init(2), +	dpp_regs_init(3); + +	success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, +				  &tf_mask); +	if (success) { +		dpp35_set_fgcg( +			dpp, +			ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp); +		return &dpp->base; +	} + +	BREAK_TO_DEBUGGER(); +	kfree(dpp); +	return NULL; +} + +static struct output_pixel_processor *dcn35_opp_create( +	struct dc_context *ctx, uint32_t inst) +{ +	struct dcn20_opp *opp = +		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + +	if (!opp) { +		BREAK_TO_DEBUGGER(); +		return NULL; +	} + +#undef REG_STRUCT +#define REG_STRUCT opp_regs +	opp_regs_init(0), +	opp_regs_init(1), +	opp_regs_init(2), +	opp_regs_init(3); + +	dcn35_opp_construct(opp, ctx, inst, +			&opp_regs[inst], &opp_shift, &opp_mask); + +	dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp); + +	return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct aux_engine_dce110 *aux_engine = +		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + +	if (!aux_engine) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT aux_engine_regs +	aux_engine_regs_init(0), +	aux_engine_regs_init(1), +	aux_engine_regs_init(2), +	aux_engine_regs_init(3), +	aux_engine_regs_init(4); + +	dce110_aux_engine_construct(aux_engine, ctx, inst, +				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, +				    &aux_engine_regs[inst], +					&aux_mask, +					&aux_shift, +					ctx->dc->caps.extended_aux_timeout_support); + +	return &aux_engine->base; +} + +#define i2c_inst_regs_init(id)\ +	I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) + +static struct dce_i2c_registers i2c_hw_regs[5]; + +static const struct dce_i2c_shift i2c_shifts = { +		I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { +		
I2C_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder     |    Host Router + *   0        |      C                |       0 + *   1        |      First Available  |       0 + *   2        |      D                |       1 + *   3        |      First Available  |       1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { +		ENGINE_ID_DIGC, +		ENGINE_ID_DIGC, +		ENGINE_ID_DIGD, +		ENGINE_ID_DIGD +}; + +static enum engine_id dcn351_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ +	return dpia_to_preferred_enc_id_table[dpia_index]; +} + +static struct dce_i2c_hw *dcn31_i2c_hw_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct dce_i2c_hw *dce_i2c_hw = +		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + +	if (!dce_i2c_hw) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT i2c_hw_regs +	i2c_inst_regs_init(1), +	i2c_inst_regs_init(2), +	i2c_inst_regs_init(3), +	i2c_inst_regs_init(4), +	i2c_inst_regs_init(5); + +	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, +				    &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + +	return dce_i2c_hw; +} +static struct mpc *dcn35_mpc_create( +		struct dc_context *ctx, +		int num_mpcc, +		int num_rmu) +{ +	struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + +	if (!mpc30) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT mpc_regs +	dcn_mpc_regs_init(); + +	dcn32_mpc_construct(mpc30, ctx, +			&mpc_regs, +			&mpc_shift, +			&mpc_mask, +			num_mpcc, +			num_rmu); + +	return &mpc30->base; +} + +static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) +{ +	int i; + +	struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), +					  GFP_KERNEL); + +	if (!hubbub3) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubbub_reg +	hubbub_reg_init(); + +#undef REG_STRUCT +#define REG_STRUCT 
vmid_regs +	vmid_regs_init(0), +	vmid_regs_init(1), +	vmid_regs_init(2), +	vmid_regs_init(3), +	vmid_regs_init(4), +	vmid_regs_init(5), +	vmid_regs_init(6), +	vmid_regs_init(7), +	vmid_regs_init(8), +	vmid_regs_init(9), +	vmid_regs_init(10), +	vmid_regs_init(11), +	vmid_regs_init(12), +	vmid_regs_init(13), +	vmid_regs_init(14), +	vmid_regs_init(15); + +	hubbub35_construct(hubbub3, ctx, +			&hubbub_reg, +			&hubbub_shift, +			&hubbub_mask, +			384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/ +			8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/ +			1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/); + + +	for (i = 0; i < res_cap_dcn351.num_vmid; i++) { +		struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + +		vmid->ctx = ctx; + +		vmid->regs = &vmid_regs[i]; +		vmid->shifts = &vmid_shifts; +		vmid->masks = &vmid_masks; +	} + +	return &hubbub3->base; +} + +static struct timing_generator *dcn35_timing_generator_create( +		struct dc_context *ctx, +		uint32_t instance) +{ +	struct optc *tgn10 = +		kzalloc(sizeof(struct optc), GFP_KERNEL); + +	if (!tgn10) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT optc_regs +	optc_regs_init(0), +	optc_regs_init(1), +	optc_regs_init(2), +	optc_regs_init(3); + +	tgn10->base.inst = instance; +	tgn10->base.ctx = ctx; + +	tgn10->tg_regs = &optc_regs[instance]; +	tgn10->tg_shift = &optc_shift; +	tgn10->tg_mask = &optc_mask; + +	dcn35_timing_generator_init(tgn10); + +	return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { +		.max_hdmi_deep_color = COLOR_DEPTH_121212, +		.max_hdmi_pixel_clock = 600000, +		.hdmi_ycbcr420_supported = true, +		.dp_ycbcr420_supported = true, +		.fec_supported = true, +		.flags.bits.IS_HBR2_CAPABLE = true, +		.flags.bits.IS_HBR3_CAPABLE = true, +		.flags.bits.IS_TPS3_CAPABLE = true, +		.flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn35_link_encoder_create( +	struct dc_context *ctx, +	const struct encoder_init_data *enc_init_data) +{ +	
struct dcn20_link_encoder *enc20 = +		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + +	if (!enc20) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT link_enc_aux_regs +	aux_regs_init(0), +	aux_regs_init(1), +	aux_regs_init(2), +	aux_regs_init(3), +	aux_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_hpd_regs +	hpd_regs_init(0), +	hpd_regs_init(1), +	hpd_regs_init(2), +	hpd_regs_init(3), +	hpd_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_regs +	link_regs_init(0, A), +	link_regs_init(1, B), +	link_regs_init(2, C), +	link_regs_init(3, D), +	link_regs_init(4, E); + +	dcn35_link_encoder_construct(enc20, +			enc_init_data, +			&link_enc_feature, +			&link_enc_regs[enc_init_data->transmitter], +			&link_enc_aux_regs[enc_init_data->channel - 1], +			&link_enc_hpd_regs[enc_init_data->hpd_source], +			&le_shift, +			&le_mask); + +	return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( +		struct dc_context *ctx, enum engine_id eng_id) +{ +	struct dcn20_link_encoder *enc20; + +	if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) +		return NULL; + +	enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); +	if (!enc20) +		return NULL; + +	dcn31_link_encoder_construct_minimal( +			enc20, +			ctx, +			&link_enc_feature, +			&link_enc_regs[eng_id - ENGINE_ID_DIGA], +			eng_id); + +	return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ +	struct dcn31_panel_cntl *panel_cntl = +		kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + +	if (!panel_cntl) +		return NULL; + +	dcn31_panel_cntl_construct(panel_cntl, init_data); + +	return &panel_cntl->base; +} + +static void read_dce_straps( +	struct dc_context *ctx, +	struct resource_straps *straps) +{ +	generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), +		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( +		struct dc_context *ctx, unsigned int inst) +{ + +#undef REG_STRUCT +#define REG_STRUCT audio_regs +	audio_regs_init(0), +	audio_regs_init(1), +	audio_regs_init(2), +	audio_regs_init(3), +	audio_regs_init(4); +	audio_regs_init(5); +	audio_regs_init(6); + +	return dce_audio_create(ctx, inst, +			&audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + +	if (!vpg31) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT vpg_regs +	vpg_regs_init(0), +	vpg_regs_init(1), +	vpg_regs_init(2), +	vpg_regs_init(3), +	vpg_regs_init(4), +	vpg_regs_init(5), +	vpg_regs_init(6), +	vpg_regs_init(7), +	vpg_regs_init(8), +	vpg_regs_init(9); + +	vpg31_construct(vpg31, 
ctx, inst, +			&vpg_regs[inst], +			&vpg_shift, +			&vpg_mask); + +	return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + +	if (!afmt31) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT afmt_regs +	afmt_regs_init(0), +	afmt_regs_init(1), +	afmt_regs_init(2), +	afmt_regs_init(3), +	afmt_regs_init(4), +	afmt_regs_init(5); + +	afmt31_construct(afmt31, ctx, inst, +			&afmt_regs[inst], +			&afmt_shift, +			&afmt_mask); + +	// Light sleep by default, no need to power down here + +	return &afmt31->base; +} + +static struct apg *dcn31_apg_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + +	if (!apg31) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT apg_regs +	apg_regs_init(0), +	apg_regs_init(1), +	apg_regs_init(2), +	apg_regs_init(3); + +	apg31_construct(apg31, ctx, inst, +			&apg_regs[inst], +			&apg_shift, +			&apg_mask); + +	return &apg31->base; +} + +static struct stream_encoder *dcn35_stream_encoder_create( +	enum engine_id eng_id, +	struct dc_context *ctx) +{ +	struct dcn10_stream_encoder *enc1; +	struct vpg *vpg; +	struct afmt *afmt; +	int vpg_inst; +	int afmt_inst; + +	/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ +	if (eng_id <= ENGINE_ID_DIGF) { +		vpg_inst = eng_id; +		afmt_inst = eng_id; +	} else +		return NULL; + +	enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); +	vpg = dcn31_vpg_create(ctx, vpg_inst); +	afmt = dcn31_afmt_create(ctx, afmt_inst); + +	if (!enc1 || !vpg || !afmt) { +		kfree(enc1); +		kfree(vpg); +		kfree(afmt); +		return NULL; +	} + +#undef REG_STRUCT +#define REG_STRUCT stream_enc_regs +	stream_enc_regs_init(0), +	stream_enc_regs_init(1), +	stream_enc_regs_init(2), +	stream_enc_regs_init(3), +	stream_enc_regs_init(4); + +	dcn35_dio_stream_encoder_construct(enc1, ctx, 
ctx->dc_bios, +					eng_id, vpg, afmt, +					&stream_enc_regs[eng_id], +					&se_shift, &se_mask); + +	return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( +	enum engine_id eng_id, +	struct dc_context *ctx) +{ +	struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; +	struct vpg *vpg; +	struct apg *apg; +	uint32_t hpo_dp_inst; +	uint32_t vpg_inst; +	uint32_t apg_inst; + +	ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); +	hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + +	/* Mapping of VPG register blocks to HPO DP block instance: +	 * VPG[6] -> HPO_DP[0] +	 * VPG[7] -> HPO_DP[1] +	 * VPG[8] -> HPO_DP[2] +	 * VPG[9] -> HPO_DP[3] +	 */ +	vpg_inst = hpo_dp_inst + 6; + +	/* Mapping of APG register blocks to HPO DP block instance: +	 * APG[0] -> HPO_DP[0] +	 * APG[1] -> HPO_DP[1] +	 * APG[2] -> HPO_DP[2] +	 * APG[3] -> HPO_DP[3] +	 */ +	apg_inst = hpo_dp_inst; + +	/* allocate HPO stream encoder and create VPG sub-block */ +	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); +	vpg = dcn31_vpg_create(ctx, vpg_inst); +	apg = dcn31_apg_create(ctx, apg_inst); + +	if (!hpo_dp_enc31 || !vpg || !apg) { +		kfree(hpo_dp_enc31); +		kfree(vpg); +		kfree(apg); +		return NULL; +	} + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_stream_enc_regs +	hpo_dp_stream_encoder_reg_init(0), +	hpo_dp_stream_encoder_reg_init(1), +	hpo_dp_stream_encoder_reg_init(2), +	hpo_dp_stream_encoder_reg_init(3); + +	dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, +					hpo_dp_inst, eng_id, vpg, apg, +					&hpo_dp_stream_enc_regs[hpo_dp_inst], +					&hpo_dp_se_shift, &hpo_dp_se_mask); + +	return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( +	uint8_t inst, +	struct dc_context *ctx) +{ +	struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + +	/* allocate HPO link encoder */ +	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), 
GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_link_enc_regs +	hpo_dp_link_encoder_reg_init(0), +	hpo_dp_link_encoder_reg_init(1); + +	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, +					&hpo_dp_link_enc_regs[inst], +					&hpo_dp_le_shift, &hpo_dp_le_mask); + +	return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn351_hwseq_create( +	struct dc_context *ctx) +{ +	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hwseq_reg +	hwseq_reg_init(); + +	if (hws) { +		hws->ctx = ctx; +		hws->regs = &hwseq_reg; +		hws->shifts = &hwseq_shift; +		hws->masks = &hwseq_mask; +	} +	return hws; +} +static const struct resource_create_funcs res_create_funcs = { +	.read_dce_straps = read_dce_straps, +	.create_audio = dcn31_create_audio, +	.create_stream_encoder = dcn35_stream_encoder_create, +	.create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, +	.create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, +	.create_hwseq = dcn351_hwseq_create, +}; + +static void dcn351_resource_destruct(struct dcn351_resource_pool *pool) +{ +	unsigned int i; + +	for (i = 0; i < pool->base.stream_enc_count; i++) { +		if (pool->base.stream_enc[i] != NULL) { +			if (pool->base.stream_enc[i]->vpg != NULL) { +				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); +				pool->base.stream_enc[i]->vpg = NULL; +			} +			if (pool->base.stream_enc[i]->afmt != NULL) { +				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); +				pool->base.stream_enc[i]->afmt = NULL; +			} +			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); +			pool->base.stream_enc[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { +		if (pool->base.hpo_dp_stream_enc[i] != NULL) { +			if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { +				kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); +				pool->base.hpo_dp_stream_enc[i]->vpg = NULL; +			} +			if 
(pool->base.hpo_dp_stream_enc[i]->apg != NULL) { +				kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); +				pool->base.hpo_dp_stream_enc[i]->apg = NULL; +			} +			kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); +			pool->base.hpo_dp_stream_enc[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { +		if (pool->base.hpo_dp_link_enc[i] != NULL) { +			kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); +			pool->base.hpo_dp_link_enc[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_dsc; i++) { +		if (pool->base.dscs[i] != NULL) +			dcn20_dsc_destroy(&pool->base.dscs[i]); +	} + +	if (pool->base.mpc != NULL) { +		kfree(TO_DCN20_MPC(pool->base.mpc)); +		pool->base.mpc = NULL; +	} +	if (pool->base.hubbub != NULL) { +		kfree(pool->base.hubbub); +		pool->base.hubbub = NULL; +	} +	for (i = 0; i < pool->base.pipe_count; i++) { +		if (pool->base.dpps[i] != NULL) +			dcn35_dpp_destroy(&pool->base.dpps[i]); + +		if (pool->base.ipps[i] != NULL) +			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + +		if (pool->base.hubps[i] != NULL) { +			kfree(TO_DCN20_HUBP(pool->base.hubps[i])); +			pool->base.hubps[i] = NULL; +		} + +		if (pool->base.irqs != NULL) { +			dal_irq_service_destroy(&pool->base.irqs); +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_ddc; i++) { +		if (pool->base.engines[i] != NULL) +			dce110_engine_destroy(&pool->base.engines[i]); +		if (pool->base.hw_i2cs[i] != NULL) { +			kfree(pool->base.hw_i2cs[i]); +			pool->base.hw_i2cs[i] = NULL; +		} +		if (pool->base.sw_i2cs[i] != NULL) { +			kfree(pool->base.sw_i2cs[i]); +			pool->base.sw_i2cs[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_opp; i++) { +		if (pool->base.opps[i] != NULL) +			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); +	} + +	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { +		if (pool->base.timing_generators[i] != NULL)	
{ +			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); +			pool->base.timing_generators[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_dwb; i++) { +		if (pool->base.dwbc[i] != NULL) { +			kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); +			pool->base.dwbc[i] = NULL; +		} +		if (pool->base.mcif_wb[i] != NULL) { +			kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); +			pool->base.mcif_wb[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.audio_count; i++) { +		if (pool->base.audios[i]) +			dce_aud_destroy(&pool->base.audios[i]); +	} + +	for (i = 0; i < pool->base.clk_src_count; i++) { +		if (pool->base.clock_sources[i] != NULL) { +			dcn20_clock_source_destroy(&pool->base.clock_sources[i]); +			pool->base.clock_sources[i] = NULL; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { +		if (pool->base.mpc_lut[i] != NULL) { +			dc_3dlut_func_release(pool->base.mpc_lut[i]); +			pool->base.mpc_lut[i] = NULL; +		} +		if (pool->base.mpc_shaper[i] != NULL) { +			dc_transfer_func_release(pool->base.mpc_shaper[i]); +			pool->base.mpc_shaper[i] = NULL; +		} +	} + +	if (pool->base.dp_clock_source != NULL) { +		dcn20_clock_source_destroy(&pool->base.dp_clock_source); +		pool->base.dp_clock_source = NULL; +	} + +	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { +		if (pool->base.multiple_abms[i] != NULL) +			dce_abm_destroy(&pool->base.multiple_abms[i]); +	} + +	if (pool->base.psr != NULL) +		dmub_psr_destroy(&pool->base.psr); + +	if (pool->base.replay != NULL) +		dmub_replay_destroy(&pool->base.replay); + +	if (pool->base.pg_cntl != NULL) +		dcn_pg_cntl_destroy(&pool->base.pg_cntl); + +	if (pool->base.dccg != NULL) +		dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn35_hubp_create( +	struct dc_context *ctx, +	uint32_t inst) +{ +	struct dcn20_hubp *hubp2 = +		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + +	if (!hubp2) +		return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubp_regs +	hubp_regs_init(0), +	
hubp_regs_init(1), +	hubp_regs_init(2), +	hubp_regs_init(3); + +	if (hubp35_construct(hubp2, ctx, inst, +			&hubp_regs[inst], &hubp_shift, &hubp_mask)) +		return &hubp2->base; + +	BREAK_TO_DEBUGGER(); +	kfree(hubp2); +	return NULL; +} + +static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx) +{ +	dcn35_dwbc_set_fgcg( +		dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb); +} + +static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ +	int i; +	uint32_t pipe_count = pool->res_cap->num_dwb; + +	for (i = 0; i < pipe_count; i++) { +		struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), +						    GFP_KERNEL); + +		if (!dwbc30) { +			dm_error("DC: failed to create dwbc30!\n"); +			return false; +		} + +#undef REG_STRUCT +#define REG_STRUCT dwbc35_regs +		dwbc_regs_dcn3_init(0); + +		dcn35_dwbc_construct(dwbc30, ctx, +				&dwbc35_regs[i], +				&dwbc35_shift, +				&dwbc35_mask, +				i); + +		pool->dwbc[i] = &dwbc30->base; + +		dcn35_dwbc_init(dwbc30, ctx); +	} +	return true; +} + +static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30, +				struct dc_context *ctx) +{ +	dcn35_mmhubbub_set_fgcg( +		mcif_wb30, +		ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub); +} + +static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ +	int i; +	uint32_t pipe_count = pool->res_cap->num_dwb; + +	for (i = 0; i < pipe_count; i++) { +		struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), +						    GFP_KERNEL); + +		if (!mcif_wb30) { +			dm_error("DC: failed to create mcif_wb30!\n"); +			return false; +		} + +#undef REG_STRUCT +#define REG_STRUCT mcif_wb35_regs +		mcif_wb_regs_dcn3_init(0); + +		dcn35_mmhubbub_construct(mcif_wb30, ctx, +				&mcif_wb35_regs[i], +				&mcif_wb35_shift, +				&mcif_wb35_mask, +				i); + +		dcn35_mmhubbub_init(mcif_wb30, ctx); + +		pool->mcif_wb[i] = &mcif_wb30->base; +	} +	return true; +} + +static struct 
display_stream_compressor *dcn35_dsc_create( +	struct dc_context *ctx, uint32_t inst) +{ +	struct dcn20_dsc *dsc = +		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + +	if (!dsc) { +		BREAK_TO_DEBUGGER(); +		return NULL; +	} + +#undef REG_STRUCT +#define REG_STRUCT dsc_regs +	dsc_regsDCN35_init(0), +	dsc_regsDCN35_init(1), +	dsc_regsDCN35_init(2), +	dsc_regsDCN35_init(3); + +	dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); +	dsc35_set_fgcg(dsc, +		       ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc); +	return &dsc->base; +} + +static void dcn351_destroy_resource_pool(struct resource_pool **pool) +{ +	struct dcn351_resource_pool *dcn351_pool = TO_DCN351_RES_POOL(*pool); + +	dcn351_resource_destruct(dcn351_pool); +	kfree(dcn351_pool); +	*pool = NULL; +} + +static struct clock_source *dcn35_clock_source_create( +		struct dc_context *ctx, +		struct dc_bios *bios, +		enum clock_source_id id, +		const struct dce110_clk_src_regs *regs, +		bool dp_clk_src) +{ +	struct dce110_clk_src *clk_src = +		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + +	if (!clk_src) +		return NULL; + +	if (dcn31_clk_src_construct(clk_src, ctx, bios, id, +			regs, &cs_shift, &cs_mask)) { +		clk_src->base.dp_clk_src = dp_clk_src; +		return &clk_src->base; +	} + +	BREAK_TO_DEBUGGER(); +	return NULL; +} + +static struct dc_cap_funcs cap_funcs = { +	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ +	*panel_config = panel_config_defaults; +} + + +static bool dcn351_validate_bandwidth(struct dc *dc, +		struct dc_state *context, +		bool fast_validate) +{ +	bool out = false; + +	out = dml2_validate(dc, context, fast_validate); + +	if (fast_validate) +		return out; + +	DC_FP_START(); +	dcn351_decide_zstate_support(dc, context); +	DC_FP_END(); + +	return out; +} + + +static struct resource_funcs dcn351_res_pool_funcs = { +	.destroy = dcn351_destroy_resource_pool, +	
.link_enc_create = dcn35_link_encoder_create, +	.link_enc_create_minimal = dcn31_link_enc_create_minimal, +	.link_encs_assign = link_enc_cfg_link_encs_assign, +	.link_enc_unassign = link_enc_cfg_link_enc_unassign, +	.panel_cntl_create = dcn31_panel_cntl_create, +	.validate_bandwidth = dcn351_validate_bandwidth, +	.calculate_wm_and_dlg = NULL, +	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a, +	.populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, +	.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, +	.release_pipe = dcn20_release_pipe, +	.add_stream_to_ctx = dcn30_add_stream_to_ctx, +	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, +	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx, +	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, +	.set_mcif_arb_params = dcn30_set_mcif_arb_params, +	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, +	.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, +	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, +	.update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu, +	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +	.get_panel_config_defaults = dcn35_get_panel_config_defaults, +	.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, +}; + +static bool dcn351_resource_construct( +	uint8_t num_virtual_links, +	struct dc *dc, +	struct dcn351_resource_pool *pool) +{ +	int i; +	struct dc_context *ctx = dc->ctx; +	struct irq_service_init_data init_data; + +#undef REG_STRUCT +#define REG_STRUCT bios_regs +	bios_regs_init(); + +#undef REG_STRUCT +#define REG_STRUCT clk_src_regs +	clk_src_regs_init(0, A), +	clk_src_regs_init(1, B), +	clk_src_regs_init(2, C), +	clk_src_regs_init(3, D), +	clk_src_regs_init(4, E); + +#undef REG_STRUCT +#define REG_STRUCT abm_regs +	abm_regs_init(0), +	abm_regs_init(1), +	abm_regs_init(2), +	abm_regs_init(3); + +#undef REG_STRUCT 
+#define REG_STRUCT dccg_regs
+	dccg_regs_init();
+
+	ctx->dc_bios->regs = &bios_regs;
+
+	pool->base.res_cap = &res_cap_dcn351;
+
+	pool->base.funcs = &dcn351_res_pool_funcs;
+
+	/*************************************************
+	 *  Resource + asic cap hardcoding               *
+	 *************************************************/
+	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+	dc->caps.max_downscale_ratio = 600;
+	dc->caps.i2c_speed_in_khz = 100;
+	dc->caps.i2c_speed_in_khz_hdcp = 100;
+	dc->caps.max_cursor_size = 256;
+	dc->caps.min_horizontal_blanking_period = 80;
+	dc->caps.dmdata_alloc_size = 2048;
+	dc->caps.max_slave_planes = 2;
+	dc->caps.max_slave_yuv_planes = 2;
+	dc->caps.max_slave_rgb_planes = 2;
+	dc->caps.post_blend_color_processing = true;
+	dc->caps.force_dp_tps4_for_cp2520 = true;
+	if (dc->config.forceHBR2CP2520)
+		dc->caps.force_dp_tps4_for_cp2520 = false;
+	dc->caps.dp_hpo = true;
+	dc->caps.dp_hdmi21_pcon_support = true;
+
+	dc->caps.edp_dsc_support = true;
+	dc->caps.extended_aux_timeout_support = true;
+	dc->caps.dmcub_support = true;
+	dc->caps.is_apu = true;
+	dc->caps.seamless_odm = true;
+
+	dc->caps.zstate_support = true;
+	dc->caps.ips_support = true;
+	dc->caps.max_v_total = (1 << 15) - 1;
+
+	/* Color pipeline capabilities */
+	dc->caps.color.dpp.dcn_arch = 1;
+	dc->caps.color.dpp.input_lut_shared = 0;
+	dc->caps.color.dpp.icsc = 1;
+	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+	dc->caps.color.dpp.post_csc = 1;
+	dc->caps.color.dpp.gamma_corr = 1;
+	dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+	dc->caps.color.dpp.hw_3d_lut = 1;
+	
dc->caps.color.dpp.ogam_ram = 0;  // no OGAM in DPP since DCN1
+	// no OGAM ROM on DCN301
+	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+	dc->caps.color.dpp.ocsc = 0;
+
+	dc->caps.color.mpc.gamut_remap = 1;
+	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+	dc->caps.color.mpc.ogam_ram = 1;
+	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+	dc->caps.color.mpc.ocsc = 1;
+
+	/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+	 * to provide some margin.
+	 * It's expected for future ASIC to have equal or higher value, in order to
+	 * have deterministic power improvement from generation to generation. 
+	 * (i.e., we should not expect new ASIC generation with lower vmin rate) +	 */ +	dc->caps.max_disp_clock_khz_at_vmin = 650000; + +	/* Use pipe context based otg sync logic */ +	dc->config.use_pipe_ctx_sync_logic = true; + +	/* read VBIOS LTTPR caps */ +	{ +		if (ctx->dc_bios->funcs->get_lttpr_caps) { +			enum bp_result bp_query_result; +			uint8_t is_vbios_lttpr_enable = 0; + +			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +		} + +		/* interop bit is implicit */ +		{ +			dc->caps.vbios_lttpr_aware = true; +		} +	} + +	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) +		dc->debug = debug_defaults_drv; + +	// Init the vm_helper +	if (dc->vm_helper) +		vm_helper_init(dc->vm_helper, 16); + +	/************************************************* +	 *  Create resources                             * +	 *************************************************/ + +	/* Clock Sources for Pixel Clock*/ +	pool->base.clock_sources[DCN351_CLK_SRC_PLL0] = +			dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL0, +				&clk_src_regs[0], false); +	pool->base.clock_sources[DCN351_CLK_SRC_PLL1] = +			dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL1, +				&clk_src_regs[1], false); +	pool->base.clock_sources[DCN351_CLK_SRC_PLL2] = +			dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL2, +				&clk_src_regs[2], false); +	pool->base.clock_sources[DCN351_CLK_SRC_PLL3] = +			dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL3, +				&clk_src_regs[3], false); +	pool->base.clock_sources[DCN351_CLK_SRC_PLL4] = +			dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL4, +				&clk_src_regs[4], false); + +	pool->base.clk_src_count = DCN351_CLK_SRC_TOTAL; + +	/* todo: not reuse phy_pll registers */ +	pool->base.dp_clock_source = +			
dcn35_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_ID_DP_DTO, +				&clk_src_regs[0], true); + +	for (i = 0; i < pool->base.clk_src_count; i++) { +		if (pool->base.clock_sources[i] == NULL) { +			dm_error("DC: failed to create clock sources!\n"); +			BREAK_TO_DEBUGGER(); +			goto create_fail; +		} +	} +	/*temp till dml2 fully work without dml1*/ +	dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); + +	/* TODO: DCCG */ +	pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); +	if (pool->base.dccg == NULL) { +		dm_error("DC: failed to create dccg!\n"); +		BREAK_TO_DEBUGGER(); +		goto create_fail; +	} + +#undef REG_STRUCT +#define REG_STRUCT pg_cntl_regs +	pg_cntl_dcn35_regs_init(); + +	pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask); +	if (pool->base.pg_cntl == NULL) { +		dm_error("DC: failed to create power gate control!\n"); +		BREAK_TO_DEBUGGER(); +		goto create_fail; +	} + +	/* TODO: IRQ */ +	init_data.ctx = dc->ctx; +	pool->base.irqs = dal_irq_service_dcn351_create(&init_data); +	if (!pool->base.irqs) +		goto create_fail; + +	/* HUBBUB */ +	pool->base.hubbub = dcn35_hubbub_create(ctx); +	if (pool->base.hubbub == NULL) { +		BREAK_TO_DEBUGGER(); +		dm_error("DC: failed to create hubbub!\n"); +		goto create_fail; +	} + +	/* HUBPs, DPPs, OPPs and TGs */ +	for (i = 0; i < pool->base.pipe_count; i++) { +		pool->base.hubps[i] = dcn35_hubp_create(ctx, i); +		if (pool->base.hubps[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error( +				"DC: failed to create hubps!\n"); +			goto create_fail; +		} + +		pool->base.dpps[i] = dcn35_dpp_create(ctx, i); +		if (pool->base.dpps[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error( +				"DC: failed to create dpps!\n"); +			goto create_fail; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_opp; i++) { +		pool->base.opps[i] = dcn35_opp_create(ctx, i); +		if (pool->base.opps[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error( +				"DC: 
failed to create output pixel processor!\n"); +			goto create_fail; +		} +	} + +	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { +		pool->base.timing_generators[i] = dcn35_timing_generator_create( +				ctx, i); +		if (pool->base.timing_generators[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error("DC: failed to create tg!\n"); +			goto create_fail; +		} +	} +	pool->base.timing_generator_count = i; + +	/* PSR */ +	pool->base.psr = dmub_psr_create(ctx); +	if (pool->base.psr == NULL) { +		dm_error("DC: failed to create psr obj!\n"); +		BREAK_TO_DEBUGGER(); +		goto create_fail; +	} + +	/* Replay */ +	pool->base.replay = dmub_replay_create(ctx); +	if (pool->base.replay == NULL) { +		dm_error("DC: failed to create replay obj!\n"); +		BREAK_TO_DEBUGGER(); +		goto create_fail; +	} + +	/* ABM */ +	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { +		pool->base.multiple_abms[i] = dmub_abm_create(ctx, +				&abm_regs[i], +				&abm_shift, +				&abm_mask); +		if (pool->base.multiple_abms[i] == NULL) { +			dm_error("DC: failed to create abm for pipe %d!\n", i); +			BREAK_TO_DEBUGGER(); +			goto create_fail; +		} +	} + +	/* MPC and DSC */ +	pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); +	if (pool->base.mpc == NULL) { +		BREAK_TO_DEBUGGER(); +		dm_error("DC: failed to create mpc!\n"); +		goto create_fail; +	} + +	for (i = 0; i < pool->base.res_cap->num_dsc; i++) { +		pool->base.dscs[i] = dcn35_dsc_create(ctx, i); +		if (pool->base.dscs[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error("DC: failed to create display stream compressor %d!\n", i); +			goto create_fail; +		} +	} + +	/* DWB and MMHUBBUB */ +	if (!dcn35_dwbc_create(ctx, &pool->base)) { +		BREAK_TO_DEBUGGER(); +		dm_error("DC: failed to create dwbc!\n"); +		goto create_fail; +	} + +	if (!dcn35_mmhubbub_create(ctx, &pool->base)) { +		BREAK_TO_DEBUGGER(); +		dm_error("DC: failed to create mcif_wb!\n"); +		goto create_fail; +	} + +	/* AUX 
and I2C */ +	for (i = 0; i < pool->base.res_cap->num_ddc; i++) { +		pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); +		if (pool->base.engines[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error( +				"DC:failed to create aux engine!!\n"); +			goto create_fail; +		} +		pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); +		if (pool->base.hw_i2cs[i] == NULL) { +			BREAK_TO_DEBUGGER(); +			dm_error( +				"DC:failed to create hw i2c!!\n"); +			goto create_fail; +		} +		pool->base.sw_i2cs[i] = NULL; +	} + +	/* DCN3.5 has 6 DPIA */ +	pool->base.usb4_dpia_count = 4; +	if (dc->debug.dpia_debug.bits.disable_dpia) +		pool->base.usb4_dpia_count = 0; + +	/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ +	if (!resource_construct(num_virtual_links, dc, &pool->base, +			&res_create_funcs)) +		goto create_fail; + +	/* HW Sequencer and Plane caps */ +	dcn351_hw_sequencer_construct(dc); + +	dc->caps.max_planes =  pool->base.pipe_count; + +	for (i = 0; i < dc->caps.max_planes; ++i) +		dc->caps.planes[i] = plane_cap; + +	dc->cap_funcs = cap_funcs; + + +	dc->dcn_ip->max_num_dpp = pool->base.pipe_count; + +	dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; +	dc->dml2_options.use_native_pstate_optimization = true; +	dc->dml2_options.use_native_soc_bb_construction = true; +	dc->dml2_options.minimize_dispclk_using_odm = false; +	if (dc->config.EnableMinDispClkODM) +		dc->dml2_options.minimize_dispclk_using_odm = true; +	dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; + +	dc->dml2_options.callbacks.dc = dc; +	dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; +	dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; +	dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; +	dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = 
&resource_update_pipes_for_stream_with_slice_count; +	dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; +	dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; +	dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; +	dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; +	dc->dml2_options.max_segments_per_hubp = 24; +	dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + +	if (dc->config.sdpif_request_limit_words_per_umc == 0) +		dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ + +	return true; + +create_fail: + +	dcn351_resource_destruct(pool); + +	return false; +} + +struct resource_pool *dcn351_create_resource_pool( +		const struct dc_init_data *init_data, +		struct dc *dc) +{ +	struct dcn351_resource_pool *pool = +		kzalloc(sizeof(struct dcn351_resource_pool), GFP_KERNEL); + +	if (!pool) +		return NULL; + +	if (dcn351_resource_construct(init_data->num_virtual_links, dc, pool)) +		return &pool->base; + +	BREAK_TO_DEBUGGER(); +	kfree(pool); +	return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h new file mode 100644 index 000000000000..f3e045777a3d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ + +#ifndef _DCN351_RESOURCE_H_ +#define _DCN351_RESOURCE_H_ + +#include "core_types.h" + +extern struct _vcs_dpi_ip_params_st dcn3_51_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc; + +#define TO_DCN351_RES_POOL(pool)\ +	container_of(pool, struct dcn351_resource_pool, base) + +struct dcn351_resource_pool { +	struct resource_pool base; +}; + +struct resource_pool *dcn351_create_resource_pool( +		const struct dc_init_data *init_data, +		struct dc *dc); + +#endif /* _DCN351_RESOURCE_H_ */ |