Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
174 files changed, 15206 insertions, 5434 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index b1f0d6260226..f05ab2bf20c5 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -23,12 +23,12 @@ # Makefile for Display Core (dc) component. # -DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual +DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 DC_LIBS += dsc -DC_LIBS += dcn10 dml +DC_LIBS += dcn10 DC_LIBS += dcn21 DC_LIBS += dcn201 DC_LIBS += dcn30 @@ -36,6 +36,8 @@ DC_LIBS += dcn301 DC_LIBS += dcn302 DC_LIBS += dcn303 DC_LIBS += dcn31 +DC_LIBS += dcn315 +DC_LIBS += dcn316 endif DC_LIBS += dce120 @@ -58,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ +dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o ifdef CONFIG_DRM_AMD_DC_DCN diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 1e385d55e7fb..23a3b640f0ee 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1692,7 +1692,6 @@ static enum bp_result bios_parser_get_encoder_cap_info( ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0; info->HDMI_6GB_EN = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) info->IS_DP2_CAPABLE = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0; info->DP_UHBR10_EN = (record->encodercaps & @@ -1701,7 +1700,6 @@ static enum bp_result bios_parser_get_encoder_cap_info( ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0; info->DP_UHBR20_EN = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0; -#endif info->DP_IS_USB_C = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index ad13e4e36d77..0e36cd800fc9 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -456,7 +456,7 @@ static enum bp_result transmitter_control_v2( if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) || (CONNECTOR_ID_DUAL_LINK_DVID == connector_id)) /* on INIT this bit should be set according to the - * phisycal connector + * physical connector * Bit0: dual link connector flag * =0 connector is single link connector * =1 connector is dual link connector @@ -468,7 +468,7 @@ static enum bp_result transmitter_control_v2( cpu_to_le16((uint8_t)cntl->connector_obj_id.id); break; case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS: - /* votage swing and pre-emphsis */ + /* voltage swing and pre-emphsis */ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select; params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings; break; @@ -2120,7 +2120,7 @@ static enum bp_result program_clock_v5( memset(¶ms, 0, sizeof(params)); if (!bp->cmd_helper->clock_source_id_to_atom( bp_params->pll_id, &atom_pll_id)) { - BREAK_TO_DEBUGGER(); /* Invalid Inpute!! */ + BREAK_TO_DEBUGGER(); /* Invalid Input!! 
*/ return BP_RESULT_BADINPUT; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 9afa5eb2e6d3..f52f7ff7ead4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -338,12 +338,10 @@ static enum bp_result transmitter_control_v1_7( const struct command_table_helper *cmd = bp->cmd_helper; struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0; if (dc_is_dp_signal(cntl->signal)) hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0; -#endif dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); dig_v1_7.action = (uint8_t)cntl->action; @@ -358,9 +356,7 @@ static enum bp_result transmitter_control_v1_7( dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; -#if defined(CONFIG_DRM_AMD_DC_DCN) dig_v1_7.HPO_instance = hpo_instance; -#endif dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; if (cntl->action == TRANSMITTER_CONTROL_ENABLE || diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index eedc553f340e..dd9704ef4ccc 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -76,6 +76,8 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCN_VERSION_3_02: case DCN_VERSION_3_03: case DCN_VERSION_3_1: + case DCN_VERSION_3_15: + case DCN_VERSION_3_16: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index 6bd73e49a6d2..817871917632 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -153,4 +153,23 @@ CLK_MGR_DCN31 = dcn31_smu.o dcn31_clk_mgr.o AMD_DAL_CLK_MGR_DCN31 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn31/,$(CLK_MGR_DCN31)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN31) + +############################################################################### +# DCN315 +############################################################################### +CLK_MGR_DCN315 = dcn315_smu.o dcn315_clk_mgr.o + +AMD_DAL_CLK_MGR_DCN315 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn315/,$(CLK_MGR_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN315) + +############################################################################### +# DCN316 +############################################################################### +CLK_MGR_DCN316 = dcn316_smu.o dcn316_clk_mgr.o + +AMD_DAL_CLK_MGR_DCN316 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn316/,$(CLK_MGR_DCN316)) + +AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN316) + endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 9200c8ce02ba..772bffda68cc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -43,6 +43,8 @@ #include "dcn30/dcn30_clk_mgr.h" #include "dcn301/vg_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn315/dcn315_clk_mgr.h" +#include "dcn316/dcn316_clk_mgr.h" int clk_mgr_helper_get_active_display_cnt( @@ -289,6 +291,31 @@ struct clk_mgr *dc_clk_mgr_create(struct 
dc_context *ctx, struct pp_smu_funcs *p dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base.base; } + break; + case AMDGPU_FAMILY_GC_10_3_6: { + struct clk_mgr_dcn315 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); + + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn315_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base.base; + } + break; + case AMDGPU_FAMILY_GC_10_3_7: { + struct clk_mgr_dcn316 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); + + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn316_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base.base; + } + break; #endif default: @@ -322,7 +349,15 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) break; case FAMILY_YELLOW_CARP: - dcn31_clk_mgr_destroy(clk_mgr); + dcn31_clk_mgr_destroy(clk_mgr); + break; + + case AMDGPU_FAMILY_GC_10_3_6: + dcn315_clk_mgr_destroy(clk_mgr); + break; + + case AMDGPU_FAMILY_GC_10_3_7: + dcn316_clk_mgr_destroy(clk_mgr); break; default: diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index b210f8e9d592..dfba6138f538 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -374,6 +374,8 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce) clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } + if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss) + clk_mgr_dce->dprefclk_ss_percentage = 0; } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 06bab24d8e27..450eaead4f20 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -101,7 +101,8 @@ static uint32_t rv1_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsi return res_val; } -int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param) +static int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index f977f29907df..5ed6a93d1708 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -25,30 +25,23 @@ #include "dccg.h" #include "clk_mgr_internal.h" - #include "dcn30_clk_mgr_smu_msg.h" #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" +#include "dcn30/dcn30_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" - #include "atomfirmware.h" - - #include "sienna_cichlid_ip_offset.h" #include "dcn/dcn_3_0_0_offset.h" #include "dcn/dcn_3_0_0_sh_mask.h" - #include "nbio/nbio_7_4_offset.h" - -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" - +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" -/*we don't have clk folder yet*/ -#include "dcn30/dcn30_clk_mgr.h" +#include "dcn30_smu11_driver_if.h" #undef FN #define FN(reg_name, field_name) \ @@ -83,7 +76,7 @@ static const struct clk_mgr_mask clk_mgr_mask = { /* 
Query SMU for all clock states for a particular clock */ -static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, unsigned int *num_levels) +static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, uint32_t clk, unsigned int *entry_0, unsigned int *num_levels) { unsigned int i; char *entry_i = (char *)entry_0; @@ -184,6 +177,7 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base) dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_levels); + dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, 0); /* DTBCLK */ dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK, @@ -473,8 +467,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); /* Refresh bounding box */ + DC_FP_START(); clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); + DC_FP_END(); } static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index 8ecc708bcd9e..bfc960579760 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -29,6 +29,7 @@ #include "clk_mgr_internal.h" #include "reg_helper.h" #include "dalsmc.h" +#include "dcn30_smu11_driver_if.h" #define mmDAL_MSG_REG 0x1628A #define mmDAL_ARG_REG 0x16273 @@ -197,7 +198,7 @@ void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) } /* Returns the actual frequency that was set in MHz, 0 on failure */ -unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz) +unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz) { uint32_t response = 0; @@ -215,7 +216,7 @@ unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PP } /* Returns the actual frequency that was set in MHz, 0 on failure */ -unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz) +unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz) { uint32_t response = 0; @@ -246,7 +247,7 @@ unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PP * * Returns 0 on failure */ -unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level) +unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level) { uint32_t response = 0; @@ -264,7 +265,7 @@ unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, P } /* Returns the max DPM frequency in DC mode in MHz, 0 on failure */ -unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk) +unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk) { uint32_t response = 0; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h index dd2640a3ce5d..ca9f5296be94 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h @@ -28,65 +28,6 @@ #include "core_types.h" -#define SMU11_DRIVER_IF_VERSION 0x1F - -typedef enum { - PPCLK_GFXCLK = 0, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_FCLK, - PPCLK_DCLK_0, - PPCLK_VCLK_0, - PPCLK_DCLK_1, - PPCLK_VCLK_1, - PPCLK_DCEFCLK, - PPCLK_DISPCLK, - PPCLK_PIXCLK, - PPCLK_PHYCLK, - PPCLK_DTBCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef struct { - uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MinUclk; - uint16_t MaxUclk; - - uint8_t WmSetting; - uint8_t Flags; - uint8_t Padding[2]; - -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCEFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef enum { - WATERMARKS_CLOCK_RANGE = 0, - WATERMARKS_DUMMY_PSTATE, - WATERMARKS_MALL, - WATERMARKS_COUNT, -} WATERMARKS_FLAGS_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; -} Watermarks_t; - -typedef struct { - Watermarks_t Watermarks; - - uint32_t MmHubPadding[8]; // SMU internal use -} WatermarksExternal_t; - -#define TABLE_WATERMARKS 1 - struct clk_mgr_internal; bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input); @@ -97,10 +38,10 @@ void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low); void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr); void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz); -unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz); -unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level); -unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk); +unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); +unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); +unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level); +unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk); void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz); void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays); void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, bool enable, uint8_t cache_timer_delay, uint8_t cache_timer_scale); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h new file mode 100644 index 000000000000..8ea8ee57b39f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h @@ -0,0 +1,74 @@ +// This is a stripped-down version of the smu11_driver_if.h file for the relevant DAL interfaces. 
+ +#define SMU11_DRIVER_IF_VERSION 0x40 + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DCLK_1, + PPCLK_VCLK_1, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef struct { + uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +// Table types +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS_PSM_DEBUG 2 +#define TABLE_AVFS_FUSE_OVERRIDE 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_PACE 10 +#define TABLE_ECCINFO 11 +#define TABLE_COUNT 12 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 9d17c5a5ae01..59fdd7f0d609 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -139,9 +139,9 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, * also if safe to lower is false, we just go in the higher state */ if (safe_to_lower) { - if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { - dcn31_smu_set_Z9_support(clk_mgr, true); + dcn31_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } @@ -167,7 +167,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } else { if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { - dcn31_smu_set_Z9_support(clk_mgr, false); + dcn31_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } @@ -697,6 +697,8 @@ void dcn31_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.dccg->ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); + /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ + //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn31_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index de3f4643eeef..c5d7d075026f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -310,23 +310,32 @@ void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS); } -void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support) +void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { - //TODO: Work with smu team to define optimization options. - unsigned int msg_id; + unsigned int msg_id, param; if (!clk_mgr->smu_present) return; - if (support) - msg_id = VBIOSSMC_MSG_AllowZstatesEntry; + if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface && + (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)) + support = DCN_ZSTATE_SUPPORT_DISALLOW; + + + if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY) + param = 1; else + param = 0; + + if (support == DCN_ZSTATE_SUPPORT_DISALLOW) msg_id = VBIOSSMC_MSG_DisallowZstatesEntry; + else + msg_id = VBIOSSMC_MSG_AllowZstatesEntry; dcn31_smu_send_msg_with_param( clk_mgr, msg_id, - 0); + param); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h index cd0b7e1e685f..dfa25a76a6d1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h @@ -265,7 +265,7 @@ void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr); void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support); +void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support); void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); #endif /* DAL_DC_31_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c new file mode 100644 index 000000000000..edda572dc570 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -0,0 +1,620 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + + +#include "dccg.h" +#include "clk_mgr_internal.h" + +// For dce12_get_dp_ref_freq_khz +#include "dce100/dce_clk_mgr.h" +// For dcn20_update_clocks_update_dpp_dto +#include "dcn20/dcn20_clk_mgr.h" +#include "dcn31/dcn31_clk_mgr.h" +#include "dcn315_clk_mgr.h" + +#include "core_types.h" +#include "dcn315_smu.h" +#include "dm_helpers.h" + +#include "dc_dmub_srv.h" + +#if defined (CONFIG_DRM_AMD_DC_DP2_0) +#include "dc_link_dp.h" +#endif + +#define TO_CLK_MGR_DCN315(clk_mgr)\ + container_of(clk_mgr, struct clk_mgr_dcn315, base) + +static int dcn315_get_active_display_cnt_wa( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + bool tmds_present = false; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || + stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) + tmds_present = true; + } + + for (i = 0; i < dc->link_count; i++) { + const struct dc_link *link = dc->links[i]; + + /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ + if (link->link_enc && link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + display_count++; + } + + /* WA for hang on HDMI after display off back back on*/ + if (display_count == 0 && tmds_present) + display_count = 1; + + return display_count; +} + +static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable) +{ + struct dc *dc = clk_mgr_base->ctx->dc; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; ++i) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->top_pipe || pipe->prev_odm_pipe) + continue; + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (disable) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + else + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } + } +} + +static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower) +{ + union dmub_rb_cmd cmd; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; + struct dc *dc = clk_mgr_base->ctx->dc; + int display_count; + bool update_dppclk = false; + bool update_dispclk = false; + bool dpp_clock_lowered = false; + + if (dc->work_arounds.skip_clock_update) + return; + + clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; + /* + * if it is safe to lower, but we are already in the lower state, we don't have to do anything + * also if safe to lower is false, we just go in the higher state + */ + clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; + if (safe_to_lower) { + /* check that we're not already in lower */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { + display_count = dcn315_get_active_display_cnt_wa(dc, context); + /* if we can go lower, go lower */ + if (display_count == 0) { + union display_idle_optimization_u idle_info = { 0 }; + idle_info.idle_info.df_request_disabled = 1; + idle_info.idle_info.phy_ref_clk_off = 1; + idle_info.idle_info.s0i2_rdy = 1; + dcn315_smu_set_display_idle_optimization(clk_mgr, idle_info.data); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; + } + } + } else { + /* check that we're not already in D0 */ + if 
(clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { + union display_idle_optimization_u idle_info = { 0 }; + dcn315_smu_set_display_idle_optimization(clk_mgr, idle_info.data); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; + } + } + + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { + clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; + dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz); + } + + if (should_set_clock(safe_to_lower, + new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { + clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; + dcn315_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); + } + + // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + if (new_clocks->dispclk_khz < 100000) + new_clocks->dispclk_khz = 100000; + } + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; + update_dppclk = true; + } + + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + dcn315_disable_otg_wa(clk_mgr_base, true); + + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); + dcn315_disable_otg_wa(clk_mgr_base, false); + + update_dispclk = true; + } + + if (dpp_clock_lowered) { + // increase per DPP DTO before lowering global dppclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); + dcn315_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); + } else { + // increase global DPPCLK before lowering per DPP DTO + if (update_dppclk || update_dispclk) + dcn315_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); + } + + // notify DMCUB of latest clocks + memset(&cmd, 0, sizeof(cmd)); + cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; + cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS; + cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz; + cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz = + clk_mgr_base->clks.dcfclk_deep_sleep_khz; + cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; + cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; + + dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +} + +static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) +{ + return; +} + +static struct clk_bw_params dcn315_bw_params = { + .vram_type = Ddr4MemType, + .num_channels = 1, + .clk_table = { + .num_entries = 5, + }, + +}; + +static struct wm_table ddr5_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 64.0, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = 
true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 64.0, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 64.0, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 64.0, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + } +}; + +static struct wm_table lpddr5_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + } +}; + +static DpmClocks_315_t dummy_clocks; + +static struct dcn315_watermarks dummy_wms = { 0 }; + +static void dcn315_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn315_watermarks *table) +{ + int i, num_valid_sets; + + num_valid_sets = 0; + + for (i = 0; i < WM_SET_COUNT; i++) { + /* skip empty entries, the smu array has no holes*/ + if (!bw_params->wm_table.entries[i].valid) + continue; + + table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; + /* We will not select WM based on fclk, so leave it as unconstrained */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; + + if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { + if (i == 0) + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; + else { + /* add 1 to make it non-overlapping with next lvl */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = + bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; + } + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk = + bw_params->clk_table.entries[i].dcfclk_mhz; + + } else { + /* unconstrained for memory retraining */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; + + /* Modify previous watermark range to cover up to max */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; + } + num_valid_sets++; + } + + ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ + + /* modify the min and max to make sure we cover the whole range*/ + table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0; + table->WatermarkRow[WM_DCFCLK][0].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF; + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; + + /* This is for writeback only, does not matter currently as no writeback support*/ + table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A; + table->WatermarkRow[WM_SOCCLK][0].MinClock = 0; + table->WatermarkRow[WM_SOCCLK][0].MaxClock 
= 0xFFFF; + table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0; + table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF; +} + +static void dcn315_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr); + struct dcn315_watermarks *table = clk_mgr_dcn315->smu_wm_set.wm_set; + + if (!clk_mgr->smu_ver) + return; + + if (!table || clk_mgr_dcn315->smu_wm_set.mc_address.quad_part == 0) + return; + + memset(table, 0, sizeof(*table)); + + dcn315_build_watermark_ranges(clk_mgr_base->bw_params, table); + + dcn315_smu_set_dram_addr_high(clk_mgr, + clk_mgr_dcn315->smu_wm_set.mc_address.high_part); + dcn315_smu_set_dram_addr_low(clk_mgr, + clk_mgr_dcn315->smu_wm_set.mc_address.low_part); + dcn315_smu_transfer_wm_table_dram_2_smu(clk_mgr); +} + +static void dcn315_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, + struct dcn315_smu_dpm_clks *smu_dpm_clks) +{ + DpmClocks_315_t *table = smu_dpm_clks->dpm_clks; + + if (!clk_mgr->smu_ver) + return; + + if (!table || smu_dpm_clks->mc_address.quad_part == 0) + return; + + memset(table, 0, sizeof(*table)); + + dcn315_smu_set_dram_addr_high(clk_mgr, + smu_dpm_clks->mc_address.high_part); + dcn315_smu_set_dram_addr_low(clk_mgr, + smu_dpm_clks->mc_address.low_part); + dcn315_smu_transfer_dpm_table_smu_2_dram(clk_mgr); +} + +static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) +{ + uint32_t max = 0; + int i; + + for (i = 0; i < num_clocks; ++i) { + if (clocks[i] > max) + max = clocks[i]; + } + + return max; +} + +static unsigned int find_clk_for_voltage( + const DpmClocks_315_t *clock_table, + const uint32_t clocks[], + unsigned int voltage) +{ + int i; + + for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) { + if (clock_table->SocVoltage[i] == voltage) + return clocks[i]; + } + + ASSERT(0); + return 0; +} + +static void dcn315_clk_mgr_helper_populate_bw_params( + struct clk_mgr_internal *clk_mgr, + struct integrated_info *bios_info, + const DpmClocks_315_t *clock_table) +{ + int i, j; + struct clk_bw_params *bw_params = clk_mgr->base.bw_params; + uint32_t max_dispclk = 0, max_dppclk = 0; + + j = -1; + + ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) { + if (clock_table->DfPstateTable[i].FClk != 0) { + j = i; + break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; + + /* dispclk and dppclk can be max at any voltage, same number of levels for both */ + if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && + clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) { + max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); + max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); + } else { + ASSERT(0); + } + + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + int temp; + + bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage; + bw_params->clk_table.entries[i].wck_ratio = 1; + temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage); + if (temp) 
+ bw_params->clk_table.entries[i].dcfclk_mhz = temp; + temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage); + if (temp) + bw_params->clk_table.entries[i].socclk_mhz = temp; + bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; + bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; + } + + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = bios_info->ma_channel_number; + + for (i = 0; i < WM_SET_COUNT; i++) { + bw_params->wm_table.entries[i].wm_inst = i; + + if (i >= bw_params->clk_table.num_entries) { + bw_params->wm_table.entries[i].valid = false; + continue; + } + + bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG; + bw_params->wm_table.entries[i].valid = true; + } +} + +static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + dcn315_smu_enable_pme_wa(clk_mgr); +} + +static struct clk_mgr_funcs dcn315_funcs = { + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .update_clocks = dcn315_update_clocks, + .init_clocks = dcn31_init_clocks, + .enable_pme_wa = dcn315_enable_pme_wa, + .are_clock_states_equal = dcn31_are_clock_states_equal, + .notify_wm_ranges = dcn315_notify_wm_ranges +}; +extern struct clk_mgr_funcs dcn3_fpga_funcs; + +void dcn315_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_dcn315 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg) +{ + struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 }; + + clk_mgr->base.base.ctx = ctx; + clk_mgr->base.base.funcs = &dcn315_funcs; + + clk_mgr->base.pp_smu = pp_smu; + + clk_mgr->base.dccg = dccg; + clk_mgr->base.dfs_bypass_disp_clk = 0; + + clk_mgr->base.dprefclk_ss_percentage = 0; + clk_mgr->base.dprefclk_ss_divider = 1000; + clk_mgr->base.ss_on_dprefclk = false; + clk_mgr->base.dfs_ref_freq_khz = 48000; + + clk_mgr->smu_wm_set.wm_set = (struct dcn315_watermarks *)dm_helpers_allocate_gpu_mem( + clk_mgr->base.base.ctx, + DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + sizeof(struct dcn315_watermarks), + &clk_mgr->smu_wm_set.mc_address.quad_part); + + if (!clk_mgr->smu_wm_set.wm_set) { + clk_mgr->smu_wm_set.wm_set = &dummy_wms; + clk_mgr->smu_wm_set.mc_address.quad_part = 0; + } + ASSERT(clk_mgr->smu_wm_set.wm_set); + + smu_dpm_clks.dpm_clks = (DpmClocks_315_t *)dm_helpers_allocate_gpu_mem( + clk_mgr->base.base.ctx, + DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + sizeof(DpmClocks_315_t), + &smu_dpm_clks.mc_address.quad_part); + + if (smu_dpm_clks.dpm_clks == NULL) { + smu_dpm_clks.dpm_clks = &dummy_clocks; + smu_dpm_clks.mc_address.quad_part = 0; + } + + ASSERT(smu_dpm_clks.dpm_clks); + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + clk_mgr->base.base.funcs = &dcn3_fpga_funcs; + } else { + struct clk_log_info log_info = {0}; + + clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn315_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn315_bw_params.wm_table = ddr5_wm_table; + } + /* Saved clocks configured at boot for debug purposes */ + dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + + } + + clk_mgr->base.base.dprefclk_khz = 600000; + clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); + clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + dce_clock_read_ss_info(&clk_mgr->base); + 
clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + + clk_mgr->base.base.bw_params = &dcn315_bw_params; + + if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { + dcn315_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); + + if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + dcn315_clk_mgr_helper_populate_bw_params( + &clk_mgr->base, + ctx->dc_bios->integrated_info, + smu_dpm_clks.dpm_clks); + } + } + + if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) + dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + smu_dpm_clks.dpm_clks); +} + +void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) +{ + struct clk_mgr_dcn315 *clk_mgr = TO_CLK_MGR_DCN315(clk_mgr_int); + + if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) + dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + clk_mgr->smu_wm_set.wm_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h new file mode 100644 index 000000000000..ac36ddf5dd1a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h @@ -0,0 +1,49 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN315_CLK_MGR_H__ +#define __DCN315_CLK_MGR_H__ +#include "clk_mgr_internal.h" + +struct dcn315_watermarks; + +struct dcn315_smu_watermark_set { + struct dcn315_watermarks *wm_set; + union large_integer mc_address; +}; + +struct clk_mgr_dcn315 { + struct clk_mgr_internal base; + struct dcn315_smu_watermark_set smu_wm_set; +}; + +void dcn315_clk_mgr_construct(struct dc_context *ctx, + struct clk_mgr_dcn315 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg); + +void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); + +#endif //__DCN315_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c new file mode 100644 index 000000000000..880ffea2afc6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -0,0 +1,338 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" +#include "dcn315_smu.h" +#include "mp/mp_13_0_5_offset.h" + +#define MAX_INSTANCE 6 +#define MAX_SEGMENT 6 + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; + +#define regBIF_BX_PF2_RSMU_INDEX 0x0000 +#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 +#define regBIF_BX_PF2_RSMU_DATA 0x0001 +#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1 + +#define REG(reg_name) \ + (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + +#define FN(reg_name, field) \ + FD(reg_name##__##field) + +#define REG_NBIO(reg_name) \ + (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) + +#define mmMP1_C2PMSG_3 0x3B1050C + +#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team +#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version +#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0 +#define VBIOSSMC_MSG_SetDispclkFreq 0x04 ///< Set display clock frequency in MHZ +#define VBIOSSMC_MSG_Spare1 0x05 ///< Spare1 +#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ +#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ +#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ +#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency +#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ +#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected +#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power +#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event +#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0x0E ///< Set DRAM address high 32 bits for WM table transfer +#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0x0F ///< Set DRAM address low 32 bits for WM table transfer +#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10 ///< Transfer table from PMFW SRAM to system DRAM +#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11 ///< Transfer table from system DRAM to PMFW +#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12 ///< Set Idle state optimization for display off +#define VBIOSSMC_MSG_GetDprefclkFreq 0x13 ///< Get DPREF clock frequency. Return in MHZ +#define VBIOSSMC_Message_Count 0x14 ///< Total number of VBIS and DAL messages + +#define VBIOSSMC_Status_BUSY 0x0 +#define VBIOSSMC_Result_OK 0x01 ///< Message Response OK +#define VBIOSSMC_Result_Failed 0xFF ///< Message Response Failed +#define VBIOSSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command +#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite +#define VBIOSSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. 
+ */ +static uint32_t dcn315_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) +{ + uint32_t res_val = VBIOSSMC_Status_BUSY; + + do { + res_val = REG_READ(MP1_SMN_C2PMSG_38); + if (res_val != VBIOSSMC_Status_BUSY) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + return res_val; +} + +static int dcn315_smu_send_msg_with_param( + struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, unsigned int param) +{ + uint32_t result; + + result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); + ASSERT(result == VBIOSSMC_Result_OK); + + if (result == VBIOSSMC_Status_BUSY) { + return -1; + } + + /* First clear response register */ + REG_WRITE(MP1_SMN_C2PMSG_38, VBIOSSMC_Status_BUSY); + + /* Set the parameter register for the SMU message, unit is Mhz */ + REG_WRITE(MP1_SMN_C2PMSG_37, param); + + /* Trigger the message transaction by writing the message ID */ + generic_write_indirect_reg(CTX, + REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), + mmMP1_C2PMSG_3, msg_id); + + result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); + + if (result == VBIOSSMC_Status_BUSY) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } + + return REG_READ(MP1_SMN_C2PMSG_37); +} + +int dcn315_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) +{ + return dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetPmfwVersion, + 0); +} + + +int dcn315_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) +{ + int actual_dispclk_set_mhz = -1; + + if (!clk_mgr->smu_present) + return requested_dispclk_khz; + + /* Unit of SMU msg parameter is Mhz */ + actual_dispclk_set_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDispclkFreq, + khz_to_mhz_ceil(requested_dispclk_khz)); + + return actual_dispclk_set_mhz * 1000; +} + +int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) +{ + int actual_dcfclk_set_mhz = -1; + + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return -1; + + if (!clk_mgr->smu_present) + return requested_dcfclk_khz; + + actual_dcfclk_set_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetHardMinDcfclkByFreq, + khz_to_mhz_ceil(requested_dcfclk_khz)); + + return actual_dcfclk_set_mhz * 1000; +} + +int dcn315_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz) +{ + int actual_min_ds_dcfclk_mhz = -1; + + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return -1; + + if (!clk_mgr->smu_present) + return requested_min_ds_dcfclk_khz; + + actual_min_ds_dcfclk_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetMinDeepSleepDcfclk, + khz_to_mhz_ceil(requested_min_ds_dcfclk_khz)); + + return actual_min_ds_dcfclk_mhz * 1000; +} + +int dcn315_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) +{ + int actual_dppclk_set_mhz = -1; + + if (!clk_mgr->smu_present) + return requested_dpp_khz; + + actual_dppclk_set_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDppclkFreq, + khz_to_mhz_ceil(requested_dpp_khz)); + + return actual_dppclk_set_mhz * 1000; +} + +void dcn315_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info) +{ + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return; + + if (!clk_mgr->smu_present) + return; + + //TODO: Work with smu team to define optimization options. 
+ dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDisplayIdleOptimizations, + idle_info); +} + +void dcn315_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) +{ + union display_idle_optimization_u idle_info = { 0 }; + + if (!clk_mgr->smu_present) + return; + + if (enable) { + idle_info.idle_info.df_request_disabled = 1; + idle_info.idle_info.phy_ref_clk_off = 1; + } + + dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDisplayIdleOptimizations, + idle_info.data); +} + +void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_UpdatePmeRestore, + 0); +} +void dcn315_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high); +} + +void dcn315_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low); +} + +void dcn315_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS); +} + +void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS); +} + +int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr) +{ + int dprefclk_get_mhz = -1; + if (clk_mgr->smu_present) { + dprefclk_get_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetDprefclkFreq, + 0); + } + return (dprefclk_get_mhz * 1000); +} + +int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr) +{ + int fclk_get_mhz = -1; + + if (clk_mgr->smu_present) { + fclk_get_mhz = dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetFclkFrequency, + 0); + } + return (fclk_get_mhz * 1000); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h new file mode 100644 index 000000000000..66fa42f8dd18 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h @@ -0,0 +1,128 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DAL_DC_315_SMU_H_ +#define DAL_DC_315_SMU_H_ +#include "os_types.h" + +#define PMFW_DRIVER_IF_VERSION 4 + +#define NUM_DCFCLK_DPM_LEVELS 4 +#define NUM_DISPCLK_DPM_LEVELS 4 +#define NUM_DPPCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 4 +#define NUM_VCN_DPM_LEVELS 4 +#define NUM_SOC_VOLTAGE_LEVELS 4 +#define NUM_DF_PSTATE_LEVELS 4 + +typedef struct { + uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MinMclk; + uint16_t MaxMclk; + uint8_t WmSetting; + uint8_t WmType; // Used for normal pstate change or memory retraining + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 +#define WM_PSTATE_CHG 0 +#define WM_RETRAINING 1 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; +} DfPstateTable_t; + +//Freq in MHz +//Voltage in milli volts with 2 fractional bits +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_315_t; + +struct dcn315_watermarks { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + uint32_t MmHubPadding[7]; // SMU internal use +}; + +struct dcn315_smu_dpm_clks { + DpmClocks_315_t *dpm_clks; + union large_integer mc_address; +}; + +#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS +#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS + +struct display_idle_optimization { + unsigned int df_request_disabled : 1; + unsigned int phy_ref_clk_off : 1; + unsigned int s0i2_rdy : 1; + unsigned int reserved : 29; +}; + +union display_idle_optimization_u { + struct display_idle_optimization idle_info; + uint32_t data; +}; + +int dcn315_smu_get_smu_version(struct clk_mgr_internal *clk_mgr); +int dcn315_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); +int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz); +int dcn315_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); +int dcn315_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); +void dcn315_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info); +void dcn315_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); +void dcn315_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high); +void dcn315_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low); +void dcn315_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal 
*clk_mgr); +void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); +void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); +int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr); +int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr); +#endif /* DAL_DC_315_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c new file mode 100644 index 000000000000..702d00ce7da4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -0,0 +1,717 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + + +#include "dccg.h" +#include "clk_mgr_internal.h" + +// For dce12_get_dp_ref_freq_khz +#include "dce100/dce_clk_mgr.h" +// For dcn20_update_clocks_update_dpp_dto +#include "dcn20/dcn20_clk_mgr.h" +#include "dcn31/dcn31_clk_mgr.h" +#include "dcn316_clk_mgr.h" +#include "reg_helper.h" +#include "core_types.h" +#include "dcn316_smu.h" +#include "dm_helpers.h" +#include "dc_dmub_srv.h" +#include "dc_link_dp.h" + +// DCN316 this is CLK1 instance +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 6 + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } }, + { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } }, + { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } }; + +#define regCLK1_CLK_PLL_REQ 0x0237 +#define regCLK1_CLK_PLL_REQ_BASE_IDX 0 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define REG(reg_name) \ + (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + +#define TO_CLK_MGR_DCN316(clk_mgr)\ + container_of(clk_mgr, struct clk_mgr_dcn316, base) + +static int dcn316_get_active_display_cnt_wa( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + bool tmds_present = false; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || + stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) + tmds_present = true; + } + + for (i = 0; i < dc->link_count; i++) { + const struct dc_link *link = dc->links[i]; + + /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ + if (link->link_enc && link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + display_count++; + } + + /* WA for hang on HDMI after display off back back on*/ + if (display_count == 0 && tmds_present) + display_count = 1; + + return display_count; +} + +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable) +{ + struct dc *dc = clk_mgr_base->ctx->dc; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; ++i) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->top_pipe || pipe->prev_odm_pipe) + continue; + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (disable) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + else + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } + } +} + +static void dcn316_enable_pme_wa(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + dcn316_smu_enable_pme_wa(clk_mgr); +} + +static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower) +{ + union dmub_rb_cmd cmd; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 
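	/* Illustrative note, not part of the original patch: clk_mgr_base is the
	 * struct clk_mgr embedded in struct clk_mgr_internal, which in turn is
	 * the base member of struct clk_mgr_dcn316, so TO_CLK_MGR_INTERNAL()
	 * used here and TO_CLK_MGR_DCN316() defined earlier in this file are
	 * container_of() downcasts along that nesting chain.
	 */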
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; + struct dc *dc = clk_mgr_base->ctx->dc; + int display_count; + bool update_dppclk = false; + bool update_dispclk = false; + bool dpp_clock_lowered = false; + + if (dc->work_arounds.skip_clock_update) + return; + + /* + * if it is safe to lower, but we are already in the lower state, we don't have to do anything + * also if safe to lower is false, we just go in the higher state + */ + clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; + if (safe_to_lower) { + if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { + dcn316_smu_set_dtbclk(clk_mgr, false); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } + /* check that we're not already in lower */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { + display_count = dcn316_get_active_display_cnt_wa(dc, context); + /* if we can go lower, go lower */ + if (display_count == 0) { + union display_idle_optimization_u idle_info = { 0 }; + idle_info.idle_info.df_request_disabled = 1; + idle_info.idle_info.phy_ref_clk_off = 1; + idle_info.idle_info.s0i2_rdy = 1; + dcn316_smu_set_display_idle_optimization(clk_mgr, idle_info.data); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; + } + } + } else { + if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { + dcn316_smu_set_dtbclk(clk_mgr, true); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } + + /* check that we're not already in D0 */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { + union display_idle_optimization_u idle_info = { 0 }; + dcn316_smu_set_display_idle_optimization(clk_mgr, idle_info.data); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; + } + } + + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { + clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; + dcn316_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz); + } + + if (should_set_clock(safe_to_lower, + new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { + clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; + dcn316_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); + } + + // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
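	/* Illustrative note, not part of the original patch: clock values in
	 * dc_clocks are tracked in kHz, so the 100000 floor applied below is the
	 * 100 MHz mentioned above, and it is applied to both dppclk and dispclk.
	 */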
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + if (new_clocks->dispclk_khz < 100000) + new_clocks->dispclk_khz = 100000; + } + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; + update_dppclk = true; + } + + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + dcn316_disable_otg_wa(clk_mgr_base, true); + + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); + dcn316_disable_otg_wa(clk_mgr_base, false); + + update_dispclk = true; + } + + if (dpp_clock_lowered) { + // increase per DPP DTO before lowering global dppclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); + dcn316_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); + } else { + // increase global DPPCLK before lowering per DPP DTO + if (update_dppclk || update_dispclk) + dcn316_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); + } + + // notify DMCUB of latest clocks + memset(&cmd, 0, sizeof(cmd)); + cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; + cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS; + cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz; + cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz = + clk_mgr_base->clks.dcfclk_deep_sleep_khz; + cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; + cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; + + dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +} + +static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) +{ + return; +} + +static struct clk_bw_params dcn316_bw_params = { + .vram_type = Ddr4MemType, + .num_channels = 1, + .clk_table = { + .num_entries = 5, + }, + +}; + +static struct wm_table ddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 6.09, + .sr_enter_plus_exit_time_us = 7.14, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + } +}; + +static struct wm_table lpddr5_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + 
.sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, + .valid = true, + }, + } +}; + +static DpmClocks_316_t dummy_clocks; + +static struct dcn316_watermarks dummy_wms = { 0 }; + +static void dcn316_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn316_watermarks *table) +{ + int i, num_valid_sets; + + num_valid_sets = 0; + + for (i = 0; i < WM_SET_COUNT; i++) { + /* skip empty entries, the smu array has no holes*/ + if (!bw_params->wm_table.entries[i].valid) + continue; + + table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; + /* We will not select WM based on fclk, so leave it as unconstrained */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; + + if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { + if (i == 0) + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; + else { + /* add 1 to make it non-overlapping with next lvl */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = + bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; + } + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk = + bw_params->clk_table.entries[i].dcfclk_mhz; + + } else { + /* unconstrained for memory retraining */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; + + /* Modify previous watermark range to cover up to max */ + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; + } + num_valid_sets++; + } + + ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ + + /* modify the min and max to make sure we cover the whole range*/ + table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0; + table->WatermarkRow[WM_DCFCLK][0].MinClock = 0; + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF; + table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; + + /* This is for writeback only, does not matter currently as no writeback support*/ + table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A; + table->WatermarkRow[WM_SOCCLK][0].MinClock = 0; + table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF; + table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0; + table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF; +} + +static void dcn316_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct clk_mgr_dcn316 *clk_mgr_dcn316 = TO_CLK_MGR_DCN316(clk_mgr); + struct dcn316_watermarks *table = clk_mgr_dcn316->smu_wm_set.wm_set; + + if (!clk_mgr->smu_ver) + return; + + if (!table || clk_mgr_dcn316->smu_wm_set.mc_address.quad_part == 0) + return; + + memset(table, 0, sizeof(*table)); + + dcn316_build_watermark_ranges(clk_mgr_base->bw_params, table); + + dcn316_smu_set_dram_addr_high(clk_mgr, + clk_mgr_dcn316->smu_wm_set.mc_address.high_part); + dcn316_smu_set_dram_addr_low(clk_mgr, + clk_mgr_dcn316->smu_wm_set.mc_address.low_part); + dcn316_smu_transfer_wm_table_dram_2_smu(clk_mgr); +} + +static void 
dcn316_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, + struct dcn316_smu_dpm_clks *smu_dpm_clks) +{ + DpmClocks_316_t *table = smu_dpm_clks->dpm_clks; + + if (!clk_mgr->smu_ver) + return; + + if (!table || smu_dpm_clks->mc_address.quad_part == 0) + return; + + memset(table, 0, sizeof(*table)); + + dcn316_smu_set_dram_addr_high(clk_mgr, + smu_dpm_clks->mc_address.high_part); + dcn316_smu_set_dram_addr_low(clk_mgr, + smu_dpm_clks->mc_address.low_part); + dcn316_smu_transfer_dpm_table_smu_2_dram(clk_mgr); +} + +static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) +{ + uint32_t max = 0; + int i; + + for (i = 0; i < num_clocks; ++i) { + if (clocks[i] > max) + max = clocks[i]; + } + + return max; +} + +static unsigned int find_clk_for_voltage( + const DpmClocks_316_t *clock_table, + const uint32_t clocks[], + unsigned int voltage) +{ + int i; + int max_voltage = 0; + int clock = 0; + + for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) { + if (clock_table->SocVoltage[i] == voltage) { + return clocks[i]; + } else if (clock_table->SocVoltage[i] >= max_voltage && + clock_table->SocVoltage[i] < voltage) { + max_voltage = clock_table->SocVoltage[i]; + clock = clocks[i]; + } + } + + ASSERT(clock); + return clock; +} + +static void dcn316_clk_mgr_helper_populate_bw_params( + struct clk_mgr_internal *clk_mgr, + struct integrated_info *bios_info, + const DpmClocks_316_t *clock_table) +{ + int i, j; + struct clk_bw_params *bw_params = clk_mgr->base.bw_params; + uint32_t max_dispclk = 0, max_dppclk = 0; + + j = -1; + + ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) { + if (clock_table->DfPstateTable[i].FClk != 0) { + j = i; + break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; + + /* dispclk and dppclk can be max at any voltage, same number of levels for both */ + if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && + clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) { + max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); + max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); + } else { + ASSERT(0); + } + + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + int temp; + + bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage; + switch (clock_table->DfPstateTable[j].WckRatio) { + case WCK_RATIO_1_2: + bw_params->clk_table.entries[i].wck_ratio = 2; + break; + case WCK_RATIO_1_4: + bw_params->clk_table.entries[i].wck_ratio = 4; + break; + default: + bw_params->clk_table.entries[i].wck_ratio = 1; + } + temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage); + if (temp) + bw_params->clk_table.entries[i].dcfclk_mhz = temp; + temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage); + if (temp) + bw_params->clk_table.entries[i].socclk_mhz = temp; + bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; + bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; + } + + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = 
bios_info->ma_channel_number; + + for (i = 0; i < WM_SET_COUNT; i++) { + bw_params->wm_table.entries[i].wm_inst = i; + + if (i >= bw_params->clk_table.num_entries) { + bw_params->wm_table.entries[i].valid = false; + continue; + } + + bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG; + bw_params->wm_table.entries[i].valid = true; + } +} + + + +static struct clk_mgr_funcs dcn316_funcs = { + .enable_pme_wa = dcn316_enable_pme_wa, + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .update_clocks = dcn316_update_clocks, + .init_clocks = dcn31_init_clocks, + .are_clock_states_equal = dcn31_are_clock_states_equal, + .notify_wm_ranges = dcn316_notify_wm_ranges +}; +extern struct clk_mgr_funcs dcn3_fpga_funcs; + +static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) +{ + /* get FbMult value */ + struct fixed31_32 pll_req; + unsigned int fbmult_frac_val = 0; + unsigned int fbmult_int_val = 0; + + /* + * Register value of fbmult is in 8.16 format, we are converting to 31.32 + * to leverage the fix point operations available in driver + */ + + REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ + REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ + + pll_req = dc_fixpt_from_int(fbmult_int_val); + + /* + * since fractional part is only 16 bit in register definition but is 32 bit + * in our fix point definiton, need to shift left by 16 to obtain correct value + */ + pll_req.value |= fbmult_frac_val << 16; + + /* multiply by REFCLK period */ + pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); + + /* integer part is now VCO frequency in kHz */ + return dc_fixpt_floor(pll_req); +} + +void dcn316_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_dcn316 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg) +{ + struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 }; + + clk_mgr->base.base.ctx = ctx; + clk_mgr->base.base.funcs = &dcn316_funcs; + + clk_mgr->base.pp_smu = pp_smu; + + clk_mgr->base.dccg = dccg; + clk_mgr->base.dfs_bypass_disp_clk = 0; + + clk_mgr->base.dprefclk_ss_percentage = 0; + clk_mgr->base.dprefclk_ss_divider = 1000; + clk_mgr->base.ss_on_dprefclk = false; + clk_mgr->base.dfs_ref_freq_khz = 48000; + + clk_mgr->smu_wm_set.wm_set = (struct dcn316_watermarks *)dm_helpers_allocate_gpu_mem( + clk_mgr->base.base.ctx, + DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + sizeof(struct dcn316_watermarks), + &clk_mgr->smu_wm_set.mc_address.quad_part); + + if (!clk_mgr->smu_wm_set.wm_set) { + clk_mgr->smu_wm_set.wm_set = &dummy_wms; + clk_mgr->smu_wm_set.mc_address.quad_part = 0; + } + ASSERT(clk_mgr->smu_wm_set.wm_set); + + smu_dpm_clks.dpm_clks = (DpmClocks_316_t *)dm_helpers_allocate_gpu_mem( + clk_mgr->base.base.ctx, + DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + sizeof(DpmClocks_316_t), + &smu_dpm_clks.mc_address.quad_part); + + if (smu_dpm_clks.dpm_clks == NULL) { + smu_dpm_clks.dpm_clks = &dummy_clocks; + smu_dpm_clks.mc_address.quad_part = 0; + } + + ASSERT(smu_dpm_clks.dpm_clks); + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + clk_mgr->base.base.funcs = &dcn3_fpga_funcs; + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; + } else { + struct clk_log_info log_info = {0}; + + clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + + // Skip this for now as it did not work on DCN315, renable during bring up + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + + /* in 
case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ + + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn316_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn316_bw_params.wm_table = ddr4_wm_table; + } + /* Saved clocks configured at boot for debug purposes */ + dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + + } + + clk_mgr->base.base.dprefclk_khz = 600000; + clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); + clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + dce_clock_read_ss_info(&clk_mgr->base); + clk_mgr->base.dccg->ref_dtbclk_khz = + dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + + clk_mgr->base.base.bw_params = &dcn316_bw_params; + + if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { + dcn316_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); + + if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + dcn316_clk_mgr_helper_populate_bw_params( + &clk_mgr->base, + ctx->dc_bios->integrated_info, + smu_dpm_clks.dpm_clks); + } + } + + if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) + dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + smu_dpm_clks.dpm_clks); +} + +void dcn316_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) +{ + struct clk_mgr_dcn316 *clk_mgr = TO_CLK_MGR_DCN316(clk_mgr_int); + + if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) + dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + clk_mgr->smu_wm_set.wm_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.h new file mode 100644 index 000000000000..864d1f6cef26 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.h @@ -0,0 +1,49 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN316_CLK_MGR_H__ +#define __DCN316_CLK_MGR_H__ +#include "clk_mgr_internal.h" + +struct dcn316_watermarks; + +struct dcn316_smu_watermark_set { + struct dcn316_watermarks *wm_set; + union large_integer mc_address; +}; + +struct clk_mgr_dcn316 { + struct clk_mgr_internal base; + struct dcn316_smu_watermark_set smu_wm_set; +}; + +void dcn316_clk_mgr_construct(struct dc_context *ctx, + struct clk_mgr_dcn316 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg); + +void dcn316_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); + +#endif //__DCN316_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c new file mode 100644 index 000000000000..dceec4b96052 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -0,0 +1,338 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" +#include "dcn316_smu.h" +#include "mp/mp_13_0_8_offset.h" +#include "mp/mp_13_0_8_sh_mask.h" + +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 6 + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; + +#define REG(reg_name) \ + (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + +#define FN(reg_name, field) \ + FD(reg_name##__##field) + +#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team +#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version +#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0 +#define VBIOSSMC_MSG_SetDispclkFreq 0x04 ///< Set display clock frequency in MHZ +#define VBIOSSMC_MSG_Spare1 0x05 ///< Spare1 +#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ +#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ +#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ +#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency +#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ +#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected +#define VBIOSSMC_MSG_SPARE 0x0C ///< SPARE +#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event +#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0x0E ///< Set DRAM address high 32 bits for WM table transfer +#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0x0F ///< Set DRAM address low 32 bits for WM table transfer +#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10 ///< Transfer table from PMFW SRAM to system DRAM +#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11 ///< Transfer table from system DRAM to PMFW +#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12 ///< Set Idle state optimization for display off +#define VBIOSSMC_MSG_GetDprefclkFreq 0x13 ///< Get DPREF clock frequency. Return in MHZ +#define VBIOSSMC_MSG_GetDtbclkFreq 0x14 ///< Get DPREF clock frequency. Return in MHZ +#define VBIOSSMC_MSG_SetDtbclkFreq 0x15 ///< Inform PMFW to turn on/off DTB clock arg = 1, turn DTB clock on 600MHz/ arg = 0 turn DTB clock off +#define VBIOSSMC_Message_Count 0x16 ///< Total number of VBIS and DAL messages + +#define VBIOSSMC_Status_BUSY 0x0 +#define VBIOSSMC_Result_OK 0x01 ///< Message Response OK +#define VBIOSSMC_Result_Failed 0xFF ///< Message Response Failed +#define VBIOSSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command +#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite +#define VBIOSSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. 
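 *
 * Illustrative note, not part of the original patch: with the arguments the
 * callers below pass (delay_us = 10, max_retries = 200000), the worst case
 * is roughly 10 us * 200000 = 2 seconds of polling before the BUSY status
 * is handed back and treated as a timeout.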
+ */ +static uint32_t dcn316_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) +{ + uint32_t res_val = VBIOSSMC_Status_BUSY; + + do { + res_val = REG_READ(MP1_SMN_C2PMSG_91); + if (res_val != VBIOSSMC_Status_BUSY) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + return res_val; +} + +static int dcn316_smu_send_msg_with_param( + struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, unsigned int param) +{ + uint32_t result; + + result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000); + ASSERT(result == VBIOSSMC_Result_OK); + + if (result == VBIOSSMC_Status_BUSY) { + return -1; + } + + /* First clear response register */ + REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY); + + /* Set the parameter register for the SMU message, unit is Mhz */ + REG_WRITE(MP1_SMN_C2PMSG_83, param); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(MP1_SMN_C2PMSG_67, msg_id); + + result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000); + + if (result == VBIOSSMC_Status_BUSY) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } + + return REG_READ(MP1_SMN_C2PMSG_83); +} + +int dcn316_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) +{ + return dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetPmfwVersion, + 0); +} + + +int dcn316_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) +{ + int actual_dispclk_set_mhz = -1; + + if (!clk_mgr->smu_present) + return requested_dispclk_khz; + + /* Unit of SMU msg parameter is Mhz */ + actual_dispclk_set_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDispclkFreq, + khz_to_mhz_ceil(requested_dispclk_khz)); + + return actual_dispclk_set_mhz * 1000; +} + +int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) +{ + int actual_dcfclk_set_mhz = -1; + + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return -1; + + if (!clk_mgr->smu_present) + return requested_dcfclk_khz; + + actual_dcfclk_set_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetHardMinDcfclkByFreq, + khz_to_mhz_ceil(requested_dcfclk_khz)); + + return actual_dcfclk_set_mhz * 1000; +} + +int dcn316_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz) +{ + int actual_min_ds_dcfclk_mhz = -1; + + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return -1; + + if (!clk_mgr->smu_present) + return requested_min_ds_dcfclk_khz; + + actual_min_ds_dcfclk_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetMinDeepSleepDcfclk, + khz_to_mhz_ceil(requested_min_ds_dcfclk_khz)); + + return actual_min_ds_dcfclk_mhz * 1000; +} + +int dcn316_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) +{ + int actual_dppclk_set_mhz = -1; + + if (!clk_mgr->smu_present) + return requested_dpp_khz; + + actual_dppclk_set_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDppclkFreq, + khz_to_mhz_ceil(requested_dpp_khz)); + + return actual_dppclk_set_mhz * 1000; +} + +void dcn316_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info) +{ + if (!clk_mgr->base.ctx->dc->debug.pstate_enabled) + return; + + if (!clk_mgr->smu_present) + return; + + //TODO: Work with smu team to define optimization options. 
+ dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDisplayIdleOptimizations, + idle_info); +} + +void dcn316_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) +{ + union display_idle_optimization_u idle_info = { 0 }; + + if (!clk_mgr->smu_present) + return; + + if (enable) { + idle_info.idle_info.df_request_disabled = 1; + idle_info.idle_info.phy_ref_clk_off = 1; + } + + dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDisplayIdleOptimizations, + idle_info.data); +} + +void dcn316_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high); +} + +void dcn316_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low); +} + +void dcn316_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS); +} + +void dcn316_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param(clk_mgr, + VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS); +} + +void dcn316_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_UpdatePmeRestore, + 0); +} + +/* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */ +void dcn316_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) +{ + if (!clk_mgr->smu_present) + return; + + dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDtbclkFreq, + enable); +} + +int dcn316_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr) +{ + int dprefclk_get_mhz = -1; + + if (clk_mgr->smu_present) { + dprefclk_get_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetDprefclkFreq, + 0); + } + return (dprefclk_get_mhz * 1000); +} + +int dcn316_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr) +{ + int fclk_get_mhz = -1; + + if (clk_mgr->smu_present) { + fclk_get_mhz = dcn316_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_GetFclkFrequency, + 0); + } + return (fclk_get_mhz * 1000); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h new file mode 100644 index 000000000000..2a7293f66515 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h @@ -0,0 +1,139 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DAL_DC_316_SMU_H_ +#define DAL_DC_316_SMU_H_ +#include "os_types.h" + +#define PMFW_DRIVER_IF_VERSION 4 + +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_VCN_DPM_LEVELS 8 +#define NUM_SOC_VOLTAGE_LEVELS 8 +#define NUM_DF_PSTATE_LEVELS 4 + +typedef struct { + uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MinMclk; + uint16_t MaxMclk; + uint8_t WmSetting; + uint8_t WmType; // Used for normal pstate change or memory retraining + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 +#define WM_PSTATE_CHG 0 +#define WM_RETRAINING 1 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef enum{ + WCK_RATIO_1_1 = 0, // DDR5, Wck:ck is always 1:1; + WCK_RATIO_1_2, + WCK_RATIO_1_4, + WCK_RATIO_MAX +} WCK_RATIO_e; + +typedef struct { + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; +} DfPstateTable_t; + +//Freq in MHz +//Voltage in milli volts with 2 fractional bits +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_316_t; + +struct dcn316_watermarks { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + uint32_t MmHubPadding[7]; // SMU internal use +}; + +struct dcn316_smu_dpm_clks { + DpmClocks_316_t *dpm_clks; + union large_integer mc_address; +}; + +#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS +#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS + +struct display_idle_optimization { + unsigned int df_request_disabled : 1; + unsigned int phy_ref_clk_off : 1; + unsigned int s0i2_rdy : 1; + unsigned int reserved : 29; +}; + +union display_idle_optimization_u { + struct display_idle_optimization idle_info; + uint32_t data; +}; + +int dcn316_smu_get_smu_version(struct clk_mgr_internal *clk_mgr); +int dcn316_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); +int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz); +int dcn316_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); +int dcn316_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); +void 
dcn316_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info); +void dcn316_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); +void dcn316_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high); +void dcn316_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low); +void dcn316_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr); +void dcn316_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn316_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); +void dcn316_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); +void dcn316_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); +int dcn316_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr); +int dcn316_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr); + +#endif /* DAL_DC_316_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d18e9f3ea998..f6e19efea756 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -76,6 +76,8 @@ #include "dc_trace.h" +#include "dce/dmub_outbox.h" + #define CTX \ dc->ctx @@ -985,10 +987,13 @@ static bool dc_construct(struct dc *dc, goto fail; #ifdef CONFIG_DRM_AMD_DC_DCN dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; -#endif - if (dc->res_pool->funcs->update_bw_bounding_box) + if (dc->res_pool->funcs->update_bw_bounding_box) { + DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + DC_FP_END(); + } +#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -1077,7 +1082,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) break; } } - if (!should_disable && pipe_split_change) + if (!should_disable && pipe_split_change && + dc->current_state->stream_count != context->stream_count) should_disable = true; if (should_disable && old_stream) { @@ -1469,7 +1475,7 @@ static bool context_changed( return false; } -bool dc_validate_seamless_boot_timing(const struct dc *dc, +bool dc_validate_boot_timing(const struct dc *dc, const struct dc_sink *sink, struct dc_crtc_timing *crtc_timing) { @@ -1685,6 +1691,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c struct pipe_ctx *pipe; int i, k, l; struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; + struct dc_state *old_state; #if defined(CONFIG_DRM_AMD_DC_DCN) dc_z10_restore(dc); @@ -1803,10 +1810,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; - dc_release_state(dc->current_state); - + old_state = dc->current_state; dc->current_state = context; + dc_release_state(old_state); + dc_retain_state(dc->current_state); return result; @@ -2379,10 +2387,8 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->dsc_config) su_flags->bits.dsc_changed = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; -#endif if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2724,6 +2730,9 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->vsp_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + 
dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); } if (stream_update->hdr_static_metadata && @@ -2761,14 +2770,12 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->dsc_config) dp_update_dsc_config(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (stream_update->mst_bw_update) { if (stream_update->mst_bw_update->is_increase) dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); else dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); } -#endif if (stream_update->pending_test_pattern) { dc_link_dp_set_test_pattern(stream->link, @@ -3356,6 +3363,19 @@ bool dc_is_dmcu_initialized(struct dc *dc) return false; } +bool dc_is_oem_i2c_device_present( + struct dc *dc, + size_t slave_address) +{ + if (dc->res_pool->oem_device) + return dce_i2c_oem_device_present( + dc->res_pool, + dc->res_pool->oem_device, + slave_address); + + return false; +} + bool dc_submit_i2c( struct dc *dc, uint32_t link_index, @@ -3704,13 +3724,23 @@ void dc_hardware_release(struct dc *dc) } #endif -/** - * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled - * @dc: dc structure +/* + ***************************************************************************** + * Function: dc_is_dmub_outbox_supported - + * + * @brief + * Checks whether DMUB FW supports outbox notifications, if supported + * DM should register outbox interrupt prior to actually enabling interrupts + * via dc_enable_dmub_outbox * - * Returns: True to enable dmub notifications, False otherwise + * @param + * [in] dc: dc structure + * + * @return + * True if DMUB FW supports outbox notifications, False otherwise + ***************************************************************************** */ -bool dc_enable_dmub_notifications(struct dc *dc) +bool dc_is_dmub_outbox_supported(struct dc *dc) { #if defined(CONFIG_DRM_AMD_DC_DCN) /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */ @@ -3723,6 +3753,48 @@ bool dc_enable_dmub_notifications(struct dc *dc) return dc->debug.enable_dmub_aux_for_legacy_ddc; } +/* + ***************************************************************************** + * Function: dc_enable_dmub_notifications + * + * @brief + * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox + * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. + * This API shall be removed after switching. 
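 *
 * Illustrative call order only, not part of the original patch; the
 * interrupt-registration step is DM-specific and the helper name below is
 * a placeholder:
 *
 *   if (dc_is_dmub_outbox_supported(dc)) {
 *           register_dmub_outbox_irq();   // DM-side step, placeholder name
 *           dc_enable_dmub_outbox(dc);
 *   }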
+ * + * @param + * [in] dc: dc structure + * + * @return + * True if DMUB FW supports outbox notifications, False otherwise + ***************************************************************************** + */ +bool dc_enable_dmub_notifications(struct dc *dc) +{ + return dc_is_dmub_outbox_supported(dc); +} + +/** + ***************************************************************************** + * Function: dc_enable_dmub_outbox + * + * @brief + * Enables DMUB unsolicited notifications to x86 via outbox + * + * @param + * [in] dc: dc structure + * + * @return + * None + ***************************************************************************** + */ +void dc_enable_dmub_outbox(struct dc *dc) +{ + struct dc_context *dc_ctx = dc->ctx; + + dmub_enable_outbox_notification(dc_ctx->dmub_srv); +} + /** * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message * Sets port index appropriately for legacy DDC @@ -3826,7 +3898,7 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, * [in] payload: aux payload * [out] notify: set_config immediate reply * - * @return + * @return * True if successful, False if failure ***************************************************************************** */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b5e570d33ca9..cb87dd643180 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -50,6 +50,7 @@ #include "inc/hw/panel_cntl.h" #include "inc/link_enc_cfg.h" #include "inc/link_dpcd.h" +#include "link/link_dp_trace.h" #include "dc/dcn30/dcn30_vpg.h" @@ -720,35 +721,8 @@ static bool detect_dp(struct dc_link *link, sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; if (!detect_dp_sink_caps(link)) return false; - if (is_mst_supported(link)) { - sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - link->type = dc_connection_mst_branch; - dal_ddc_service_set_transaction_type(link->ddc, - sink_caps->transaction_type); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock - * reports DSC support. - */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - link->type == dc_connection_mst_branch && - link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && - link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && - !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) - link->wa_flags.dpia_mst_dsc_always_on = true; -#endif - -#if defined(CONFIG_DRM_AMD_DC_HDCP) - /* In case of fallback to SST when topology discovery below fails - * HDCP caps will be querried again later by the upper layer (caller - * of this function). 
*/ - query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link); -#endif - } - - if (link->type != dc_connection_mst_branch && - is_dp_branch_device(link)) + if (is_dp_branch_device(link)) /* DP SST branch */ link->type = dc_connection_sst_branch; } else { @@ -757,6 +731,7 @@ static bool detect_dp(struct dc_link *link, sink_caps, audio_support); link->dpcd_caps.dongle_type = sink_caps->dongle_type; + link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; link->dpcd_caps.dpcd_rev.raw = 0; } @@ -824,15 +799,214 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link) return false; } -/* - * dc_link_detect() - Detect if a sink is attached to a given link +static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif +} + +static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; +} + +static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Connected\n", + link->link_index); + + apply_dpia_mst_dsc_always_on_wa(link); + link->type = dc_connection_mst_branch; + dm_helpers_dp_update_branch_info(link->ctx, link); + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { + link_disconnect_sink(link); + } else { + link->type = dc_connection_sst_branch; + } + + return link->type == dc_connection_mst_branch; +} + +static bool reset_cur_dp_mst_topology(struct dc_link *link) +{ + bool result = false; + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Disconnected\n", + link->link_index); + + revert_dpia_mst_dsc_always_on_wa(link); + result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); + + link->mst_stream_alloc_table.stream_count = 0; + memset(link->mst_stream_alloc_table.stream_allocations, + 0, + sizeof(link->mst_stream_alloc_table.stream_allocations)); + return result; +} + +static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, + enum dc_detect_reason reason) +{ + int i; + bool can_apply_seamless_boot = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } + + return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; +} + +static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_restore(dc); +#endif + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +} + +static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); +} + +static void set_all_streams_dpms_off_for_link(struct 
dc_link *link) +{ + int i; + struct pipe_ctx *pipe_ctx; + struct dc_stream_update stream_update; + bool dpms_off = true; + struct link_resource link_res = {0}; + + memset(&stream_update, 0, sizeof(stream_update)); + stream_update.dpms_off = &dpms_off; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + stream_update.stream = pipe_ctx->stream; + dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, + pipe_ctx->stream, &stream_update, + link->ctx->dc->current_state); + } + } + + /* link can be also enabled by vbios. In this case it is not recorded + * in pipe_ctx. Disable link phy here to make sure it is completely off + */ + dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +static void verify_link_capability_destructive(struct dc_link *link, + struct dc_sink *sink, + enum dc_detect_reason reason) +{ + bool should_prepare_phy_clocks = + should_prepare_phy_clocks_for_link_verification(link->dc, reason); + + if (should_prepare_phy_clocks) + prepare_phy_clocks_for_destructive_link_verification(link->dc); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + struct dc_link_settings known_limit_link_setting = + dp_get_max_link_cap(link); + set_all_streams_dpms_off_for_link(link); + dp_verify_link_cap_with_retries( + link, &known_limit_link_setting, + LINK_TRAINING_MAX_VERIFY_RETRY); + } else { + ASSERT(0); + } + + if (should_prepare_phy_clocks) + restore_phy_clocks_for_destructive_link_verification(link->dc); +} + +static void verify_link_capability_non_destructive(struct dc_link *link) +{ + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + if (dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* TODO - should we check link encoder's max link caps here? + * How do we know which link encoder to check from? + */ + link->verified_link_cap = link->reported_link_cap; + else + link->verified_link_cap = dp_get_max_link_cap(link); + } +} + +static bool should_verify_link_capability_destructively(struct dc_link *link, + enum dc_detect_reason reason) +{ + bool destrictive = false; + struct dc_link_settings max_link_cap; + bool is_link_enc_unavailable = link->link_enc && + link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail( + link->ctx->dc, + link->link_enc->preferred_engine, + link); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + max_link_cap = dp_get_max_link_cap(link); + destrictive = true; + + if (link->dc->debug.skip_detection_link_training || + dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + destrictive = false; + } else if (dp_get_link_encoding_format(&max_link_cap) == + DP_8b_10b_ENCODING) { + if (link->dpcd_caps.is_mst_capable || + is_link_enc_unavailable) { + destrictive = false; + } + } + } else if (dc_is_hdmi_signal(link->local_sink->sink_signal)) + destrictive = true; + + return destrictive; +} + +static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, + enum dc_detect_reason reason) +{ + if (should_verify_link_capability_destructively(link, reason)) + verify_link_capability_destructive(link, sink, reason); + else + verify_link_capability_non_destructive(link); +} + + +/** + * detect_link_and_local_sink() - Detect if a sink is attached to a given link * * link->local_sink is created or destroyed as needed. 
* - * This does not create remote sinks but will trigger DM - * to start MST detection if a branch is detected. + * This does not create remote sinks. */ -static bool dc_link_detect_helper(struct dc_link *link, +static bool detect_link_and_local_sink(struct dc_link *link, enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; @@ -848,9 +1022,7 @@ static bool dc_link_detect_helper(struct dc_link *link, struct dpcd_caps prev_dpcd_caps; enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type pre_connection_type = dc_connection_none; - bool perform_dp_seamless_boot = false; const uint32_t post_oui_delay = 30; // 30ms - struct link_resource link_res = { 0 }; DC_LOGGER_INIT(link->ctx->logger); @@ -862,7 +1034,8 @@ static bool dc_link_detect_helper(struct dc_link *link, (!link->dc->config.allow_edp_hotplug_detection)) && link->local_sink) { // need to re-write OUI and brightness in resume case - if (link->connector_signal == SIGNAL_TYPE_EDP) { + if (link->connector_signal == SIGNAL_TYPE_EDP && + (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); dc_link_set_default_brightness_aux(link); @@ -943,61 +1116,6 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) - link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( - &link->dc->current_state->res_ctx, - link->dc->res_pool, - link); -#endif - - if (link->type == dc_connection_mst_branch) { - LINK_INFO("link=%d, mst branch is now Connected\n", - link->link_index); - /* Need to setup mst link_cap struct here - * otherwise dc_link_detect() will leave mst link_cap - * empty which leads to allocate_mst_payload() has "0" - * pbn_per_slot value leading to exception on dc_fixpt_div() - */ - dp_verify_mst_link_cap(link, &link_res); - - /* - * This call will initiate MST topology discovery. Which - * will detect MST ports and add new DRM connector DRM - * framework. Then read EDID via remote i2c over aux. In - * the end, will notify DRM detect result and save EDID - * into DRM framework. - * - * .detect is called by .fill_modes. - * .fill_modes is called by user mode ioctl - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * .get_modes is called by .fill_modes. - * - * call .get_modes, AMDGPU DM implementation will create - * new dc_sink and add to dc_link. For long HPD plug - * in/out, MST has its own handle. - * - * Therefore, just after dc_create, link->sink is not - * created for MST until user mode app calls - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * Need check ->sink usages in case ->sink = NULL - * TODO: s3 resume check - */ - - dm_helpers_dp_update_branch_info(link->ctx, link); - if (dm_helpers_dp_mst_start_top_mgr(link->ctx, - link, reason == DETECT_REASON_BOOT)) { - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } else { - link->type = dc_connection_sst_branch; - sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT; - } - } - /* Active SST downstream branch device unplug*/ if (link->type == dc_connection_sst_branch && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { @@ -1018,19 +1136,6 @@ static bool dc_link_detect_helper(struct dc_link *link, if (pre_connection_type == dc_connection_mst_branch && link->type != dc_connection_mst_branch) dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - - - // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. 
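As a rough illustration of the decision introduced above in should_verify_link_capability_destructively(), the following standalone sketch mirrors the shape of the checks only; the types and names (signal_kind, dp_encoding, link_info, needs_destructive_verify) are invented stand-ins, not driver definitions:

#include <stdbool.h>

/* Simplified stand-ins for the driver's signal and encoding enums. */
enum signal_kind { SIG_DP, SIG_EDP, SIG_HDMI, SIG_OTHER };
enum dp_encoding { ENC_8B_10B, ENC_128B_132B };

struct link_info {
	enum signal_kind sink_signal;
	enum dp_encoding max_cap_encoding;  /* encoding of the max reported link cap */
	bool is_embedded;                   /* eDP / embedded panel */
	bool is_usb4_dpia;                  /* DP tunneled over USB4 */
	bool is_mst_capable;
	bool link_enc_unavailable;          /* no DIG free for a detection-time training */
	bool skip_detection_lt;             /* debug override */
};

/*
 * HDMI and full-rate discrete DP links get a destructive (link-training)
 * verify; embedded panels, DPIA links, and 8b/10b MST-capable sinks (or
 * links without an available encoder) take the non-destructive path.
 */
static bool needs_destructive_verify(const struct link_info *l)
{
	if (l->sink_signal == SIG_HDMI)
		return true;
	if (l->sink_signal != SIG_DP && l->sink_signal != SIG_EDP)
		return false;
	if (l->skip_detection_lt || l->is_embedded || l->is_usb4_dpia)
		return false;
	if (l->max_cap_encoding == ENC_8B_10B &&
	    (l->is_mst_capable || l->link_enc_unavailable))
		return false;
	return true;
}

The point of the split is that only links in the "destructive" bucket pay for the DPMS-off plus retrained-verify sequence; everything else just adopts the reported caps.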
- if (reason == DETECT_REASON_BOOT && - !dc_ctx->dc->config.power_down_display_on_boot && - link->link_status.link_active) - perform_dp_seamless_boot = true; - - if (perform_dp_seamless_boot) { - read_current_link_settings_on_detect(link); - link->verified_link_cap = link->reported_link_cap; - } - break; } @@ -1078,6 +1183,9 @@ static bool dc_link_detect_helper(struct dc_link *link, case EDID_BAD_CHECKSUM: DC_LOG_ERROR("EDID checksum invalid.\n"); break; + case EDID_PARTIAL_VALID: + DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); + break; case EDID_NO_RESPONSE: DC_LOG_ERROR("No EDID read.\n"); /* @@ -1095,6 +1203,22 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; } + + if (link->type == dc_connection_sst_branch && + link->dpcd_caps.dongle_type == + DISPLAY_DONGLE_DP_VGA_CONVERTER && + reason == DETECT_REASON_HPDRX) { + /* Abort detection for DP-VGA adapters when EDID + * can't be read and detection reason is VGA-side + * hotplug + */ + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); + + return true; + } + break; default: break; @@ -1119,13 +1243,6 @@ static bool dc_link_detect_helper(struct dc_link *link, #if defined(CONFIG_DRM_AMD_DC_HDCP) query_hdcp_capability(sink->sink_signal, link); #endif - - // verify link cap for SST non-seamless boot - if (!perform_dp_seamless_boot) - dp_verify_link_cap_with_retries(link, - &link_res, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1143,6 +1260,9 @@ static bool dc_link_detect_helper(struct dc_link *link, !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) + dp_trace_init(link); + /* Connectivity log: detection */ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { CONN_DATA_DETECT(link, @@ -1185,27 +1305,6 @@ static bool dc_link_detect_helper(struct dc_link *link, } } else { /* From Connected-to-Disconnected. */ - if (link->type == dc_connection_mst_branch) { - LINK_INFO("link=%d, mst branch is now Disconnected\n", - link->link_index); - - /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
*/ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - link->wa_flags.dpia_mst_dsc_always_on = false; - - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - - link->mst_stream_alloc_table.stream_count = 0; - memset(link->mst_stream_alloc_table.stream_allocations, - 0, - sizeof(link->mst_stream_alloc_table.stream_allocations)); - } - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - reset_dp_hpo_stream_encoders_for_link(link); -#endif - link->type = dc_connection_none; sink_caps.signal = SIGNAL_TYPE_NONE; /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk @@ -1214,6 +1313,9 @@ static bool dc_link_detect_helper(struct dc_link *link, * Clear dongle_max_pix_clk on disconnect to fix this */ link->dongle_max_pix_clk = 0; + + dc_link_clear_dprx_states(link); + dp_trace_reset(link); } LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", @@ -1230,33 +1332,26 @@ static bool dc_link_detect_helper(struct dc_link *link, bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { - const struct dc *dc = link->dc; - bool ret; - bool can_apply_seamless_boot = false; - int i; + bool is_local_sink_detect_success; + bool is_delegated_to_mst_top_mgr = false; + enum dc_connection_type pre_link_type = link->type; - for (i = 0; i < dc->current_state->stream_count; i++) { - if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { - can_apply_seamless_boot = true; - break; - } - } + is_local_sink_detect_success = detect_link_and_local_sink(link, reason); -#if defined(CONFIG_DRM_AMD_DC_DCN) - dc_z10_restore(dc); -#endif + if (is_local_sink_detect_success && link->local_sink) + verify_link_capability(link, link->local_sink, reason); - /* get out of low power state */ - if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) - clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + if (is_local_sink_detect_success && link->local_sink && + dc_is_dp_signal(link->local_sink->sink_signal) && + link->dpcd_caps.is_mst_capable) + is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); - ret = dc_link_detect_helper(link, reason); + if (is_local_sink_detect_success && + pre_link_type == dc_connection_mst_branch && + link->type != dc_connection_mst_branch) + is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link); - /* Go back to power optimized state */ - if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) - clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); - - return ret; + return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; } bool dc_link_get_hpd_state(struct dc_link *dc_link) @@ -1586,9 +1681,7 @@ static bool dc_link_construct_legacy(struct dc_link *link, } DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); -#endif /* Update link encoder tracking variables. These are used for the dynamic * assignment of link encoders to streams. @@ -1871,7 +1964,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) do_fallback = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* * Temporary w/a to get DP2.0 link rates to work with SST. * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
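The hunks above replace the old monolithic dc_link_detect_helper() flow with a fixed sequence. A shape-only C sketch of that ordering follows; the prototypes are hypothetical stand-ins (left undefined here) for detect_link_and_local_sink(), verify_link_capability(), discover_dp_mst_topology() and reset_cur_dp_mst_topology():

#include <stdbool.h>

struct link;                         /* opaque stand-in for struct dc_link */
enum detect_reason { REASON_BOOT, REASON_HPD, REASON_RESUME };

/* Hypothetical stand-ins for the static helpers in dc_link.c. */
bool detect_local_sink(struct link *l, enum detect_reason r);
bool have_local_sink(const struct link *l);
bool sink_is_dp_and_mst_capable(const struct link *l);
bool was_mst_branch(const struct link *l);
bool is_mst_branch(const struct link *l);
void verify_caps(struct link *l, enum detect_reason r);
bool start_mst_topology(struct link *l, enum detect_reason r);
bool stop_mst_topology(struct link *l);

/*
 * Detect the local sink first, then verify link capability, then hand
 * MST-capable DP sinks to the topology manager, and finally tear down a
 * stale MST topology if the link is no longer an MST branch.  Detection
 * "succeeds" only when it was not delegated to the MST manager.
 */
bool detect(struct link *l, enum detect_reason r)
{
	bool prev_mst = was_mst_branch(l);
	bool ok = detect_local_sink(l, r);
	bool delegated = false;

	if (ok && have_local_sink(l))
		verify_caps(l, r);

	if (ok && have_local_sink(l) && sink_is_dp_and_mst_capable(l))
		delegated = start_mst_topology(l, r);

	if (ok && prev_mst && !is_mst_branch(l))
		delegated = stop_mst_topology(l);

	return ok && !delegated;
}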
@@ -1881,7 +1973,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, link->dc->debug.set_mst_en_for_sst) { dp_enable_mst_on_sink(link, true); } -#endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ @@ -1889,7 +1980,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, link->dc->hwss.edp_wait_for_hpd_ready(link, true); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) { /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ } else { @@ -1899,19 +1989,15 @@ static enum dc_status enable_link_dp(struct dc_state *state, state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); } -#else - pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = - link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, - state, false); -#endif // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); if (link->dpcd_sink_ext_caps.raw != 0) msleep(post_oui_delay); + // similarly, mode switch can cause loss of cable ID + dpcd_write_cable_id_to_dprx(link); + skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) @@ -1934,12 +2020,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, else fec_enable = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) dp_set_fec_enable(link, fec_enable); -#else - dp_set_fec_enable(link, fec_enable); -#endif // during mode set we do DP_SET_POWER off then on, aux writes are lost if (link->dpcd_sink_ext_caps.bits.oled == 1 || @@ -2495,9 +2577,7 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_ if (dc_is_dp_signal(signal)) { /* SST DP, eDP */ -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link_settings link_settings = link->cur_link_settings; -#endif if (dc_is_dp_sst_signal(signal)) dp_disable_link_phy(link, link_res, signal); else @@ -2505,15 +2585,10 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_ if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { dp_set_fec_enable(link, false); dp_set_fec_ready(link, link_res, false); } -#else - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, link_res, false); -#endif } } else { if (signal != SIGNAL_TYPE_VIRTUAL) @@ -2696,72 +2771,74 @@ static bool dp_active_dongle_validate_timing( break; } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && dongle_caps->extendedCapValid == true) { -#else - if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || - dongle_caps->extendedCapValid == false) - return true; -#endif - - /* Check Pixel Encoding */ - switch (timing->pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - break; - case PIXEL_ENCODING_YCBCR422: - if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) - return false; - break; - case PIXEL_ENCODING_YCBCR420: - if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + /* Check Pixel Encoding */ + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + break; + case PIXEL_ENCODING_YCBCR422: + if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) + return false; + break; + case 
PIXEL_ENCODING_YCBCR420: + if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + return false; + break; + default: + /* Invalid Pixel Encoding*/ return false; - break; - default: - /* Invalid Pixel Encoding*/ - return false; - } + } - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - case COLOR_DEPTH_888: - /*888 and 666 should always be supported*/ - break; - case COLOR_DEPTH_101010: - if (dongle_caps->dp_hdmi_max_bpc < 10) + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + case COLOR_DEPTH_888: + /*888 and 666 should always be supported*/ + break; + case COLOR_DEPTH_101010: + if (dongle_caps->dp_hdmi_max_bpc < 10) + return false; + break; + case COLOR_DEPTH_121212: + if (dongle_caps->dp_hdmi_max_bpc < 12) + return false; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* These color depths are currently not supported */ return false; - break; - case COLOR_DEPTH_121212: - if (dongle_caps->dp_hdmi_max_bpc < 12) + } + + /* Check 3D format */ + switch (timing->timing_3d_format) { + case TIMING_3D_FORMAT_NONE: + case TIMING_3D_FORMAT_FRAME_ALTERNATE: + /*Only frame alternate 3D is supported on active dongle*/ + break; + default: + /*other 3D formats are not supported due to bad infoframe translation */ return false; - break; - case COLOR_DEPTH_141414: - case COLOR_DEPTH_161616: - default: - /* These color depths are currently not supported */ - return false; - } + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter - struct dc_crtc_timing outputTiming = *timing; + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; - if (timing->flags.DSC && !timing->dsc_cfg.is_frl) - /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ - outputTiming.flags.DSC = 0; - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) - return false; - } else { // DP to HDMI TMDS converter + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; + if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } +#else if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; - } -#else - if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) - return false; #endif - -#if defined(CONFIG_DRM_AMD_DC_DCN) } if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && @@ -2842,7 +2919,6 @@ static bool dp_active_dongle_validate_timing( return false; } } -#endif return true; } @@ -3197,9 +3273,16 @@ bool dc_link_setup_psr(struct dc_link *link, /*skip power down the single pipe since it blocks the cstate*/ #if defined(CONFIG_DRM_AMD_DC_DCN) if (link->ctx->asic_id.chip_family >= FAMILY_RV) { - psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; - if (link->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && !dc->debug.disable_z10) - psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false; + switch(link->ctx->asic_id.chip_family) { + case FAMILY_YELLOW_CARP: + case AMDGPU_FAMILY_GC_10_3_6: + if(!dc->debug.disable_z10) + 
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false; + break; + default: + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; + break; + } } #else if (link->ctx->asic_id.chip_family >= FAMILY_RV) @@ -3315,9 +3398,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) static void update_mst_stream_alloc_table( struct dc_link *link, struct stream_encoder *stream_enc, -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? -#endif const struct dp_mst_stream_allocation_table *proposed_table) { struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; @@ -3353,9 +3434,7 @@ static void update_mst_stream_alloc_table( work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; work_table[i].stream_enc = stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; -#endif } } @@ -3366,7 +3445,7 @@ static void update_mst_stream_alloc_table( link->mst_stream_alloc_table.stream_allocations[i] = work_table[i]; } -#if defined(CONFIG_DRM_AMD_DC_DCN) + static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) { const uint32_t VCP_Y_PRECISION = 1000; @@ -3402,6 +3481,8 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; struct link_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; + const struct dc_link_settings empty_link_settings = {0}; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* slot X.Y for SST payload deallocate */ @@ -3410,10 +3491,13 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, - avg_time_slots_per_mtp); + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, + avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); } /* calculate VC payload and update branch with new payload allocation table*/ @@ -3457,10 +3541,13 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, - avg_time_slots_per_mtp); + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, + avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); } /* Always return DC_OK. 
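The payload updates in these hunks all revolve around one ratio: the average number of MTP time slots a stream needs, avg_time_slots_per_mtp = pbn / pbn_per_slot, which dc_log_vcp_x_y() reports as a "slot X.Y" value at 1/1000 precision (VCP_Y_PRECISION). A small standalone sketch of that arithmetic follows; it uses plain 64-bit integer math instead of the driver's fixed31_32 type, and the truncating rounding is only an assumption of this example:

#include <stdint.h>
#include <stdio.h>

#define VCP_Y_PRECISION 1000u

/*
 * Split pbn / pbn_per_slot into whole time slots (X) and thousandths (Y).
 * The driver does this with dc_fixpt_div() on fixed-point values; a x1000
 * integer scale is close enough for illustration.
 */
static void vcp_x_y(uint32_t pbn, uint32_t pbn_per_slot,
		    uint32_t *x, uint32_t *y)
{
	uint64_t scaled = (uint64_t)pbn * VCP_Y_PRECISION / pbn_per_slot;

	*x = (uint32_t)(scaled / VCP_Y_PRECISION);   /* whole slots */
	*y = (uint32_t)(scaled % VCP_Y_PRECISION);   /* fractional part, 1/1000 */
}

int main(void)
{
	uint32_t x, y;

	vcp_x_y(1000, 64, &x, &y);                   /* example numbers only */
	printf("avg_time_slots_per_mtp = %u.%03u\n", x, y);  /* prints 15.625 */
	return 0;
}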
@@ -3468,7 +3555,6 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, */ return DC_OK; } -#endif /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm @@ -3478,24 +3564,17 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; - struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; -#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; int i; enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); - /* Link encoder may have been dynamically assigned to non-physical display endpoint. */ - if (link->ep_type == DISPLAY_ENDPOINT_PHY) - link_encoder = link->link_enc; - else if (link->dc->res_pool->funcs->link_encs_assign) - link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream); + link_encoder = link_enc_cfg_get_link_enc(link); ASSERT(link_encoder); /* enable_link_dp_mst already check link->enabled_stream_count @@ -3508,17 +3587,12 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) stream->ctx, stream, &proposed_table, - true)) { + true)) update_mst_stream_alloc_table( -#if defined(CONFIG_DRM_AMD_DC_DCN) link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); -#else - link, pipe_ctx->stream_res.stream_enc, &proposed_table); -#endif - } else DC_LOG_WARNING("Failed to update" "MST allocation table for" @@ -3531,7 +3605,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " @@ -3544,17 +3617,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#else - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#endif } ASSERT(proposed_table.stream_count > 0); @@ -3574,7 +3636,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) } /* program DP source TX for payload */ -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: link_encoder->funcs->update_mst_stream_allocation_table( @@ -3590,11 +3651,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - link_encoder->funcs->update_mst_stream_allocation_table( - link_encoder, - &link->mst_stream_alloc_table); -#endif /* send down message */ ret = 
dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3617,34 +3673,19 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); -#if defined(CONFIG_DRM_AMD_DC_DCN) - switch (dp_get_link_encoding_format(&link->cur_link_settings)) { - case DP_8b_10b_ENCODING: - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); - break; - case DP_128b_132b_ENCODING: - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); - break; - case DP_UNKNOWN_ENCODING: - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } -#else - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); -#endif return DC_OK; } -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -3653,10 +3694,10 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct link_encoder *link_encoder = link->link_enc; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* decrease throttled vcp size */ @@ -3664,8 +3705,11 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ @@ -3733,10 +3777,10 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct link_encoder *link_encoder = link->link_enc; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* notify immediate branch device table update */ @@ -3795,35 +3839,31 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t pbn_per_slot = get_pbn_per_slot(stream); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + 
link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); return DC_OK; } -#endif static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; - struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; -#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); int i; bool mst_mode = (link->type == dc_connection_mst_branch); + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); - /* Link encoder may have been dynamically assigned to non-physical display endpoint. */ - if (link->ep_type == DISPLAY_ENDPOINT_PHY) - link_encoder = link->link_enc; - else if (link->dc->res_pool->funcs->link_encs_assign) - link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream); + link_encoder = link_enc_cfg_get_link_enc(link); ASSERT(link_encoder); /* deallocate_mst_payload is called before disable link. When mode or @@ -3834,28 +3874,12 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ -#if defined(CONFIG_DRM_AMD_DC_DCN) - switch (dp_get_link_encoding_format(&link->cur_link_settings)) { - case DP_8b_10b_ENCODING: - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); - break; - case DP_128b_132b_ENCODING: - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, avg_time_slots_per_mtp); - break; - case DP_UNKNOWN_ENCODING: - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } -#else - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); -#endif /* TODO: which component is responsible for remove payload table? 
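Several of these hunks swap direct calls on the DIO or HPO encoders for NULL-guarded calls through link_hwss->ext hooks such as set_throttled_vcp_size and set_hblank_min_symbol_width. A toy sketch of that dispatch pattern is below; the struct and function names (link_hooks, update_payload, demo_set_vcp) are invented for illustration and are not the driver's link_hwss definitions:

#include <stdio.h>

struct pipe;                         /* opaque stand-in for struct pipe_ctx */

/* Optional per-link-type hooks; a backend leaves unsupported ones NULL. */
struct link_hooks {
	void (*set_throttled_vcp_size)(struct pipe *p, unsigned int slots_x1000);
	void (*set_hblank_min_symbol_width)(struct pipe *p, unsigned int width);
};

static void demo_set_vcp(struct pipe *p, unsigned int slots_x1000)
{
	(void)p;
	printf("throttled VCP size -> %u.%03u slots\n",
	       slots_x1000 / 1000, slots_x1000 % 1000);
}

/* A backend that has no hblank-min-symbol-width control leaves that hook NULL. */
static const struct link_hooks demo_hooks = {
	.set_throttled_vcp_size = demo_set_vcp,
};

static void update_payload(const struct link_hooks *hooks, struct pipe *p)
{
	/* Callers only check for NULL; they no longer care which encoder type is behind the hook. */
	if (hooks->set_throttled_vcp_size)
		hooks->set_throttled_vcp_size(p, 15625);
	if (hooks->set_hblank_min_symbol_width)
		hooks->set_hblank_min_symbol_width(p, 0);
}

int main(void)
{
	update_payload(&demo_hooks, NULL);
	return 0;
}

The design choice this mirrors is that the 8b/10b versus 128b/132b split moves out of every caller and into the hook table selected by get_link_hwss().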
*/ if (mst_mode) { @@ -3865,16 +3889,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, false)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); -#else - update_mst_stream_alloc_table( - link, pipe_ctx->stream_res.stream_enc, &proposed_table); -#endif } else { DC_LOG_WARNING("Failed to update" @@ -3890,7 +3909,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " @@ -3903,17 +3921,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#else - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#endif } if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { @@ -3930,7 +3937,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) status, mst_alloc_slots, prev_mst_slots_in_use); } -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: link_encoder->funcs->update_mst_stream_allocation_table( @@ -3943,14 +3949,9 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) &link->mst_stream_alloc_table); break; case DP_UNKNOWN_ENCODING: - DC_LOG_ERROR("Failure: unknown encoding format\n"); + DC_LOG_DEBUG("Unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - link_encoder->funcs->update_mst_stream_allocation_table( - link_encoder, - &link->mst_stream_alloc_table); -#endif if (mst_mode) { dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3979,13 +3980,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) return; - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) - link_enc = pipe_ctx->stream->link->link_enc; - else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_stream( - pipe_ctx->stream->ctx->dc, - pipe_ctx->stream); + link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); ASSERT(link_enc); if (link_enc == NULL) return; @@ -3998,21 +3993,18 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) /* stream encoder index */ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; -#endif /* dig back end */ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; /* link encoder index */ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; -#endif + /* dio output index */ 
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; @@ -4027,9 +4019,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; config.mst_enabled = (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; -#endif config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 1 : 0; config.dpms_off = dpms_off; @@ -4041,7 +4031,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) } #endif -#if defined(CONFIG_DRM_AMD_DC_DCN) static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; @@ -4050,15 +4039,17 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi struct fixed31_32 avg_time_slots_per_mtp; uint8_t req_slot_count = 0; uint8_t vc_id = 1; /// VC ID always 1 for SST - struct dc_link_settings link_settings = {0}; + const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); decide_link_settings(stream, &link_settings); stream->link->cur_link_settings = link_settings; - /* Enable clock, Configure lane count, and Enable Link Encoder*/ - enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings); + if (link_hwss->ext.enable_dp_link_output) + link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, + stream->signal, pipe_ctx->clock_source->id, + &link_settings); #ifdef DIAGS_BUILD /* Workaround for FPGA HPO capture DP link data: @@ -4112,16 +4103,11 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi pipe_ctx->link_res.hpo_dp_link_enc, &proposed_table); - pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size( - pipe_ctx->link_res.hpo_dp_link_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc->inst, - avg_time_slots_per_mtp); - - + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); } -#endif void core_link_enable_stream( struct dc_state *state, @@ -4132,31 +4118,32 @@ void core_link_enable_stream( struct dc_link *link = stream->sink->link; enum dc_status status; struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; if (is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; -#endif + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (pipe_ctx->stream->sink) { + if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + pipe_ctx->stream->sink->edid_caps.display_name, + pipe_ctx->stream->signal); + } + } + if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; - if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY) - link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream); - else - link_enc = stream->link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); -#if defined(CONFIG_DRM_AMD_DC_DCN) if 
(!dc_is_virtual_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx)) { -#else - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { -#endif if (link_enc) link_enc->funcs->setup( link_enc, @@ -4167,7 +4154,6 @@ void core_link_enable_stream( stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -4185,14 +4171,6 @@ void core_link_enable_stream( stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); } -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( - pipe_ctx->stream_res.stream_enc, - &stream->timing, - stream->output_color_space, - stream->use_vsc_sdp_for_colorimetry, - stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); -#endif if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); @@ -4206,10 +4184,8 @@ void core_link_enable_stream( pipe_ctx->stream->link->link_state_valid = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); -#endif if (dc_is_dvi_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute( @@ -4229,11 +4205,9 @@ void core_link_enable_stream( pipe_ctx->stream->apply_edp_fast_boot_optimization = false; -#if defined(CONFIG_DRM_AMD_DC_DCN) // Enable VPG before building infoframe if (vpg && vpg->funcs->vpg_poweron) vpg->funcs->vpg_poweron(vpg); -#endif resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -4319,12 +4293,8 @@ void core_link_enable_stream( * as a workaround for the incorrect value being applied * from transmitter control. 
*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || is_dp_128b_132b_signal(pipe_ctx))) -#else - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) -#endif if (link_enc) link_enc->funcs->setup( link_enc, @@ -4343,11 +4313,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, true); -#endif dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); @@ -4364,11 +4332,9 @@ void core_link_enable_stream( dc->hwss.enable_audio_stream(pipe_ctx); } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); } -#endif if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); @@ -4385,12 +4351,21 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; if (is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; -#endif + + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + if (pipe_ctx->stream->sink) { + if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + pipe_ctx->stream->sink->edid_caps.display_name, + pipe_ctx->stream->signal); + } + } if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) @@ -4410,11 +4385,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, false); -#endif if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { struct ext_hdmi_settings settings = {0}; @@ -4441,7 +4414,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) } } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && !is_dp_128b_132b_signal(pipe_ctx)) { @@ -4458,27 +4430,18 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } -#else - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - - dc->hwss.disable_stream(pipe_ctx); -#endif if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN) if (vpg && vpg->funcs->vpg_powerdown) vpg->funcs->vpg_powerdown(vpg); -#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -4542,22 +4505,17 @@ void 
dc_link_set_drive_settings(struct dc *dc, { int i; - struct pipe_ctx *pipe = NULL; - const struct link_resource *link_res; + struct link_resource link_res; - link_res = dc_link_get_cur_link_res(link); + for (i = 0; i < dc->link_count; i++) + if (dc->links[i] == link) + break; - for (i = 0; i < MAX_PIPES; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->link) { - if (pipe->stream->link == link) - break; - } - } - if (pipe && link_res) - dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings); - else + if (i >= dc->link_count) ASSERT_CRITICAL(false); + + dc_link_get_cur_link_res(link, &link_res); + dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings); } void dc_link_set_preferred_link_settings(struct dc *dc, @@ -4617,11 +4575,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc, if (link_setting != NULL) { link->preferred_link_setting = *link_setting; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) /* TODO: add dc update for acquiring link res */ skip_immediate_retrain = true; -#endif } else { link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; @@ -4663,7 +4619,6 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_setting) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t total_data_bw_efficiency_x10000 = 0; uint32_t link_rate_per_lane_kbps = 0; @@ -4694,40 +4649,6 @@ uint32_t dc_link_bandwidth_kbps( /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; -#else - uint32_t link_bw_kbps = - link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */ - - link_bw_kbps *= 8; /* 8 bits per byte*/ - link_bw_kbps *= link_setting->lane_count; - - if (dc_link_should_enable_fec(link)) { - /* Account for FEC overhead. - * We have to do it based on caps, - * and not based on FEC being set ready, - * because FEC is set ready too late in - * the process to correctly be picked up - * by mode enumeration. - * - * There's enough zeros at the end of 'kbps' - * that make the below operation 100% precise - * for our purposes. - * 'long long' makes it work even for HDMI 2.1 - * max bandwidth (and much, much bigger bandwidths - * than that, actually). - * - * NOTE: Reducing link BW by 3% may not be precise - * because it may be a stream BT that increases by 3%, and so - * 1/1.03 = 0.970873 factor should have been used instead, - * but the difference is minimal and is in a safe direction, - * which all works well around potential ambiguity of DP 1.4a spec. - */ - long long fec_link_bw_kbps = link_bw_kbps * 970LL; - link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL)); - } - return link_bw_kbps; - -#endif } const struct dc_link_settings *dc_link_get_link_cap( @@ -4752,16 +4673,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link) */ struct link_encoder *link_enc = NULL; - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. 
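The removed branch of dc_link_bandwidth_kbps() above documents the legacy 8b/10b arithmetic, including the ~3% FEC derate (x970/1000). The following standalone sketch reproduces that math for illustration only; the 27000 kHz value of LINK_RATE_REF_FREQ_IN_KHZ is assumed here from the 0.27 Gbps-per-unit DPCD link-rate convention used elsewhere in this file:

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u   /* assumed: 0.27 Gbps per link-rate unit */

/*
 * Legacy 8b/10b bandwidth math, following the removed branch: payload
 * kilobytes/s per lane, times 8 bits, times the lane count, then derated
 * by ~3% when FEC is enabled (as the removed comment explains).
 */
static uint32_t link_bw_kbps_8b10b(uint32_t link_rate, uint32_t lane_count, int fec)
{
	uint64_t kbps = (uint64_t)link_rate * LINK_RATE_REF_FREQ_IN_KHZ;

	kbps *= 8;                          /* bits per byte */
	kbps *= lane_count;
	if (fec)
		kbps = kbps * 970 / 1000;   /* FEC overhead derate */

	return (uint32_t)kbps;
}

int main(void)
{
	/* HBR2 (DPCD value 0x14) over 4 lanes: 17,280,000 kbps raw, 16,761,600 kbps with FEC. */
	printf("no FEC: %u kbps\n", link_bw_kbps_8b10b(0x14, 4, 0));
	printf("FEC:    %u kbps\n", link_bw_kbps_8b10b(0x14, 4, 1));
	return 0;
}

The replacement code reaches the same kind of result through a per-encoding efficiency factor (total_data_bw_efficiency_x10000), which is why the hand-rolled 970/1000 path could be dropped.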
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); return (dc_is_dp_signal(link->connector_signal) && link_enc && @@ -4845,23 +4757,24 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } -const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) +void dc_link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res) { int i; struct pipe_ctx *pipe = NULL; - const struct link_resource *link_res = NULL; + + memset(link_res, 0, sizeof(*link_res)); for (i = 0; i < MAX_PIPES; i++) { pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { if (pipe->stream->link == link) { - link_res = &pipe->link_res; + *link_res = pipe->link_res; break; } } } - return link_res; } /** @@ -4883,9 +4796,8 @@ const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) */ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link *link; - uint8_t i; + uint32_t i; uint32_t hpo_dp_recycle_map = 0; *map = 0; @@ -4903,7 +4815,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) } *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); } -#endif } /** @@ -4926,9 +4837,8 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link *link; - uint8_t i; + uint32_t i; unsigned int available_hpo_dp_count; uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) >> LINK_RES_HPO_DP_REC_MAP__SHIFT; @@ -4964,5 +4874,4 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) } } } -#endif } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 24dc662ec3e4..1d4863763df9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -298,6 +298,9 @@ static uint32_t defer_delay_converter_wa( if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER && link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 && + (link->dpcd_caps.branch_fw_revision[0] < 0x01 || + (link->dpcd_caps.branch_fw_revision[0] == 0x01 && + link->dpcd_caps.branch_fw_revision[1] < 0x40)) && !memcmp(link->dpcd_caps.branch_dev_name, DP_VGA_DONGLE_BRANCH_DEV_NAME, sizeof(link->dpcd_caps.branch_dev_name))) @@ -490,6 +493,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( sink_cap->max_hdmi_pixel_clock = max_tmds_clk * 1000; } + sink_cap->is_dongle_type_one = false; } else { if (is_valid_hdmi_signature == true) { @@ -507,6 +511,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", sink_cap->max_hdmi_pixel_clock / 1000); } + sink_cap->is_dongle_type_one = true; } return; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 61b8f29a0c30..351081f574cb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -27,6 +27,7 @@ #include "dm_helpers.h" #include "opp.h" #include "dsc.h" 
+#include "clk_mgr.h" #include "resource.h" #include "inc/core_types.h" @@ -38,6 +39,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "inc/dc_link_dpia.h" #include "inc/link_enc_cfg.h" +#include "link/link_dp_trace.h" /*Travis*/ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; @@ -50,6 +52,13 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; #include "link_dpcd.h" +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, @@ -62,7 +71,6 @@ enum { POST_LT_ADJ_REQ_TIMEOUT = 200 }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dp_lt_fallback_entry { enum dc_lane_count lane_count; enum dc_link_rate link_rate; @@ -97,16 +105,18 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { {LANE_COUNT_ONE, LINK_RATE_HIGH}, {LANE_COUNT_ONE, LINK_RATE_LOW}, }; -#endif + +static const struct dc_link_settings fail_safe_link_settings = { + .lane_count = LANE_COUNT_ONE, + .link_rate = LINK_RATE_LOW, + .link_spread = LINK_SPREAD_DISABLED, +}; static bool decide_fallback_link_setting( struct dc_link *link, struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result); -static struct dc_link_settings get_common_supported_link_settings( - struct dc_link_settings link_setting_a, - struct dc_link_settings link_setting_b); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); static void override_lane_settings(const struct link_training_settings *lt_settings, @@ -117,7 +127,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; -#if defined(CONFIG_DRM_AMD_DC_DCN) + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { @@ -129,15 +139,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } -#else - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; -#endif + return wait_in_micro_secs; } @@ -145,7 +147,6 @@ static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, const struct dc_link_settings *link_settings) { -#if defined(CONFIG_DRM_AMD_DC_DCN) union training_aux_rd_interval training_rd_interval; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); @@ -174,41 +175,16 @@ static uint32_t get_eq_training_aux_rd_interval( case 6: return 64000; default: return 400; } -#else - union training_aux_rd_interval training_rd_interval; - uint32_t wait_in_micro_secs = 400; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - /* overwrite the delay if rev > 1.1*/ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - /* DP 1.2 or later - retrieve delay through - * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */ - core_link_read_dpcd( - link, - 
DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; - } - - return wait_in_micro_secs; -#endif } void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (wait_in_micro_secs > 1000) msleep(wait_in_micro_secs/1000); else udelay(wait_in_micro_secs); -#else - udelay(wait_in_micro_secs); -#endif DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -236,7 +212,6 @@ enum dpcd_training_patterns case DP_TRAINING_PATTERN_SEQUENCE_4: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_TPS1: dpcd_tr_pattern = DPCD_128b_132b_TPS1; break; @@ -246,7 +221,6 @@ enum dpcd_training_patterns case DP_128b_132b_TPS2_CDS: dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; break; -#endif case DP_TRAINING_PATTERN_VIDEOIDLE: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; break; @@ -289,10 +263,8 @@ static enum dc_dp_training_pattern decide_cr_training_pattern( case DP_8b_10b_ENCODING: default: return DP_TRAINING_PATTERN_SEQUENCE_1; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_ENCODING: return DP_128b_132b_TPS1; -#endif } } @@ -300,19 +272,11 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li const struct dc_link_settings *link_settings) { struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct encoder_feature_support *enc_caps; struct dpcd_caps *rx_caps = &link->dpcd_caps; enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - /* Access link encoder capability based on whether it is statically - * or dynamically assigned to a link. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); enc_caps = &link_enc->features; @@ -335,41 +299,8 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li break; } return pattern; -#else - enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; - struct encoder_feature_support *features; - struct dpcd_caps *dpcd_caps = &link->dpcd_caps; - - /* Access link encoder capability based on whether it is statically - * or dynamically assigned to a link. 
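The surviving decide_eq_training_pattern() logic above picks the highest channel-equalization pattern that both the link encoder and the sink advertise. A minimal sketch of that selection, with struct caps and pick_eq_pattern invented here as simplified stand-ins for the encoder_feature_support and dpcd_caps bits:

#include <stdbool.h>

enum train_pattern { TPS2 = 2, TPS3 = 3, TPS4 = 4 };

struct caps { bool tps3; bool tps4; };

/*
 * Use TPS4 only if both ends support it, otherwise TPS3 if both ends
 * support it, otherwise fall back to TPS2 (always available for 8b/10b).
 */
static enum train_pattern pick_eq_pattern(struct caps enc, struct caps rx)
{
	if (enc.tps4 && rx.tps4)
		return TPS4;
	if (enc.tps3 && rx.tps3)
		return TPS3;
	return TPS2;
}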
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; - ASSERT(link_enc); - features = &link_enc->features; - - if (features->flags.bits.IS_TPS3_CAPABLE) - highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3; - - if (features->flags.bits.IS_TPS4_CAPABLE) - highest_tp = DP_TRAINING_PATTERN_SEQUENCE_4; - - if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED && - highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_4) - return DP_TRAINING_PATTERN_SEQUENCE_4; - - if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED && - highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_3) - return DP_TRAINING_PATTERN_SEQUENCE_3; - - return DP_TRAINING_PATTERN_SEQUENCE_2; -#endif } -#if defined(CONFIG_DRM_AMD_DC_DCN) static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) { uint8_t link_rate = 0; @@ -397,7 +328,6 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) return link_rate; } -#endif static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) { @@ -418,29 +348,6 @@ static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) sizeof(vendor_lttpr_write_data)); } -static void vendor_specific_lttpr_wa_one_end( - struct dc_link *link, - uint8_t retry_count) -{ - const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0}; - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - uint32_t vendor_lttpr_write_address = 0xF004F; - - if (!retry_count) { - if (offset != 0xFF) - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - /* W/A for certain LTTPR to reset their lane settings, part two of two */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data[0], - sizeof(vendor_lttpr_write_data)); - } -} - static void vendor_specific_lttpr_wa_one_two( struct dc_link *link, const uint8_t rate) @@ -467,9 +374,9 @@ static void vendor_specific_lttpr_wa_one_two( } } -static void vendor_specific_lttpr_wa_three( +static void dp_fixed_vs_pe_read_lane_adjust( struct dc_link *link, - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX]) + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) { const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; @@ -511,23 +418,8 @@ static void vendor_specific_lttpr_wa_three( 1); for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3; - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3; - } -} - -static void vendor_specific_lttpr_wa_three_dpcd( - struct dc_link *link, - union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) -{ - union lane_adjust lane_adjust[LANE_COUNT_DP_MAX]; - uint8_t lane = 0; - - vendor_specific_lttpr_wa_three(link, lane_adjust); - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE; - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE; + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; } } @@ -540,10 +432,8 @@ static void vendor_specific_lttpr_wa_four( const uint8_t offset = dp_convert_to_count( 
link->dpcd_caps.lttpr_caps.phy_repeater_cnt); uint32_t vendor_lttpr_write_address = 0xF004F; -#if defined(CONFIG_DRM_AMD_DC_DP2_0) uint8_t sink_status = 0; uint8_t i; -#endif if (offset != 0xFF) vendor_lttpr_write_address += @@ -569,7 +459,6 @@ static void vendor_specific_lttpr_wa_four( sizeof(vendor_lttpr_write_data_two)); } -#if defined(CONFIG_DRM_AMD_DC_DP2_0) /* poll for intra-hop disable */ for (i = 0; i < 10; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && @@ -577,20 +466,26 @@ static void vendor_specific_lttpr_wa_four( break; udelay(1000); } -#endif } -static void vendor_specific_lttpr_wa_five( +static void dp_fixed_vs_pe_set_retimer_lane_settings( struct dc_link *link, const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], uint8_t lane_count) { - const uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + uint32_t vendor_lttpr_write_address = 0xF004F; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint8_t lane = 0; + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); @@ -665,11 +560,7 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LINK_RATE_SET, <_settings->link_settings.link_rate_set, 1); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) rate = get_dpcd_link_rate(<_settings->link_settings); -#else - rate = (uint8_t) (lt_settings->link_settings.link_rate); -#endif if (link->dc->debug.apply_vendor_specific_lttpr_wa && (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) @@ -720,10 +611,8 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( disable_scrabled_data_symbols = 1; break; case DP_TRAINING_PATTERN_SEQUENCE_4: -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_TPS1: case DP_128b_132b_TPS2: -#endif disable_scrabled_data_symbols = 0; break; default: @@ -794,7 +683,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( size_in_bytes); if (is_repeater(link, offset)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -805,7 +693,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, @@ -816,7 +703,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", @@ -825,14 +711,13 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) 
-#endif - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is @@ -850,7 +735,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( (uint8_t *)(lt_settings->dpcd_lane_settings), size_in_bytes); -#if defined(CONFIG_DRM_AMD_DC_DCN) } else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { core_link_write_dpcd( @@ -858,10 +742,9 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_base_lt_offset, dpcd_lt_buffer, sizeof(dpcd_lt_buffer)); -#endif - } else + } else /* write it all in (1 + number-of-lanes)-byte burst*/ - core_link_write_dpcd( + core_link_write_dpcd( link, dpcd_base_lt_offset, dpcd_lt_buffer, @@ -928,13 +811,11 @@ void dp_hw_to_dpcd_lane_settings( (hw_lane_settings[lane].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = hw_lane_settings[lane].FFE_PRESET.settings.level; } -#endif } } @@ -956,13 +837,11 @@ void dp_decide_lane_settings( (enum dc_pre_emphasis)(ln_adjust[lane].bits. 
PRE_EMPHASIS_LANE); } -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { hw_lane_settings[lane].FFE_PRESET.raw = ln_adjust[lane].tx_ffe.PRESET_VALUE; } -#endif } dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); @@ -1013,9 +892,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; -#endif /* Determine what the maximum of the requested settings are*/ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { @@ -1024,12 +901,10 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (lane_settings[lane].FFE_PRESET.settings.level > max_requested.FFE_PRESET.settings.level) max_requested.FFE_PRESET.settings.level = lane_settings[lane].FFE_PRESET.settings.level; -#endif } /* make sure the requested settings are @@ -1039,10 +914,8 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; -#endif /* make sure the pre-emphasis matches the voltage swing*/ if (max_requested.PRE_EMPHASIS > @@ -1055,9 +928,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; -#endif } } @@ -1068,9 +939,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti if (lt_settings->voltage_swing == NULL && lt_settings->pre_emphasis == NULL && -#if defined(CONFIG_DRM_AMD_DC_DCN) lt_settings->ffe_preset == NULL && -#endif lt_settings->post_cursor2 == NULL) return; @@ -1082,10 +951,8 @@ static void override_lane_settings(const struct link_training_settings *lt_setti lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; if (lt_settings->post_cursor2) lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (lt_settings->ffe_preset) lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; -#endif } } @@ -1117,6 +984,14 @@ enum dc_status dp_get_lane_status_and_lane_adjust( (uint8_t *)(dpcd_buf), sizeof(dpcd_buf)); + if (status != DC_OK) { + DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," + " keep current lane status and lane adjust unchanged", + __func__, + lane01_status_address); + return status; + } + for (lane = 0; lane < (uint32_t)(link_training_setting->link_settings.lane_count); lane++) { @@ -1189,7 +1064,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->link_settings.lane_count); if (is_repeater(link, offset)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_128b_132b_ENCODING) 
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -1200,7 +1074,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, @@ -1212,7 +1085,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", @@ -1221,7 +1093,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, lane0_set_address, @@ -1261,6 +1132,9 @@ static bool perform_post_lt_adj_req_sequence( uint32_t adj_req_timer; bool req_drv_setting_changed; uint32_t lane; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; req_drv_setting_changed = false; for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; @@ -1272,11 +1146,6 @@ static bool perform_post_lt_adj_req_sequence( adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; adj_req_timer++) { - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated - dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; - dp_get_lane_status_and_lane_adjust( link, lt_settings, @@ -1357,14 +1226,12 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval case 0x04: aux_rd_interval_us = 16000; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case 0x05: aux_rd_interval_us = 32000; break; case 0x06: aux_rd_interval_us = 64000; break; -#endif default: break; } @@ -1405,13 +1272,8 @@ static enum link_training_result perform_channel_equalization_sequence( /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_repeater(link, offset) && dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -#else - if (is_repeater(link, offset)) - tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -#endif dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); @@ -1510,6 +1372,10 @@ static enum link_training_result perform_clock_recovery_sequence( retries_cr = 0; retry_count = 0; + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); @@ -1521,9 +1387,6 @@ static enum link_training_result perform_clock_recovery_sequence( while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); /* 1. 
call HWSS to set lane settings*/ dp_set_hw_lane_settings( @@ -1570,27 +1433,15 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_lane_adjust, offset); - if (link->dc->debug.apply_vendor_specific_lttpr_wa && - (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && - link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { - vendor_specific_lttpr_wa_one_end(link, retry_count); - vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust); - } - /* 5. check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) return LINK_TRAINING_SUCCESS; /* 6. max VS reached*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && dp_is_max_vs_reached(lt_settings)) break; -#else - if (dp_is_max_vs_reached(lt_settings)) - break; -#endif /* 7. same lane settings*/ /* Note: settings are the same for all lanes, @@ -1599,12 +1450,10 @@ static enum link_training_result perform_clock_recovery_sequence( lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) retries_cr++; -#if defined(CONFIG_DRM_AMD_DC_DCN) else if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) retries_cr++; -#endif else retries_cr = 0; @@ -1642,11 +1491,7 @@ static inline enum link_training_result dp_transition_to_video_idle( * TPS4 must be used instead of POST_LT_ADJ_REQ. */ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || -#if defined(CONFIG_DRM_AMD_DC_DCN) lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { -#else - lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) { -#endif /* delay 5ms after Main Link output idle pattern and then check * DPCD 0202h. 
*/ @@ -1745,7 +1590,6 @@ static inline void decide_8b_10b_training_settings( dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } -#if defined(CONFIG_DRM_AMD_DC_DCN) static inline void decide_128b_132b_training_settings(struct dc_link *link, const struct dc_link_settings *link_settings, struct link_training_settings *lt_settings) @@ -1772,7 +1616,6 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link, dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } -#endif void dp_decide_training_settings( struct dc_link *link, @@ -1781,10 +1624,8 @@ void dp_decide_training_settings( { if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) decide_8b_10b_training_settings(link, link_settings, lt_settings); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) decide_128b_132b_training_settings(link, link_settings, lt_settings); -#endif } static void override_training_settings( @@ -1807,10 +1648,8 @@ static void override_training_settings( lt_settings->pre_emphasis = overrides->pre_emphasis; if (overrides->post_cursor2 != NULL) lt_settings->post_cursor2 = overrides->post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (overrides->ffe_preset != NULL) lt_settings->ffe_preset = overrides->ffe_preset; -#endif /* Override HW lane settings with BIOS forced values if present */ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { @@ -2014,7 +1853,6 @@ static void print_status_message( case LINK_RATE_HIGH3: link_rate = "HBR3"; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_UHBR10: link_rate = "UHBR10"; break; @@ -2024,7 +1862,6 @@ static void print_status_message( case LINK_RATE_UHBR20: link_rate = "UHBR20"; break; -#endif default: break; } @@ -2054,7 +1891,6 @@ static void print_status_message( case LINK_TRAINING_LINK_LOSS: lt_result = "Link loss"; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_LT_FAILED: lt_result = "LT_FAILED received"; break; @@ -2067,7 +1903,6 @@ static void print_status_message( case DP_128b_132b_CDS_DONE_TIMEOUT: lt_result = "CDS timeout"; break; -#endif default: break; } @@ -2087,9 +1922,9 @@ static void print_status_message( } /* Connectivity log: link training */ -#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ -#endif + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", link_rate, lt_settings->link_settings.lane_count, @@ -2177,15 +2012,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train static void dpcd_exit_training_mode(struct dc_link *link) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t sink_status = 0; uint8_t i; -#endif /* clear training pattern set */ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* poll for intra-hop disable */ for (i = 0; i < 10; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && @@ -2193,7 +2025,6 @@ static void dpcd_exit_training_mode(struct dc_link *link) break; udelay(1000); } -#endif } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -2217,7 +2048,6 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, uint32_t *interval_in_us) { @@ -2348,7 
+2178,6 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence( return status; } -#endif static enum link_training_result dp_perform_8b_10b_link_training( struct dc_link *link, @@ -2378,38 +2207,42 @@ static enum link_training_result dp_perform_8b_10b_link_training( repeater_id--) { status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - if (status != LINK_TRAINING_SUCCESS) + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); break; + } status = perform_channel_equalization_sequence(link, link_res, lt_settings, repeater_id); + repeater_training_done(link, repeater_id); + if (status != LINK_TRAINING_SUCCESS) break; - repeater_training_done(link, repeater_id); + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } } - - for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); } } return status; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static enum link_training_result dp_perform_128b_132b_link_training( struct dc_link *link, const struct link_resource *link_res, @@ -2437,9 +2270,97 @@ static enum link_training_result dp_perform_128b_132b_link_training( return result; } -#endif -static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence( +static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + uint8_t toggle_rate = 0x6; + uint8_t target_rate = 0x6; + bool apply_toggle_rate_wa = false; + uint8_t repeater_cnt; + uint8_t repeater_id; + + /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ + if (lt_settings->cr_pattern_time < 16000) + lt_settings->cr_pattern_time = 16000; + + /* Fixed VS/PE specific: Toggle link rate */ + apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); + target_rate = get_dpcd_link_rate(<_settings->link_settings); + toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; + + if (apply_toggle_rate_wa) + lt_settings->link_settings.link_rate = toggle_rate; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + /* Fixed VS/PE specific: Toggle link rate back*/ + if (apply_toggle_rate_wa) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &target_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = target_rate; + + if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. 
perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + } + } + + return status; +} + +static enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, struct link_training_settings *lt_settings) @@ -2463,6 +2384,11 @@ static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequenc ASSERT(dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); + if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); + return status; + } + if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); @@ -2514,11 +2440,7 @@ static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequenc core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN) rate = get_dpcd_link_rate(<_settings->link_settings); -#else - rate = (uint8_t) (lt_settings->link_settings.link_rate); -#endif /* Vendor specific: Toggle link rate */ toggle_rate = (rate == 0x6) ? 0xA : 0x6; @@ -2560,12 +2482,13 @@ static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequenc retries_cr = 0; retry_count = 0; + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); /* 1. call HWSS to set lane settings */ dp_set_hw_lane_settings( @@ -2813,16 +2736,12 @@ enum link_training_result dc_link_dp_perform_link_training( * Per DP specs starting from here, DPTX device shall not issue * Non-LT AUX transactions inside training mode. 
*/ - if (!link->dc->debug.apply_vendor_specific_lttpr_wa && - (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && - link->lttpr_mode == LTTPR_MODE_TRANSPARENT) - status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); + if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); else if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, link_res, <_settings); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (encoding == DP_128b_132b_ENCODING) status = dp_perform_128b_132b_link_training(link, link_res, <_settings); -#endif else ASSERT(0); @@ -2863,27 +2782,21 @@ bool perform_link_training_with_retries( struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; enum dp_panel_mode panel_mode = dp_get_panel_mode(link); - struct link_encoder *link_enc; enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings current_setting = *link_setting; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + int fail_count = 0; - /* Dynamically assigned link encoders associated with stream rather than - * link. - */ - if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream); - else - link_enc = link->link_enc; + dp_trace_commit_lt_init(link); - /* We need to do this before the link training to ensure the idle pattern in SST - * mode will be sent right after the link training - */ - if (dp_get_link_encoding_format(¤t_setting) == DP_8b_10b_ENCODING) { - link_enc->funcs->connect_dig_be_to_fe(link_enc, - pipe_ctx->stream_res.stream_enc->id, true); - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); - } + if (dp_get_link_encoding_format(¤t_setting) == DP_8b_10b_ENCODING) + /* We need to do this before the link training to ensure the idle + * pattern in SST mode will be sent right after the link training + */ + link_hwss->setup_stream_encoder(pipe_ctx); + + dp_trace_set_lt_start_timestamp(link, false); for (j = 0; j < attempts; ++j) { DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n", @@ -2939,10 +2852,15 @@ bool perform_link_training_with_retries( skip_video_pattern); } + dp_trace_lt_total_count_increment(link, false); + dp_trace_lt_result_update(link, status, false); + dp_trace_set_lt_end_timestamp(link, false); if (status == LINK_TRAINING_SUCCESS) return true; } + fail_count++; + dp_trace_lt_fail_count_update(link, fail_count, false); /* latest link training still fail, skip delay and keep PHY on */ if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY) @@ -3074,14 +2992,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_cs_id, link_settings); /* Set FEC enable */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { -#endif fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; dp_set_fec_ready(link, NULL, fec_enable); -#if defined(CONFIG_DRM_AMD_DC_DCN) } -#endif if (lt_overrides->alternate_scrambler_reset) { if (*lt_overrides->alternate_scrambler_reset) @@ -3124,13 +3038,9 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) * Still shouldn't turn off dp_receiver (DPCD:600h) */ if (link_down == true) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link_settings link_settings = link->cur_link_settings; -#endif 
dp_disable_link_phy(link, NULL, link->connector_signal); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) -#endif dp_set_fec_ready(link, NULL, false); } @@ -3138,7 +3048,6 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) { enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; @@ -3152,7 +3061,20 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) return lttpr_max_link_rate; } -#endif + +static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3; + + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + cable_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + cable_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) + cable_max_link_rate = LINK_RATE_UHBR10; + + return cable_max_link_rate; +} bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { @@ -3163,16 +3085,7 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ return false; } - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (link_enc && link_enc->funcs->get_max_link_cap) { @@ -3186,37 +3099,21 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ return false; } -static struct dc_link_settings get_max_link_cap(struct dc_link *link, - const struct link_resource *link_res) + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_link_rate lttpr_max_link_rate; -#endif + enum dc_link_rate cable_max_link_rate; struct link_encoder *link_enc = NULL; - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. 
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); /* get max link encoder capability */ if (link_enc) link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (max_link_cap.link_rate >= LINK_RATE_UHBR10) { - if (!link_res->hpo_dp_link_enc || - link->dc->debug.disable_uhbr) - max_link_cap.link_rate = LINK_RATE_HIGH3; - } -#endif /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) @@ -3229,6 +3126,14 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link, max_link_cap.link_spread) max_link_cap.link_spread = link->reported_link_cap.link_spread; + + /* Lower link settings based on cable attributes */ + cable_max_link_rate = get_cable_max_link_rate(link); + + if (!link->dc->debug.ignore_cable_id && + cable_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = cable_max_link_rate; + /* * account for lttpr repeaters cap * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). @@ -3236,22 +3141,21 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link, if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) { if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; - -#if defined(CONFIG_DRM_AMD_DC_DCN) lttpr_max_link_rate = get_lttpr_max_link_rate(link); if (lttpr_max_link_rate < max_link_cap.link_rate) max_link_cap.link_rate = lttpr_max_link_rate; -#else - if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) - max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; -#endif DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", __func__, max_link_cap.lane_count, max_link_cap.link_rate); } + + if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + return max_link_cap; } @@ -3370,48 +3274,22 @@ bool hpd_rx_irq_check_link_loss_status( return return_code; } -bool dp_verify_link_cap( +static bool dp_verify_link_cap( struct dc_link *link, - const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int *fail_count) { - struct dc_link_settings max_link_cap = {0}; - struct dc_link_settings cur_link_setting = {0}; - struct dc_link_settings *cur = &cur_link_setting; - struct dc_link_settings initial_link_settings = {0}; - bool success; - bool skip_link_training; + struct dc_link_settings cur_link_settings = {0}; + struct dc_link_settings initial_link_settings = *known_limit_link_setting; + bool success = false; bool skip_video_pattern; - enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; - enum link_training_result status; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + enum link_training_result status = LINK_TRAINING_SUCCESS; union hpd_irq_data irq_data; - - /* link training starts with the maximum common settings - * supported by both sink and ASIC. 
- */ - max_link_cap = get_max_link_cap(link, link_res); - initial_link_settings = get_common_supported_link_settings( - *known_limit_link_setting, - max_link_cap); - - /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */ - if (link->dc->debug.skip_detection_link_training || - link->is_dig_mapping_flexible) { - /* TODO - should we check link encoder's max link caps here? - * How do we know which link encoder to check from? - */ - link->verified_link_cap = *known_limit_link_setting; - return true; - } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign && - !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) { - link->verified_link_cap = initial_link_settings; - return true; - } + struct link_resource link_res; memset(&irq_data, 0, sizeof(irq_data)); - success = false; - skip_link_training = false; + cur_link_settings = initial_link_settings; /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { @@ -3420,199 +3298,126 @@ bool dp_verify_link_cap( core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - reset_dp_hpo_stream_encoders_for_link(link); -#endif - /* TODO implement override and monitor patch later */ - - /* try to train the link from high to low to - * find the physical link capability - */ - /* disable PHY done possible by BIOS, will be done by driver itself */ - dp_disable_link_phy(link, link_res, link->connector_signal); - - dp_cs_id = get_clock_source_id(link); - - cur_link_setting = initial_link_settings; - - /* Temporary Renoir-specific workaround for SWDEV-215184; - * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, - * so add extra cycle of enabling and disabling the PHY before first link training. 
- */ - if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && - link->dc->debug.usbc_combo_phy_reset_wa) { - dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur); - dp_disable_link_phy(link, link_res, link->connector_signal); - } - do { - skip_video_pattern = true; - - if (cur->link_rate == LINK_RATE_LOW) - skip_video_pattern = false; + if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) + continue; + skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; dp_enable_link_phy( link, - link_res, + &link_res, link->connector_signal, dp_cs_id, - cur); + &cur_link_settings); + status = dc_link_dp_perform_link_training( + link, + &link_res, + &cur_link_settings, + skip_video_pattern); - if (skip_link_training) + if (status == LINK_TRAINING_SUCCESS) { success = true; - else { - status = dc_link_dp_perform_link_training( + udelay(1000); + if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK && + hpd_rx_irq_check_link_loss_status( link, - link_res, - cur, - skip_video_pattern); - if (status == LINK_TRAINING_SUCCESS) - success = true; - else + &irq_data)) (*fail_count)++; - } - if (success) { - link->verified_link_cap = *cur; - udelay(1000); - if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK) - if (hpd_rx_irq_check_link_loss_status( - link, - &irq_data)) - (*fail_count)++; + } else { + (*fail_count)++; } - /* always disable the link before trying another - * setting or before returning we'll enable it later - * based on the actual mode we're driving - */ - dp_disable_link_phy(link, link_res, link->connector_signal); + dp_trace_lt_total_count_increment(link, true); + dp_trace_lt_result_update(link, status, true); + dp_disable_link_phy(link, &link_res, link->connector_signal); } while (!success && decide_fallback_link_setting(link, - initial_link_settings, cur, status)); + initial_link_settings, &cur_link_settings, status)); - /* Link Training failed for all Link Settings - * (Lane Count is still unknown) - */ - if (!success) { - /* If all LT fails for all settings, - * set verified = failed safe (1 lane low) - */ - link->verified_link_cap.lane_count = LANE_COUNT_ONE; - link->verified_link_cap.link_rate = LINK_RATE_LOW; - - link->verified_link_cap.link_spread = - LINK_SPREAD_DISABLED; - } + link->verified_link_cap = success ? + cur_link_settings : fail_safe_link_settings; + return success; +} +static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, + struct dc_link_settings *link_settings) +{ + /* Temporary Renoir-specific workaround PHY will sometimes be in bad + * state on hotplugging display from certain USB-C dongle, so add extra + * cycle of enabling and disabling the PHY before first link training. 
+ */ + struct link_resource link_res = {0}; + enum clock_source_id dp_cs_id = get_clock_source_id(link); - return success; + dp_enable_link_phy(link, &link_res, link->connector_signal, + dp_cs_id, link_settings); + dp_disable_link_phy(link, &link_res, link->connector_signal); } bool dp_verify_link_cap_with_retries( struct dc_link *link, - const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int attempts) { int i = 0; bool success = false; + int fail_count = 0; + + dp_trace_detect_lt_init(link); + + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && + link->dc->debug.usbc_combo_phy_reset_wa) + apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); + dp_trace_set_lt_start_timestamp(link, false); for (i = 0; i < attempts; i++) { - int fail_count = 0; enum dc_connection_type type = dc_connection_none; memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { - link->verified_link_cap.lane_count = LANE_COUNT_ONE; - link->verified_link_cap.link_rate = LINK_RATE_LOW; - link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; + link->verified_link_cap = fail_safe_link_settings; break; - } else if (dp_verify_link_cap(link, link_res, - known_limit_link_setting, + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count) && fail_count == 0) { success = true; break; } msleep(10); } - return success; -} -bool dp_verify_mst_link_cap( - struct dc_link *link, const struct link_resource *link_res) -{ - struct dc_link_settings max_link_cap = {0}; + dp_trace_lt_fail_count_update(link, fail_count, true); + dp_trace_set_lt_end_timestamp(link, true); - if (dp_get_link_encoding_format(&link->reported_link_cap) == - DP_8b_10b_ENCODING) { - max_link_cap = get_max_link_cap(link, link_res); - link->verified_link_cap = get_common_supported_link_settings( - link->reported_link_cap, - max_link_cap); - } -#if defined(CONFIG_DRM_AMD_DC_DCN) - else if (dp_get_link_encoding_format(&link->reported_link_cap) == - DP_128b_132b_ENCODING) { - dp_verify_link_cap_with_retries(link, - link_res, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } -#endif - return true; + return success; } -static struct dc_link_settings get_common_supported_link_settings( - struct dc_link_settings link_setting_a, - struct dc_link_settings link_setting_b) +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. + */ +static enum dc_link_rate get_link_rate_from_max_link_bw( + uint8_t max_link_bw) { - struct dc_link_settings link_settings = {0}; + enum dc_link_rate link_rate; - link_settings.lane_count = - (link_setting_a.lane_count <= - link_setting_b.lane_count) ? - link_setting_a.lane_count : - link_setting_b.lane_count; - link_settings.link_rate = - (link_setting_a.link_rate <= - link_setting_b.link_rate) ? - link_setting_a.link_rate : - link_setting_b.link_rate; - link_settings.link_spread = LINK_SPREAD_DISABLED; - - /* in DP compliance test, DPR-120 may have - * a random value in its MAX_LINK_BW dpcd field. - * We map it to the maximum supported link rate that - * is smaller than MAX_LINK_BW in this case. 
- */ -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link_settings.link_rate > LINK_RATE_UHBR20) { - link_settings.link_rate = LINK_RATE_UHBR20; - } else if (link_settings.link_rate < LINK_RATE_UHBR20 && - link_settings.link_rate > LINK_RATE_UHBR13_5) { - link_settings.link_rate = LINK_RATE_UHBR13_5; - } else if (link_settings.link_rate < LINK_RATE_UHBR10 && - link_settings.link_rate > LINK_RATE_HIGH3) { -#else - if (link_settings.link_rate > LINK_RATE_HIGH3) { -#endif - link_settings.link_rate = LINK_RATE_HIGH3; - } else if (link_settings.link_rate < LINK_RATE_HIGH3 - && link_settings.link_rate > LINK_RATE_HIGH2) { - link_settings.link_rate = LINK_RATE_HIGH2; - } else if (link_settings.link_rate < LINK_RATE_HIGH2 - && link_settings.link_rate > LINK_RATE_HIGH) { - link_settings.link_rate = LINK_RATE_HIGH; - } else if (link_settings.link_rate < LINK_RATE_HIGH - && link_settings.link_rate > LINK_RATE_LOW) { - link_settings.link_rate = LINK_RATE_LOW; - } else if (link_settings.link_rate < LINK_RATE_LOW) { - link_settings.link_rate = LINK_RATE_UNKNOWN; + if (max_link_bw >= LINK_RATE_HIGH3) { + link_rate = LINK_RATE_HIGH3; + } else if (max_link_bw < LINK_RATE_HIGH3 + && max_link_bw >= LINK_RATE_HIGH2) { + link_rate = LINK_RATE_HIGH2; + } else if (max_link_bw < LINK_RATE_HIGH2 + && max_link_bw >= LINK_RATE_HIGH) { + link_rate = LINK_RATE_HIGH; + } else if (max_link_bw < LINK_RATE_HIGH + && max_link_bw >= LINK_RATE_LOW) { + link_rate = LINK_RATE_LOW; + } else { + link_rate = LINK_RATE_UNKNOWN; } - return link_settings; + return link_rate; } static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) @@ -3642,14 +3447,12 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) { switch (link_rate) { -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_UHBR20: return LINK_RATE_UHBR13_5; case LINK_RATE_UHBR13_5: return LINK_RATE_UHBR10; case LINK_RATE_UHBR10: return LINK_RATE_HIGH3; -#endif case LINK_RATE_HIGH3: return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: @@ -3675,7 +3478,8 @@ static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) } } -static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate) +static enum dc_link_rate increase_link_rate(struct dc_link *link, + enum dc_link_rate link_rate) { switch (link_rate) { case LINK_RATE_LOW: @@ -3684,27 +3488,37 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate) return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: return LINK_RATE_HIGH3; -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_HIGH3: return LINK_RATE_UHBR10; case LINK_RATE_UHBR10: - return LINK_RATE_UHBR13_5; + /* upto DP2.x specs UHBR13.5 is the only link rate that could be + * not supported by DPRX when higher link rate is supported. + * so we treat it as a special case for code simplicity. When we + * have new specs with more link rates like this, we should + * consider a more generic solution to handle discrete link + * rate capabilities. + */ + return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? 
+ LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; case LINK_RATE_UHBR13_5: return LINK_RATE_UHBR20; -#endif default: return LINK_RATE_UNKNOWN; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) static bool decide_fallback_link_setting_max_bw_policy( + struct dc_link *link, const struct dc_link_settings *max, - struct dc_link_settings *cur) + struct dc_link_settings *cur, + enum link_training_result training_result) { uint8_t cur_idx = 0, next_idx; bool found = false; + if (training_result == LINK_TRAINING_ABORT) + return false; + while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) /* find current index */ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && @@ -3717,11 +3531,22 @@ static bool decide_fallback_link_setting_max_bw_policy( while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) /* find next index */ - if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count && - dp_lt_fallbacks[next_idx].link_rate <= max->link_rate) - break; - else + if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || + dp_lt_fallbacks[next_idx].link_rate > max->link_rate) next_idx++; + else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && + link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) + /* upto DP2.x specs UHBR13.5 is the only link rate that + * could be not supported by DPRX when higher link rate + * is supported. so we treat it as a special case for + * code simplicity. When we have new specs with more + * link rates like this, we should consider a more + * generic solution to handle discrete link rate + * capabilities. + */ + next_idx++; + else + break; if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; @@ -3731,7 +3556,6 @@ static bool decide_fallback_link_setting_max_bw_policy( return found; } -#endif /* * function: set link rate and lane count fallback based @@ -3749,12 +3573,10 @@ static bool decide_fallback_link_setting( { if (!current_link_setting) return false; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, - current_link_setting); -#endif + return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings, + current_link_setting, training_result); switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -3801,6 +3623,7 @@ static bool decide_fallback_link_setting( current_link_setting->link_rate = reduce_link_rate( current_link_setting->link_rate); + current_link_setting->lane_count = initial_link_settings.lane_count; } else { return false; } @@ -3813,6 +3636,7 @@ static bool decide_fallback_link_setting( current_link_setting->link_rate = reduce_link_rate( current_link_setting->link_rate); + current_link_setting->lane_count = initial_link_settings.lane_count; } else { return false; } @@ -3907,7 +3731,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting current_link_setting.lane_count); } else { current_link_setting.link_rate = - increase_link_rate( + increase_link_rate(link, current_link_setting.link_rate); current_link_setting.lane_count = initial_link_setting.lane_count; @@ -4022,7 +3846,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link, /* minimize lane */ if (current_link_setting.link_rate < max_link_rate) { current_link_setting.link_rate = - increase_link_rate( + increase_link_rate(link, current_link_setting.link_rate); } else { if 
(current_link_setting.lane_count < @@ -4043,7 +3867,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link, current_link_setting.lane_count); } else { current_link_setting.link_rate = - increase_link_rate( + increase_link_rate(link, current_link_setting.link_rate); current_link_setting.lane_count = initial_link_setting.lane_count; @@ -4292,15 +4116,9 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) union phy_test_pattern dpcd_test_pattern; union lane_adjust dpcd_lane_adjustment[2]; unsigned char dpcd_post_cursor_2_adjustment = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned char test_pattern_buffer[ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; -#else - unsigned char test_pattern_buffer[ - (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; -#endif unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; union lane_adjust dpcd_lane_adjust; @@ -4326,7 +4144,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) if (link->dc->debug.apply_vendor_specific_lttpr_wa && (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) - vendor_specific_lttpr_wa_three_dpcd( + dp_fixed_vs_pe_read_lane_adjust( link, link_training_settings.dpcd_lane_settings); @@ -4371,7 +4189,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_CP2520_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case PHY_TEST_PATTERN_128b_132b_TPS1: test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; break; @@ -4399,7 +4216,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_SQUARE_PULSE: test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; break; -#endif default: test_pattern = DP_TEST_PATTERN_VIDEO_MODE; break; @@ -4415,7 +4231,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) core_link_read_dpcd( @@ -4434,7 +4249,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_buffer, test_pattern_size); } -#endif /* prepare link training settings */ link_training_settings.link_settings = link->cur_link_settings; @@ -4455,14 +4269,11 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = (enum dc_post_cursor2) ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); - } -#if defined(CONFIG_DRM_AMD_DC_DCN) - else if (dp_get_link_encoding_format(&link->cur_link_settings) == + } else if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) { link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = dpcd_lane_adjust.tx_ffe.PRESET_VALUE; } -#endif } dp_hw_to_dpcd_lane_settings(&link_training_settings, @@ -4869,6 +4680,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd status = false; if (out_link_loss) *out_link_loss = true; + + dp_trace_link_loss_increment(link); } if (link->type == dc_connection_sst_branch && @@ -4970,7 +4783,7 @@ uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) return 0; } -/** +/* * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. 
*/ static uint32_t intersect_frl_link_bw_support( @@ -5159,7 +4972,6 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { union dp_dfp_cap_ext dfp_cap_ext; @@ -5195,7 +5007,6 @@ static void get_active_converter_info( DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); } -#endif } static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, @@ -5255,12 +5066,8 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) bool dp_retrieve_lttpr_cap(struct dc_link *link) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t lttpr_dpcd_data[8]; bool allow_lttpr_non_transparent_mode = 0; -#else - uint8_t lttpr_dpcd_data[6]; -#endif bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable; bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; enum dc_status status = DC_ERROR_UNEXPECTED; @@ -5268,7 +5075,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); -#if defined(CONFIG_DRM_AMD_DC_DCN) if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 && link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) { allow_lttpr_non_transparent_mode = 1; @@ -5276,7 +5082,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { allow_lttpr_non_transparent_mode = 1; } -#endif /* * Logic to determine LTTPR mode @@ -5285,21 +5090,12 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) if (vbios_lttpr_enable && vbios_lttpr_interop) link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else if (!vbios_lttpr_enable && vbios_lttpr_interop) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (allow_lttpr_non_transparent_mode) -#else - if (link->dc->config.allow_lttpr_non_transparent_mode) -#endif link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else link->lttpr_mode = LTTPR_MODE_TRANSPARENT; } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) -#else - if (!link->dc->config.allow_lttpr_non_transparent_mode - || !link->dc->caps.extended_aux_timeout_support) -#endif link->lttpr_mode = LTTPR_MODE_NON_LTTPR; else link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; @@ -5322,6 +5118,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) sizeof(lttpr_dpcd_data)); if (status != DC_OK) { DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); + link->lttpr_mode = LTTPR_MODE_NON_LTTPR; return false; } @@ -5349,7 +5146,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; -#if defined(CONFIG_DRM_AMD_DC_DCN) link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; @@ -5357,12 +5153,10 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; -#endif /* Attempt to train in LTTPR transparent mode if 
repeater count exceeds 8. */ - is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff && - link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); if (is_lttpr_present) { @@ -5374,6 +5168,54 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) return is_lttpr_present; } +static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) +{ + union dmub_rb_cmd cmd; + + if (!link->ctx->dmub_srv || + link->ep_type != DISPLAY_ENDPOINT_PHY || + link->link_enc->features.flags.bits.DP_IS_USB_C == 0) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; + cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); + cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( + link->dc, link->link_enc->transmitter); + if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + cmd.cable_id.header.ret_status == 1) + cable_id->raw = cmd.cable_id.data.output_raw; + + return cmd.cable_id.header.ret_status == 1; +} + +static union dp_cable_id intersect_cable_id( + union dp_cable_id *a, union dp_cable_id *b) +{ + union dp_cable_id out; + + out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, + b->bits.UHBR10_20_CAPABILITY); + out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, + b->bits.UHBR13_5_CAPABILITY); + out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); + + return out; +} + +static void retrieve_cable_id(struct dc_link *link) +{ + union dp_cable_id usbc_cable_id; + + link->dpcd_caps.cable_id.raw = 0; + core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, + &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); + + if (get_usbc_cable_id(link, &usbc_cable_id)) + link->dpcd_caps.cable_id = intersect_cable_id( + &link->dpcd_caps.cable_id, &usbc_cable_id); +} + static bool retrieve_link_cap(struct dc_link *link) { /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, @@ -5516,6 +5358,9 @@ static bool retrieve_link_cap(struct dc_link *link) read_dp_device_vendor_id(link); + /* TODO - decouple raw mst capability from policy decision */ + link->dpcd_caps.is_mst_capable = is_mst_supported(link); + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -5534,8 +5379,8 @@ static bool retrieve_link_cap(struct dc_link *link) link->reported_link_cap.lane_count = link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = dpcd_data[ - DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( + dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); link->reported_link_cap.link_spread = link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; @@ -5546,7 +5391,8 @@ static bool retrieve_link_cap(struct dc_link *link) edp_config_cap.bits.ALT_SCRAMBLER_RESET; link->dpcd_caps.dpcd_display_control_capable = edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - + link->dpcd_caps.channel_coding_cap.raw = + dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; link->test_pattern_enabled = false; link->compliance_test_state.raw = 0; @@ -5632,7 +5478,6 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( link, @@ -5647,21 +5492,33 @@ static bool retrieve_link_cap(struct dc_link *link) DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); } -#else - status = core_link_read_dpcd( - link, - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + + /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode + * only if required. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +#if defined(CONFIG_DRM_AMD_DC_DCN) + !link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around && #endif + link->dpcd_caps.is_branch_dev && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && + (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { + /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. + * Clear FEC and DSC capabilities as a work around if that is not the case. + */ + link->wa_flags.dpia_forced_tbt3_mode = true; + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); + } else + link->wa_flags.dpia_forced_tbt3_mode = false; } if (!dpcd_read_sink_ext_caps(link)) link->dpcd_sink_ext_caps.raw = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) - link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV]; - if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); @@ -5706,7 +5563,9 @@ static bool retrieve_link_cap(struct dc_link *link) if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) DC_LOG_DP2("\tFEC aggregated error counters are supported"); } -#endif + + retrieve_cable_id(link); + dpcd_write_cable_id_to_dprx(link); /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -5779,14 +5638,6 @@ bool dp_overwrite_extended_receiver_cap(struct dc_link *link) bool detect_dp_sink_caps(struct dc_link *link) { return retrieve_link_cap(link); - - /* dc init_hw has power encoder using default - * signal for connector. For native DP, no - * need to power up encoder again. If not native - * DP, hw_init may need check signal or power up - * encoder here. 
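In retrieve_link_cap() above, the raw DP_MAX_LINK_RATE byte is now passed through get_link_rate_from_max_link_bw() instead of being stored directly. DPCD expresses that field in units of 0.27 Gbps per lane, so for example 0x1E corresponds to 8.1 Gbps (HBR3). The helper's body is outside this hunk; the sketch below is only an assumption of the kind of mapping it implies (the function name here is hypothetical, the LINK_RATE_* values are dc's 8b/10b rate enums):

    /* Hypothetical sketch only: validate a raw DPCD MAX_LINK_RATE byte
     * against the known 8b/10b rates (units of 0.27 Gbps per lane). */
    static enum dc_link_rate sketch_link_rate_from_max_link_bw(uint8_t max_link_bw)
    {
    	switch (max_link_bw) {
    	case 0x06: return LINK_RATE_LOW;	/* 1.62 Gbps (RBR)  */
    	case 0x0A: return LINK_RATE_HIGH;	/* 2.70 Gbps (HBR)  */
    	case 0x14: return LINK_RATE_HIGH2;	/* 5.40 Gbps (HBR2) */
    	case 0x1E: return LINK_RATE_HIGH3;	/* 8.10 Gbps (HBR3) */
    	default:   return LINK_RATE_UNKNOWN;
    	}
    }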
- */ - /* TODO save sink caps in link->sink */ } static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) @@ -5864,8 +5715,6 @@ void detect_edp_sink_caps(struct dc_link *link) } } } - link->verified_link_cap = link->reported_link_cap; - core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, &backlight_adj_cap, sizeof(backlight_adj_cap)); @@ -5873,6 +5722,34 @@ void detect_edp_sink_caps(struct dc_link *link) (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; dc_link_set_default_brightness_aux(link); + + core_link_read_dpcd(link, DP_EDP_DPCD_REV, + &link->dpcd_caps.edp_rev, + sizeof(link->dpcd_caps.edp_rev)); + /* + * PSR is only valid for eDP v1.3 or higher. + */ + if (link->dpcd_caps.edp_rev >= DP_EDP_13) { + core_link_read_dpcd(link, DP_PSR_SUPPORT, + &link->dpcd_caps.psr_info.psr_version, + sizeof(link->dpcd_caps.psr_info.psr_version)); + core_link_read_dpcd(link, DP_PSR_CAPS, + &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { + core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, + &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, + sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); + } + } + + /* + * ALPM is only valid for eDP v1.4 or higher. + */ + if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) + core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, + &link->dpcd_caps.alpm_caps.raw, + sizeof(link->dpcd_caps.alpm_caps.raw)); } void dc_link_dp_enable_hpd(const struct dc_link *link) @@ -6031,7 +5908,7 @@ static void set_crtc_test_pattern(struct dc_link *link, else if (link->dc->hwss.set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; - int dpg_width = width; + int dpg_width; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; @@ -6130,15 +6007,14 @@ bool dc_link_dp_set_test_pattern( if (link->dc->debug.apply_vendor_specific_lttpr_wa && (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { - dpcd_set_lane_settings(link, p_link_settings, DPRX); - vendor_specific_lttpr_wa_five( + dp_fixed_vs_pe_set_retimer_lane_settings( link, p_link_settings->dpcd_lane_settings, p_link_settings->link_settings.lane_count); } else { dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); - dpcd_set_lane_settings(link, p_link_settings, DPRX); } + dpcd_set_lane_settings(link, p_link_settings, DPRX); } /* Blank stream if running test pattern */ @@ -6188,7 +6064,6 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_CP2520_3: pattern = PHY_TEST_PATTERN_CP2520_3; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_TEST_PATTERN_128b_132b_TPS1: pattern = PHY_TEST_PATTERN_128b_132b_TPS1; break; @@ -6216,7 +6091,6 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_SQUARE_PULSE: pattern = PHY_TEST_PATTERN_SQUARE_PULSE; break; -#endif default: return false; } @@ -6482,14 +6356,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource enum dc_status status = DC_OK; uint8_t fec_config = 0; - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. 
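The retrieve_cable_id()/intersect_cable_id() helpers added earlier in this hunk merge the cable capabilities reported by the DPRX (DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX) with those reported by DMUB for the USB-C cable: each UHBR capability bit takes the minimum of the two values, while CABLE_TYPE takes the maximum. An illustrative combination (the field values below are made up for the example; the field names come from the patch):

    /* Illustrative values only. */
    union dp_cable_id dprx_id = { .bits = { .UHBR10_20_CAPABILITY = 1,
    					    .UHBR13_5_CAPABILITY  = 0,
    					    .CABLE_TYPE           = 0 } };
    union dp_cable_id usbc_id = { .bits = { .UHBR10_20_CAPABILITY = 0,
    					    .UHBR13_5_CAPABILITY  = 0,
    					    .CABLE_TYPE           = 2 } };
    union dp_cable_id eff = intersect_cable_id(&dprx_id, &usbc_id);
    /* eff.bits.UHBR10_20_CAPABILITY == 0, eff.bits.UHBR13_5_CAPABILITY == 0,
     * eff.bits.CABLE_TYPE == 2: UHBR10/20 cannot be assumed for this cable. */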
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!dc_link_should_enable_fec(link)) @@ -6529,14 +6396,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) { struct link_encoder *link_enc = NULL; - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!dc_link_should_enable_fec(link)) @@ -6562,23 +6422,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } -struct link_encoder *dp_get_link_enc(struct dc_link *link) -{ - struct link_encoder *link_enc; - - link_enc = link->link_enc; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, - link); - if (!link->link_enc) - link_enc = link_enc_cfg_get_next_avail_link_enc( - link->ctx->dc); - } - - return link_enc; -} - void dpcd_set_source_specific_data(struct dc_link *link) { if (!link->dc->vendor_signature.is_valid) { @@ -6655,6 +6498,20 @@ void dpcd_set_source_specific_data(struct dc_link *link) } } +void dpcd_write_cable_id_to_dprx(struct dc_link *link) +{ + if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || + link->dpcd_caps.cable_id.raw == 0 || + link->dprx_states.cable_id_written) + return; + + core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, + &link->dpcd_caps.cable_id.raw, + sizeof(link->dpcd_caps.cable_id.raw)); + + link->dprx_states.cable_id_written = 1; +} + bool dc_link_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, @@ -6819,15 +6676,12 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings if ((link_settings->link_rate >= LINK_RATE_LOW) && (link_settings->link_rate <= LINK_RATE_HIGH3)) return DP_8b_10b_ENCODING; -#if defined(CONFIG_DRM_AMD_DC_DCN) else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && (link_settings->link_rate <= LINK_RATE_UHBR20)) return DP_128b_132b_ENCODING; -#endif return DP_UNKNOWN_ENCODING; } -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) { struct dc_link_settings link_settings = {0}; @@ -7044,7 +6898,6 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); } -#endif void edp_panel_backlight_power_on(struct dc_link *link) { @@ -7056,3 +6909,621 @@ void edp_panel_backlight_power_on(struct dc_link *link) if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, true); } + +void dc_link_clear_dprx_states(struct dc_link *link) +{ + memset(&link->dprx_states, 0, sizeof(link->dprx_states)); +} + +void dp_receiver_power_ctrl(struct dc_link *link, bool on) +{ + uint8_t state; + + state = on ? 
DP_POWER_STATE_D0 : DP_POWER_STATE_D3; + + if (link->sync_lt_in_progress) + return; + + core_link_write_dpcd(link, DP_SET_POWER, &state, + sizeof(state)); + +} + +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) +{ + if (link != NULL && link->dc->debug.enable_driver_sequence_debug) + core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, + &dp_test_mode, sizeof(dp_test_mode)); +} + + +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); +} + +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct pipe_ctx *pipes = + link->dc->current_state->res_ctx.pipe_ctx; + struct clock_source *dp_cs = + link->dc->res_pool->dp_clock_source; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + unsigned int i; + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + + /* If the current pixel clock source is not DTO(happens after + * switching from HDMI passive dongle to DP on the same connector), + * switch the pixel clock source to DTO. 
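convert_to_count() above decodes the one-hot encoding DP uses for the PHY repeater count (the set bit position encodes the count, 0x80 meaning one repeater down to 0x01 meaning eight), and is_immediate_downstream() compares a link-training offset against that count. A few example decodes, straight from the switch above:

    convert_to_count(0x80);	/* -> 1 LTTPR  */
    convert_to_count(0x20);	/* -> 3 LTTPRs */
    convert_to_count(0x01);	/* -> 8 LTTPRs */
    convert_to_count(0x03);	/* -> 0, not a valid one-hot value */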
+ */ + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream != NULL && + pipes[i].stream->link == link) { + if (pipes[i].clock_source != NULL && + pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { + pipes[i].clock_source = dp_cs; + pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz = + pipes[i].stream->timing.pix_clk_100hz; + pipes[i].clock_source->funcs->program_pix_clk( + pipes[i].clock_source, + &pipes[i].stream_res.pix_clk_params, + &pipes[i].pll_settings); + } + } + } + + link->cur_link_settings = *link_settings; + + if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); + } + + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + if (link_hwss->ext.enable_dp_link_output) + link_hwss->ext.enable_dp_link_output(link, link_res, signal, + clock_source, link_settings); + + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); + dp_receiver_power_ctrl(link, true); +} + +void edp_add_delay_for_T9(struct dc_link *link) +{ + if (link->local_sink && + link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0) + udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000); +} + +bool edp_receiver_ready_T9(struct dc_link *link) +{ + unsigned int tries = 0; + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ + if (result == DC_OK && edpRev >= DP_EDP_12) { + do { + sinkstatus = 1; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 0) + break; + if (result != DC_OK) + break; + udelay(100); //MAx T9 + } while (++tries < 50); + } + + return result; +} +bool edp_receiver_ready_T7(struct dc_link *link) +{ + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + /* use absolute time stamp to constrain max T7*/ + unsigned long long enter_timestamp = 0; + unsigned long long finish_timestamp = 0; + unsigned long long time_taken_in_ns = 0; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + if (result == DC_OK && edpRev >= DP_EDP_12) { + /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ + enter_timestamp = dm_get_timestamp(link->ctx); + do { + sinkstatus = 0; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 1) + break; + if (result != DC_OK) + break; + udelay(25); + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); + } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms + } + + if (link->local_sink && + link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) + udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); + + return result; +} + +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + + if (!link->wa_flags.dp_keep_receiver_powered) + dp_receiver_power_ctrl(link, false); + + if (signal == 
SIGNAL_TYPE_EDP) { + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, false); + if (link_hwss->ext.disable_dp_link_output) + link_hwss->ext.disable_dp_link_output(link, link_res, signal); + link->dc->hwss.edp_power_control(link, false); + } else { + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + if (link_hwss->ext.disable_dp_link_output) + link_hwss->ext.disable_dp_link_output(link, link_res, signal); + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + } + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + + /* Clear current link setting.*/ + memset(&link->cur_link_settings, 0, + sizeof(link->cur_link_settings)); + + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); +} + +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal) +{ + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count > 0) + return; + + dp_disable_link_phy(link, link_res, signal); + + /* set the sink to SST mode after disabling the link */ + dp_enable_mst_on_sink(link, false); +} + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; + default: + break; + } + + dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); + + return true; +} + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + + if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset)) + return; + + if (link_hwss->ext.set_dp_lane_settings) + link_hwss->ext.set_dp_lane_settings(link, link_res, + &link_settings->link_settings, + link_settings->hw_lane_settings); + + memmove(link->cur_lane_setting, + link_settings->hw_lane_settings, + sizeof(link->cur_lane_setting)); +} + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct encoder_set_dp_phy_pattern_param pattern_param = {0}; + + pattern_param.dp_phy_pattern = test_pattern; + pattern_param.custom_pattern = custom_pattern; + pattern_param.custom_pattern_size = custom_pattern_size; + pattern_param.dp_panel_mode = dp_get_panel_mode(link); + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); +} + +void dp_retrain_link_dp_test(struct 
dc_link *link, + struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + struct pipe_ctx *pipes = + &link->dc->current_state->res_ctx.pipe_ctx[0]; + unsigned int i; + + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream != NULL && + !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && + pipes[i].stream->link != NULL && + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { + udelay(100); + + pipes[i].stream_res.stream_enc->funcs->dp_blank(link, + pipes[i].stream_res.stream_enc); + + /* disable any test pattern that might be active */ + dp_set_hw_test_pattern(link, &pipes[i].link_res, + DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + dp_receiver_power_ctrl(link, false); + + link->dc->hwss.disable_stream(&pipes[i]); + if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) + (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); + + if (link->link_enc) + link->link_enc->funcs->disable_output( + link->link_enc, + SIGNAL_TYPE_DISPLAY_PORT); + + /* Clear current link setting. */ + memset(&link->cur_link_settings, 0, + sizeof(link->cur_link_settings)); + + perform_link_training_with_retries( + link_setting, + skip_video_pattern, + LINK_TRAINING_ATTEMPTS, + &pipes[i], + SIGNAL_TYPE_DISPLAY_PORT, + false); + + link->dc->hwss.enable_stream(&pipes[i]); + + link->dc->hwss.unblank_stream(&pipes[i], + link_setting); + + if (pipes[i].stream_res.audio) { + /* notify audio driver for + * audio modes of monitor */ + pipes[i].stream_res.audio->funcs->az_enable( + pipes[i].stream_res.audio); + + /* un-mute audio */ + /* TODO: audio should be per stream rather than + * per link */ + pipes[i].stream_res.stream_enc->funcs-> + audio_mute_control( + pipes[i].stream_res.stream_enc, false); + } + } + } +} + +#undef DC_LOGGER +#define DC_LOGGER \ + dsc->ctx->logger +static void dsc_optc_config_log(struct display_stream_compressor *dsc, + struct dsc_optc_config *config) +{ + uint32_t precision = 1 << 28; + uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; + uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; + uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; + + /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC + * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is + * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal + */ + ll_bytes_per_pix_fraq *= 10000000; + ll_bytes_per_pix_fraq /= precision; + + DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", + config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); + DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); + DC_LOG_DSC("\tslice_width %d", config->slice_width); +} + +bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + bool result = false; + + if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + result = true; + else + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); + return result; +} + +/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, + * i.e. 
after dp_enable_dsc_on_rx() had been called + */ +void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + if (enable) { + struct dsc_config dsc_cfg; + struct dsc_optc_config dsc_optc_cfg; + enum optc_dsc_mode optc_dsc_mode; + + /* Enable DSC hw block */ + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + + dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); + dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; + + odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); + odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); + } + dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; + dsc_cfg.pic_width *= opp_cnt; + + optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + + /* Enable DSC in encoder */ + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) + && !is_dp_128b_132b_signal(pipe_ctx)) { + DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); + dsc_optc_config_log(dsc, &dsc_optc_cfg); + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, + optc_dsc_mode, + dsc_optc_cfg.bytes_per_pixel, + dsc_optc_cfg.slice_width); + + /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ + } + + /* Enable DSC in OPTC */ + DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); + dsc_optc_config_log(dsc, &dsc_optc_cfg); + pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, + optc_dsc_mode, + dsc_optc_cfg.bytes_per_pixel, + dsc_optc_cfg.slice_width); + } else { + /* disable DSC in OPTC */ + pipe_ctx->stream_res.tg->funcs->set_dsc_config( + pipe_ctx->stream_res.tg, + OPTC_DSC_DISABLED, 0, 0); + + /* disable DSC in stream encoder */ + if (dc_is_dp_signal(stream->signal)) { + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( + pipe_ctx->stream_res.stream_enc, + OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } + } + + /* disable DSC block */ + pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); + for (odm_pipe = 
pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); + } +} + +bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + bool result = false; + + if (!pipe_ctx->stream->timing.flags.DSC) + goto out; + if (!dsc) + goto out; + + if (enable) { + { + dp_set_dsc_on_stream(pipe_ctx, true); + result = true; + } + } else { + dp_set_dsc_on_rx(pipe_ctx, false); + dp_set_dsc_on_stream(pipe_ctx, false); + result = true; + } +out: + return result; +} + +/* + * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; + * hence PPS info packet update need to use frame update instead of immediate update. + * Added parameter immediate_update for this purpose. + * The decision to use frame update is hard-coded in function dp_update_dsc_config(), + * which is the only place where a "false" would be passed in for param immediate_update. + * + * immediate_update is only applicable when DSC is enabled. + */ +bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + + if (!pipe_ctx->stream->timing.flags.DSC || !dsc) + return false; + + if (enable) { + struct dsc_config dsc_cfg; + uint8_t dsc_packed_pps[128]; + + memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + + /* Enable DSC hw block */ + dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + + DC_LOG_DSC(" "); + dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); + if (dc_is_dp_signal(stream->signal)) { + DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + } + } else { + /* disable DSC PPS in stream encoder */ + if (dc_is_dp_signal(stream->signal)) { + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } + } + + return true; +} + + +bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + + if (!pipe_ctx->stream->timing.flags.DSC) + return false; + if (!dsc) + return false; + + dp_set_dsc_on_stream(pipe_ctx, true); + dp_set_dsc_pps_sdp(pipe_ctx, true, false); + return true; +} + +#undef DC_LOGGER +#define DC_LOGGER \ + link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c index 7f25c11f4248..48a18766f002 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -27,7 +27,7 @@ #include <dc_link.h> #include <inc/link_hwss.h> #include <inc/link_dpcd.h> -#include "drm/drm_dp_helper.h" +#include <drm/dp/drm_dp_helper.h> #include <dc_dp_types.h> #include "dm_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index a55944da8d53..42da7f430113 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -122,6 +122,7 @@ static void remove_link_enc_assignment( stream->link_enc = NULL; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; + dc_stream_release(stream); break; } } @@ -271,6 +272,13 @@ void link_enc_cfg_init( state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } +void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx) +{ + memcpy(&dst_ctx->res_ctx.link_enc_cfg_ctx, + &src_ctx->res_ctx.link_enc_cfg_ctx, + sizeof(dst_ctx->res_ctx.link_enc_cfg_ctx)); +} + void link_enc_cfg_link_encs_assign( struct dc *dc, struct dc_state *state, @@ -486,7 +494,8 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) } for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { - if (encs_assigned[i] == ENGINE_ID_UNKNOWN) { + if (encs_assigned[i] == ENGINE_ID_UNKNOWN && + dc->res_pool->link_encoders[i] != NULL) { link_enc = dc->res_pool->link_encoders[i]; break; } @@ -506,6 +515,26 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( return link_enc; } +struct link_encoder *link_enc_cfg_get_link_enc( + const struct dc_link *link) +{ + struct link_encoder *link_enc = NULL; + + /* Links supporting 
dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc( + link->ctx->dc); + } else + link_enc = link->link_enc; + + return link_enc; +} + bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link) { bool is_avail = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c deleted file mode 100644 index 45d03d3a95c3..000000000000 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ /dev/null @@ -1,917 +0,0 @@ -/* Copyright 2015 Advanced Micro Devices, Inc. */ - - -#include "dm_services.h" -#include "dc.h" -#include "inc/core_types.h" -#include "include/ddc_service_types.h" -#include "include/i2caux_interface.h" -#include "link_hwss.h" -#include "hw_sequencer.h" -#include "dc_link_dp.h" -#include "dc_link_ddc.h" -#include "dm_helpers.h" -#include "dpcd_defs.h" -#include "dsc.h" -#include "resource.h" -#include "link_enc_cfg.h" -#include "clk_mgr.h" -#include "inc/link_dpcd.h" -#include "dccg.h" - -static uint8_t convert_to_count(uint8_t lttpr_repeater_count) -{ - switch (lttpr_repeater_count) { - case 0x80: // 1 lttpr repeater - return 1; - case 0x40: // 2 lttpr repeaters - return 2; - case 0x20: // 3 lttpr repeaters - return 3; - case 0x10: // 4 lttpr repeaters - return 4; - case 0x08: // 5 lttpr repeaters - return 5; - case 0x04: // 6 lttpr repeaters - return 6; - case 0x02: // 7 lttpr repeaters - return 7; - case 0x01: // 8 lttpr repeaters - return 8; - default: - break; - } - return 0; // invalid value -} - -static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) -{ - return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); -} - -void dp_receiver_power_ctrl(struct dc_link *link, bool on) -{ - uint8_t state; - - state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3; - - if (link->sync_lt_in_progress) - return; - - core_link_write_dpcd(link, DP_SET_POWER, &state, - sizeof(state)); -} - -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) -{ - if (link != NULL && link->dc->debug.enable_driver_sequence_debug) - core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, - &dp_test_mode, sizeof(dp_test_mode)); -} - -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - struct link_encoder *link_enc; - struct dc *dc = link->ctx->dc; - struct dmcu *dmcu = dc->res_pool->dmcu; - - struct pipe_ctx *pipes = - link->dc->current_state->res_ctx.pipe_ctx; - struct clock_source *dp_cs = - link->dc->res_pool->dp_clock_source; - unsigned int i; - - /* Link should always be assigned encoder when en-/disabling. 
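link_enc_cfg_get_link_enc(), added to dc_link_enc_cfg.c above, is what lets the FEC paths earlier in the patch drop their open-coded is_dig_mapping_flexible checks: it returns the statically mapped encoder when the DIG mapping is fixed, otherwise the encoder currently assigned to the link, falling back to the next available one. Call sites reduce to the pattern used in dp_set_fec_ready()/dp_set_fec_enable() (sketch of that pattern only):

    struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);

    ASSERT(link_enc);
    /* proceed with link_enc->funcs->... once the encoder is known */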
*/ - if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); - else - link_enc = link->link_enc; - ASSERT(link_enc); - - if (link->connector_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_power_control(link, true); - link->dc->hwss.edp_wait_for_hpd_ready(link, true); - } - - /* If the current pixel clock source is not DTO(happens after - * switching from HDMI passive dongle to DP on the same connector), - * switch the pixel clock source to DTO. - */ - for (i = 0; i < MAX_PIPES; i++) { - if (pipes[i].stream != NULL && - pipes[i].stream->link == link) { - if (pipes[i].clock_source != NULL && - pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { - pipes[i].clock_source = dp_cs; - pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz = - pipes[i].stream->timing.pix_clk_100hz; - pipes[i].clock_source->funcs->program_pix_clk( - pipes[i].clock_source, - &pipes[i].stream_res.pix_clk_params, - &pipes[i].pll_settings); - } - } - } - - link->cur_link_settings = *link_settings; - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - /* TODO - DP2.0 HW: notify link rate change here */ - } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); - } -#else - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -#endif - if (dmcu != NULL && dmcu->funcs->lock_phy) - dmcu->funcs->lock_phy(dmcu); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - enable_dp_hpo_output(link, link_res, link_settings); - } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { - if (dc_is_dp_sst_signal(signal)) { - link_enc->funcs->enable_dp_output( - link_enc, - link_settings, - clock_source); - } else { - link_enc->funcs->enable_dp_mst_output( - link_enc, - link_settings, - clock_source); - } - } -#else - if (dc_is_dp_sst_signal(signal)) { - link_enc->funcs->enable_dp_output( - link_enc, - link_settings, - clock_source); - } else { - link_enc->funcs->enable_dp_mst_output( - link_enc, - link_settings, - clock_source); - } -#endif - if (dmcu != NULL && dmcu->funcs->unlock_phy) - dmcu->funcs->unlock_phy(dmcu); - - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); - dp_receiver_power_ctrl(link, true); -} - -void edp_add_delay_for_T9(struct dc_link *link) -{ - if (link->local_sink && - link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0) - udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000); -} - -bool edp_receiver_ready_T9(struct dc_link *link) -{ - unsigned int tries = 0; - unsigned char sinkstatus = 0; - unsigned char edpRev = 0; - enum dc_status result; - - result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - if (result == DC_OK && edpRev >= DP_EDP_12) { - do { - sinkstatus = 1; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 0) - break; - if (result != DC_OK) - break; - udelay(100); //MAx T9 - } while (++tries < 50); - } - - return result; -} -bool edp_receiver_ready_T7(struct dc_link *link) -{ - unsigned char sinkstatus = 0; - unsigned char edpRev = 0; - enum 
dc_status result; - - /* use absolute time stamp to constrain max T7*/ - unsigned long long enter_timestamp = 0; - unsigned long long finish_timestamp = 0; - unsigned long long time_taken_in_ns = 0; - - result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - - if (result == DC_OK && edpRev >= DP_EDP_12) { - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - enter_timestamp = dm_get_timestamp(link->ctx); - do { - sinkstatus = 0; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 1) - break; - if (result != DC_OK) - break; - udelay(25); - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); - } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms - } - - if (link->local_sink && - link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) - udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); - - return result; -} - -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - struct dc *dc = link->ctx->dc; - struct dmcu *dmcu = dc->res_pool->dmcu; -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc; -#endif - struct link_encoder *link_enc; - - /* Link should always be assigned encoder when en-/disabling. */ - if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); - else - link_enc = link->link_enc; - ASSERT(link_enc); - - if (!link->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(link, false); - - if (signal == SIGNAL_TYPE_EDP) { - if (link->dc->hwss.edp_backlight_control) - link->dc->hwss.edp_backlight_control(link, false); -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - disable_dp_hpo_output(link, link_res, signal); - else - link_enc->funcs->disable_output(link_enc, signal); -#else - link_enc->funcs->disable_output(link_enc, signal); -#endif - link->dc->hwss.edp_power_control(link, false); - } else { - if (dmcu != NULL && dmcu->funcs->lock_phy) - dmcu->funcs->lock_phy(dmcu); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && - hpo_link_enc) - disable_dp_hpo_output(link, link_res, signal); - else - link_enc->funcs->disable_output(link_enc, signal); -#else - link_enc->funcs->disable_output(link_enc, signal); -#endif - if (dmcu != NULL && dmcu->funcs->unlock_phy) - dmcu->funcs->unlock_phy(dmcu); - } - - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); - - /* Clear current link setting.*/ - memset(&link->cur_link_settings, 0, - sizeof(link->cur_link_settings)); - - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -} - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count > 0) - return; - - dp_disable_link_phy(link, link_res, signal); - - /* set the sink to SST mode after disabling the link */ - dp_enable_mst_on_sink(link, false); -} - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t 
offset) -{ - enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; - break; -#if defined(CONFIG_DRM_AMD_DC_DCN) - case DP_128b_132b_TPS1: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; - break; - case DP_128b_132b_TPS2: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; - break; -#endif - default: - break; - } - - dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); - - return true; -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -#define DC_LOGGER \ - link->ctx->logger -#endif -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings *link_settings, - uint32_t offset) -{ - struct link_encoder *encoder = link->link_enc; - - if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset)) - return; - - /* call Encoder to set lane settings */ -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link_settings->link_settings) == - DP_128b_132b_ENCODING) { - link_res->hpo_dp_link_enc->funcs->set_ffe( - link_res->hpo_dp_link_enc, - &link_settings->link_settings, - link_settings->lane_settings[0].FFE_PRESET.raw); - } else if (dp_get_link_encoding_format(&link_settings->link_settings) - == DP_8b_10b_ENCODING) { - encoder->funcs->dp_set_lane_settings(encoder, link_settings); - } -#else - encoder->funcs->dp_set_lane_settings(encoder, link_settings); -#endif - memmove(link->cur_lane_setting, - link_settings->lane_settings, - sizeof(link->cur_lane_setting)); -} - -void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size) -{ - struct encoder_set_dp_phy_pattern_param pattern_param = {0}; - struct link_encoder *encoder; -#if defined(CONFIG_DRM_AMD_DC_DCN) - enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings); -#endif - - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. 
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - encoder = link->link_enc; - - pattern_param.dp_phy_pattern = test_pattern; - pattern_param.custom_pattern = custom_pattern; - pattern_param.custom_pattern_size = custom_pattern_size; - pattern_param.dp_panel_mode = dp_get_panel_mode(link); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - switch (link_encoding_format) { - case DP_128b_132b_ENCODING: - link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( - link_res->hpo_dp_link_enc, &pattern_param); - break; - case DP_8b_10b_ENCODING: - ASSERT(encoder); - encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); - break; - default: - DC_LOG_ERROR("%s: Unknown link encoding format.", __func__); - break; - } -#else - encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); -#endif - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); -} -#if defined(CONFIG_DRM_AMD_DC_DCN) -#undef DC_LOGGER -#endif - -void dp_retrain_link_dp_test(struct dc_link *link, - struct dc_link_settings *link_setting, - bool skip_video_pattern) -{ - struct pipe_ctx *pipes = - &link->dc->current_state->res_ctx.pipe_ctx[0]; - unsigned int i; - - for (i = 0; i < MAX_PIPES; i++) { - if (pipes[i].stream != NULL && - !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && - pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL && - pipes[i].stream->link == link) { - udelay(100); - - pipes[i].stream_res.stream_enc->funcs->dp_blank(link, - pipes[i].stream_res.stream_enc); - - /* disable any test pattern that might be active */ - dp_set_hw_test_pattern(link, &pipes[i].link_res, - DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - - dp_receiver_power_ctrl(link, false); - - link->dc->hwss.disable_stream(&pipes[i]); - if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) - (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); - - if (link->link_enc) - link->link_enc->funcs->disable_output( - link->link_enc, - SIGNAL_TYPE_DISPLAY_PORT); - - /* Clear current link setting. 
*/ - memset(&link->cur_link_settings, 0, - sizeof(link->cur_link_settings)); - - perform_link_training_with_retries( - link_setting, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - &pipes[i], - SIGNAL_TYPE_DISPLAY_PORT, - false); - - link->dc->hwss.enable_stream(&pipes[i]); - - link->dc->hwss.unblank_stream(&pipes[i], - link_setting); - - if (pipes[i].stream_res.audio) { - /* notify audio driver for - * audio modes of monitor */ - pipes[i].stream_res.audio->funcs->az_enable( - pipes[i].stream_res.audio); - - /* un-mute audio */ - /* TODO: audio should be per stream rather than - * per link */ - pipes[i].stream_res.stream_enc->funcs-> - audio_mute_control( - pipes[i].stream_res.stream_enc, false); - } - } - } -} - -#define DC_LOGGER \ - dsc->ctx->logger -static void dsc_optc_config_log(struct display_stream_compressor *dsc, - struct dsc_optc_config *config) -{ - uint32_t precision = 1 << 28; - uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; - uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; - uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; - - /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC - * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is - * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal - */ - ll_bytes_per_pix_fraq *= 10000000; - ll_bytes_per_pix_fraq /= precision; - - DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", - config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); - DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); - DC_LOG_DSC("\tslice_width %d", config->slice_width); -} - -bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - bool result = false; - - if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - result = true; - else - result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); - return result; -} - -/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, - * i.e. after dp_enable_dsc_on_rx() had been called - */ -void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dc_stream_state *stream = pipe_ctx->stream; - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - if (enable) { - struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; - enum optc_dsc_mode optc_dsc_mode; - - /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; - dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; - dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; - dsc_cfg.color_depth = stream->timing.display_color_depth; - dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; - dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); - dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; - - dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); - dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; - - odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); - odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); - } - dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; - dsc_cfg.pic_width *= opp_cnt; - - optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; - - /* Enable DSC in encoder */ -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !is_dp_128b_132b_signal(pipe_ctx)) { -#else - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -#endif - DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); - dsc_optc_config_log(dsc, &dsc_optc_cfg); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, - optc_dsc_mode, - dsc_optc_cfg.bytes_per_pixel, - dsc_optc_cfg.slice_width); - - /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ - } - - /* Enable DSC in OPTC */ - DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); - dsc_optc_config_log(dsc, &dsc_optc_cfg); - pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, - optc_dsc_mode, - dsc_optc_cfg.bytes_per_pixel, - dsc_optc_cfg.slice_width); - } else { - /* disable DSC in OPTC */ - pipe_ctx->stream_res.tg->funcs->set_dsc_config( - pipe_ctx->stream_res.tg, - OPTC_DSC_DISABLED, 0, 0); - - /* disable DSC in stream encoder */ - if (dc_is_dp_signal(stream->signal)) { - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - false, - NULL, - true); - else -#endif - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( - pipe_ctx->stream_res.stream_enc, - OPTC_DSC_DISABLED, 0, 0); - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL, true); - } - } - - /* disable DSC block */ - pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); - } -} - -bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - bool result = false; - - if (!pipe_ctx->stream->timing.flags.DSC) - goto out; - if (!dsc) - goto out; - - if (enable) { - { - dp_set_dsc_on_stream(pipe_ctx, true); - result = true; - } - } else { - dp_set_dsc_on_rx(pipe_ctx, false); - dp_set_dsc_on_stream(pipe_ctx, false); - result = true; - } -out: - return result; -} - -/* - * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; - * hence PPS info packet update need to use frame update instead of immediate update. 
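dsc_optc_config_log(), both the copy kept in dc_link_dp.c and the one removed here, prints bytes_per_pixel as a fixed-point value with 1 << 28 fractional precision; seven decimal digits suffice because DSC's 1/16-bit bits-per-pixel granularity corresponds to 1/128 of a byte. A worked example for a 12 bpp stream, i.e. 1.5 bytes per pixel:

    uint32_t bytes_per_pixel = 0x18000000;	/* 1.5 * (1 << 28) */
    uint32_t precision = 1 << 28;		/* 0x10000000 */
    uint32_t int_part = bytes_per_pixel / precision;	/* 1 */
    uint64_t frac = bytes_per_pixel % precision;	/* 0x08000000 */

    frac = frac * 10000000 / precision;		/* 5000000 */
    /* logged as "bytes_per_pixel 0x18000000 (1.5000000)" */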
- * Added parameter immediate_update for this purpose. - * The decision to use frame update is hard-coded in function dp_update_dsc_config(), - * which is the only place where a "false" would be passed in for param immediate_update. - * - * immediate_update is only applicable when DSC is enabled. - */ -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc_stream_state *stream = pipe_ctx->stream; - - if (!pipe_ctx->stream->timing.flags.DSC || !dsc) - return false; - - if (enable) { - struct dsc_config dsc_cfg; - uint8_t dsc_packed_pps[128]; - - memset(&dsc_cfg, 0, sizeof(dsc_cfg)); - memset(dsc_packed_pps, 0, 128); - - /* Enable DSC hw block */ - dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; - dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; - dsc_cfg.color_depth = stream->timing.display_color_depth; - dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; - dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - - DC_LOG_DSC(" "); - dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); - if (dc_is_dp_signal(stream->signal)) { - DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - true, - &dsc_packed_pps[0], - immediate_update); - else -#endif - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, - true, - &dsc_packed_pps[0], - immediate_update); - } - } else { - /* disable DSC PPS in stream encoder */ - if (dc_is_dp_signal(stream->signal)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (is_dp_128b_132b_signal(pipe_ctx)) - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.hpo_dp_stream_enc, - false, - NULL, - true); - else -#endif - pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( - pipe_ctx->stream_res.stream_enc, false, NULL, true); - } - } - - return true; -} - - -bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) -{ - struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - - if (!pipe_ctx->stream->timing.flags.DSC) - return false; - if (!dsc) - return false; - - dp_set_dsc_on_stream(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true, false); - return true; -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -#undef DC_LOGGER -#define DC_LOGGER \ - link->ctx->logger - -static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) -{ - switch (link->link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - return PHYD32CLKA; - case TRANSMITTER_UNIPHY_B: - return PHYD32CLKB; - case TRANSMITTER_UNIPHY_C: - return PHYD32CLKC; - case TRANSMITTER_UNIPHY_D: - return PHYD32CLKD; - case TRANSMITTER_UNIPHY_E: - return PHYD32CLKE; - default: - return PHYD32CLKA; - } -} - -void enable_dp_hpo_output(struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings) -{ - const struct dc *dc = link->dc; - enum phyd32clk_clock_source phyd32clk; - - /* Enable PHY PLL at target bit rate - * UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz) - * UBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz) - * 
UHBR20 = 20Gbps (SYMCLK32 = 625MHz) - */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - switch (link_settings->link_rate) { - case LINK_RATE_UHBR10: - dm_set_phyd32clk(dc->ctx, 312500); - break; - case LINK_RATE_UHBR13_5: - dm_set_phyd32clk(dc->ctx, 412875); - break; - case LINK_RATE_UHBR20: - dm_set_phyd32clk(dc->ctx, 625000); - break; - default: - return; - } - } else { - /* DP2.0 HW: call transmitter control to enable PHY */ - link_res->hpo_dp_link_enc->funcs->enable_link_phy( - link_res->hpo_dp_link_enc, - link_settings, - link->link_enc->transmitter, - link->link_enc->hpd_source); - } - - /* DCCG muxing and DTBCLK DTO */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_PHYD32CLK, - true); - - phyd32clk = get_phyd32clk_src(link); - dc->res_pool->dccg->funcs->enable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst, - phyd32clk); - link_res->hpo_dp_link_enc->funcs->link_enable( - link_res->hpo_dp_link_enc, - link_settings->lane_count); - } -} - -void disable_dp_hpo_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal) -{ - const struct dc *dc = link->dc; - - link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); - - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->res_pool->dccg->funcs->disable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst); - - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, - false); - - dm_set_phyd32clk(dc->ctx, 0); - } else { - /* DP2.0 HW: call transmitter control to disable PHY */ - link_res->hpo_dp_link_enc->funcs->disable_link_phy( - link_res->hpo_dp_link_enc, - signal); - } -} - -void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct pipe_ctx *odm_pipe; - int odm_combine_num_segments = 1; - enum phyd32clk_clock_source phyd32clk; - - if (enable) { - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - odm_combine_num_segments++; - - dc->res_pool->dccg->funcs->set_dpstreamclk( - dc->res_pool->dccg, - DTBCLK0, - pipe_ctx->stream_res.tg->inst); - - phyd32clk = get_phyd32clk_src(stream->link); - dc->res_pool->dccg->funcs->enable_symclk32_se( - dc->res_pool->dccg, - pipe_ctx->stream_res.hpo_dp_stream_enc->inst, - phyd32clk); - - dc->res_pool->dccg->funcs->set_dtbclk_dto( - dc->res_pool->dccg, - pipe_ctx->stream_res.tg->inst, - stream->phy_pix_clk, - odm_combine_num_segments, - &stream->timing); - } else { - dc->res_pool->dccg->funcs->set_dtbclk_dto( - dc->res_pool->dccg, - pipe_ctx->stream_res.tg->inst, - 0, - 0, - &stream->timing); - dc->res_pool->dccg->funcs->disable_symclk32_se( - dc->res_pool->dccg, - pipe_ctx->stream_res.hpo_dp_stream_enc->inst); - dc->res_pool->dccg->funcs->set_dpstreamclk( - dc->res_pool->dccg, - REFCLK, - pipe_ctx->stream_res.tg->inst); - } -} - -void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link) -{ - const struct dc *dc = link->dc; - struct dc_state *state = dc->current_state; - uint8_t i; - - for (i = 0; i < MAX_PIPES; i++) { - if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc && - state->res_ctx.pipe_ctx[i].stream && - state->res_ctx.pipe_ctx[i].stream->link == link && - !state->res_ctx.pipe_ctx[i].stream->dpms_off) { - 
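As a cross-check on the comment above: for 128b/132b links the SYMCLK32 symbol clock is simply the per-lane bit rate divided by the 32-bit symbol size, which is where 312.5 MHz, 421.875 MHz and 625 MHz come from. Incidentally, the FPGA UHBR13.5 branch writes 412875 kHz while 13.5 Gb/s / 32 is 421875 kHz, so that literal looks like a transposed digit, though the source tree is the authority. A standalone check of the arithmetic (LINK_RATE_UHBR* values encode the rate in 10 Mb/s units, as noted in the dc_dp_types.h hunk later in this patch):

/* SYMCLK32 = per-lane bit rate / 32 for 128b/132b link rates. */
#include <stdio.h>

static unsigned int symclk32_khz(unsigned int link_rate_in_10mbps)
{
	/* rate * 10000 -> kbit/s per lane; 32 bits per link symbol. */
	return link_rate_in_10mbps * 10000u / 32u;
}

int main(void)
{
	const unsigned int uhbr[] = { 1000, 1350, 2000 }; /* UHBR10, UHBR13.5, UHBR20 */

	for (int i = 0; i < 3; i++)
		printf("%u Mb/s per lane -> SYMCLK32 = %u kHz\n",
		       uhbr[i] * 10, symclk32_khz(uhbr[i]));
	/* Prints 312500, 421875 and 625000 kHz. */
	return 0;
}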
setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false); - } - } -} - -#undef DC_LOGGER -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b3912ff9dc91..7af153434e9e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -43,6 +43,10 @@ #include "dpcd_defs.h" #include "link_enc_cfg.h" #include "dc_link_dp.h" +#include "virtual/virtual_link_hwss.h" +#include "link/link_hwss_dio.h" +#include "link/link_hwss_dpia.h" +#include "link/link_hwss_hpo_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -62,6 +66,8 @@ #include "dcn302/dcn302_resource.h" #include "dcn303/dcn303_resource.h" #include "dcn31/dcn31_resource.h" +#include "dcn315/dcn315_resource.h" +#include "dcn316/dcn316_resource.h" #endif #define DC_LOGGER_INIT(logger) @@ -131,7 +137,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) case FAMILY_NV: dc_version = DCN_VERSION_2_0; - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) { dc_version = DCN_VERSION_2_01; break; } @@ -151,6 +157,14 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_1; break; + case AMDGPU_FAMILY_GC_10_3_6: + if (ASICREV_IS_GC_10_3_6(asic_id.hw_internal_rev)) + dc_version = DCN_VERSION_3_15; + break; + case AMDGPU_FAMILY_GC_10_3_7: + if (ASICREV_IS_GC_10_3_7(asic_id.hw_internal_rev)) + dc_version = DCN_VERSION_3_16; + break; #endif default: @@ -242,6 +256,12 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_3_1: res_pool = dcn31_create_resource_pool(init_data, dc); break; + case DCN_VERSION_3_15: + res_pool = dcn315_create_resource_pool(init_data, dc); + break; + case DCN_VERSION_3_16: + res_pool = dcn316_create_resource_pool(init_data, dc); + break; #endif default: break; @@ -356,7 +376,6 @@ bool resource_construct( } } -#if defined(CONFIG_DRM_AMD_DC_DCN) pool->hpo_dp_stream_enc_count = 0; if (create_funcs->create_hpo_dp_stream_encoder) { for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) { @@ -377,7 +396,6 @@ bool resource_construct( pool->hpo_dp_link_enc_count++; } } -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) for (i = 0; i < caps->num_mpc_3dlut; i++) { @@ -1612,8 +1630,8 @@ bool dc_add_all_planes_for_stream( return add_all_planes_for_stream(dc, stream, &set, 1, context); } -static bool is_timing_changed(struct dc_stream_state *cur_stream, - struct dc_stream_state *new_stream) +bool is_timing_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream) { if (cur_stream == NULL) return true; @@ -1640,6 +1658,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; @@ -1674,8 +1695,8 @@ bool dc_is_stream_unchanged( /* * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams. 
*/ -bool dc_is_stream_scaling_unchanged( - struct dc_stream_state *old_stream, struct dc_stream_state *stream) +bool dc_is_stream_scaling_unchanged(struct dc_stream_state *old_stream, + struct dc_stream_state *stream) { if (old_stream == stream) return true; @@ -1710,7 +1731,6 @@ static void update_stream_engine_usage( } } -#if defined(CONFIG_DRM_AMD_DC_DCN) static void update_hpo_dp_stream_engine_usage( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1812,7 +1832,6 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, pipe_ctx->link_res.hpo_dp_link_enc = NULL; } } -#endif /* TODO: release audio object */ void update_audio_usage( @@ -1858,7 +1877,6 @@ static int acquire_first_free_pipe( return -1; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1876,7 +1894,6 @@ static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for return NULL; } -#endif static struct audio *find_first_free_audio( struct resource_context *res_ctx, @@ -1907,7 +1924,7 @@ static struct audio *find_first_free_audio( return pool->audios[i]; } } - return 0; + return NULL; } /* @@ -1964,11 +1981,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool, del_pipe->stream_res.stream_enc, false); - /* Release link encoder from stream in new dc_state. */ - if (dc->res_pool->funcs->link_enc_unassign) - dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); - -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, @@ -1976,7 +1988,6 @@ enum dc_status dc_remove_stream_from_ctx( false); remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream); } -#endif if (del_pipe->stream_res.audio) update_audio_usage( @@ -2173,7 +2184,7 @@ static void mark_seamless_boot_stream( if (dc->config.allow_seamless_boot_optimization && !dcb->funcs->is_accelerated_mode(dcb)) { - if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing)) + if (dc_validate_boot_timing(dc, stream->sink, &stream->timing)) stream->apply_seamless_boot_optimization = true; } } @@ -2229,7 +2240,6 @@ enum dc_status resource_map_pool_resources( pipe_ctx->stream_res.stream_enc, true); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* Allocate DP HPO Stream Encoder based on signal, hw capabilities * and link settings */ @@ -2254,7 +2264,6 @@ enum dc_status resource_map_pool_resources( return DC_NO_LINK_ENC_RESOURCE; } } -#endif /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && @@ -2326,6 +2335,9 @@ void dc_resource_state_construct( bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) { + if (dc->res_pool == NULL) + return false; + return dc->res_pool->res_cap->num_dsc > 0; } @@ -2606,6 +2618,8 @@ static void set_avi_info_frame( hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; ///VIC + if (pipe_ctx->stream->timing.hdmi_vic != 0) + vic = 0; format = stream->timing.timing_3d_format; /*todo, add 3DStereo support*/ if (format != TIMING_3D_FORMAT_NONE) { @@ -2924,12 +2938,10 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) return true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; if (pipe_ctx_old->link_res.hpo_dp_link_enc != 
pipe_ctx->link_res.hpo_dp_link_enc) return true; -#endif /* DIG link encoder resource assignment for stream changed. */ if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) { @@ -3196,10 +3208,9 @@ void get_audio_check(struct audio_info *aud_modes, } } -#if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( +static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc( const struct resource_context *res_ctx, - const struct resource_pool *pool, + const struct resource_pool *const pool, const struct dc_link *link) { struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL; @@ -3215,7 +3226,24 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( return hpo_dp_link_enc; } -#endif + +bool get_temp_dp_link_res(struct dc_link *link, + struct link_resource *link_res, + struct dc_link_settings *link_settings) +{ + const struct dc *dc = link->dc; + const struct resource_context *res_ctx = &dc->current_state->res_ctx; + + memset(link_res, 0, sizeof(*link_res)); + + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, + dc->res_pool, link); + if (!link_res->hpo_dp_link_enc) + return false; + } + return true; +} void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context) @@ -3303,3 +3331,36 @@ uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter tr #endif return phy_idx; } + +const struct link_hwss *get_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + /* Link_hwss is only accessible by getter function instead of accessing + * by pointers in dc with the intent to protect against breaking polymorphism. + */ + if (can_use_hpo_dp_link_hwss(link, link_res)) + /* TODO: some assumes that if decided link settings is 128b/132b + * channel coding format hpo_dp_link_enc should be used. + * Others believe that if hpo_dp_link_enc is available in link + * resource then hpo_dp_link_enc must be used. This bound between + * hpo_dp_link_enc != NULL and decided link settings is loosely coupled + * with a premise that both hpo_dp_link_enc pointer and decided link + * settings are determined based on single policy function like + * "decide_link_settings" from upper layer. This "convention" + * cannot be maintained and enforced at current level. + * Therefore a refactor is due so we can enforce a strong bound + * between those two parameters at this level. + * + * To put it simple, we want to make enforcement at low level so that + * we will not return link hwss if caller plans to do 8b/10b + * with an hpo encoder. 
Or we can return a very dummy one that doesn't + * do work for all functions + */ + return get_hpo_dp_link_hwss(); + else if (can_use_dpia_link_hwss(link, link_res)) + return get_dpia_link_hwss(); + else if (can_use_dio_link_hwss(link, link_res)) + return get_dio_link_hwss(); + else + return get_virtual_link_hwss(); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 57cf4cb82370..c4e871f358ab 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -273,6 +273,8 @@ static void program_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; dc->hwss.cursor_lock(dc, pipe_to_program, true); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -280,8 +282,11 @@ static void program_cursor_attributes( dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } - if (pipe_to_program) + if (pipe_to_program) { dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + } } #ifndef TRIM_FSFT @@ -713,6 +718,20 @@ enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc, } } +struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream) +{ + int i = 0; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &stream->ctx->dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream) + return pipe; + } + + return NULL; +} + void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) { DC_LOG_DC( @@ -737,5 +756,21 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) DC_LOG_DC( "\tlink: %d\n", stream->link->link_index); + + DC_LOG_DC( + "\tdsc: %d, mst_pbn: %d\n", + stream->timing.flags.DSC, + stream->timing.dsc_cfg.mst_pbn); + + if (stream->sink) { + if (stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + + DC_LOG_DC( + "\tdispname: %s signal: %x\n", + stream->sink->edid_caps.display_name, + stream->signal); + } + } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b51864890621..4ffab7bb1098 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.167" +#define DC_VER "3.2.177" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -195,10 +195,8 @@ struct dc_caps { unsigned int cursor_cache_size; struct dc_plane_cap planes[MAX_PLANES]; struct dc_color_caps color; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo; bool hdmi_frl_pcon_support; -#endif bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; @@ -307,7 +305,6 @@ struct dc_cap_funcs { struct link_training_settings; -#if defined(CONFIG_DRM_AMD_DC_DCN) union allow_lttpr_non_transparent_mode { struct { bool DP1_4A : 1; @@ -315,7 +312,7 @@ union allow_lttpr_non_transparent_mode { } bits; unsigned char raw; }; -#endif + /* Structure to hold configuration flags set by dm at dc creation. 
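The comment block in get_link_hwss() above is the crux of the new link_hwss layering: callers resolve the function table through the getter and drive the hardware through it, instead of reaching into link->link_enc or hpo_dp_link_enc and re-deciding DIO vs DPIA vs HPO themselves. A hedged sketch of the intended call pattern, assuming the dc/link headers from this patch; the setup_stream_encoder hook name matches the one used in the dce110_enable_stream hunk later in this series:

/* Illustrative caller: pick the hwss variant once, then stay behind it. */
static void example_connect_stream_encoder(struct pipe_ctx *pipe_ctx)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss =
			get_link_hwss(link, &pipe_ctx->link_res);

	/* No direct link_enc/hpo_dp_link_enc access here; whichever of the
	 * DIO, DPIA, HPO DP or virtual implementations was selected does
	 * the backend-specific work. */
	link_hwss->setup_stream_encoder(pipe_ctx);
}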
*/ struct dc_config { bool gpu_vm_support; @@ -323,16 +320,12 @@ struct dc_config { bool fbc_support; bool disable_fractional_pwm; bool allow_seamless_boot_optimization; - bool power_down_display_on_boot; + bool seamless_boot_edp_requested; bool edp_not_connected; bool edp_no_power_sequencing; bool force_enum_edp; bool forced_clocks; -#if defined(CONFIG_DRM_AMD_DC_DCN) union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode; -#else - bool allow_lttpr_non_transparent_mode; -#endif bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; @@ -361,6 +354,7 @@ enum dc_psr_power_opts { psr_power_opt_invalid = 0x0, psr_power_opt_smu_opt_static_screen = 0x1, psr_power_opt_z10_static_screen = 0x10, + psr_power_opt_ds_disable_allow = 0x100, }; enum dcc_option { @@ -397,6 +391,7 @@ enum dcn_pwr_state { enum dcn_zstate_support_state { DCN_ZSTATE_SUPPORT_UNKNOWN, DCN_ZSTATE_SUPPORT_ALLOW, + DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY, DCN_ZSTATE_SUPPORT_DISALLOW, }; #endif @@ -519,12 +514,13 @@ union root_clock_optimization_options { union dpia_debug_options { struct { - uint32_t disable_dpia:1; - uint32_t force_non_lttpr:1; - uint32_t extend_aux_rd_interval:1; - uint32_t disable_mst_dsc_work_around:1; - uint32_t hpd_delay_in_ms:12; - uint32_t reserved:16; + uint32_t disable_dpia:1; /* bit 0 */ + uint32_t force_non_lttpr:1; /* bit 1 */ + uint32_t extend_aux_rd_interval:1; /* bit 2 */ + uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ + uint32_t hpd_delay_in_ms:12; /* bits 4-15 */ + uint32_t disable_force_tbt3_work_around:1; /* bit 16 */ + uint32_t reserved:15; } bits; uint32_t raw; }; @@ -688,13 +684,12 @@ struct dc_debug_options { bool disable_dsc_edp; unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - remove once tested */ bool legacy_dp2_lt; bool set_mst_en_for_sst; bool disable_uhbr; bool force_dp2_lt_fallback_method; -#endif + bool ignore_cable_id; union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; bool hpo_optimization; @@ -710,10 +705,13 @@ struct dc_debug_options { int crb_alloc_policy_min_disp_count; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; + bool enable_z9_disable_interface; bool enable_sw_cntl_psr; union dpia_debug_options dpia_debug; #endif bool apply_vendor_specific_lttpr_wa; + bool ignore_dpref_ss; + uint8_t psr_power_use_phy_fsm; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1131,7 +1129,7 @@ struct dc_validation_set { uint8_t plane_count; }; -bool dc_validate_seamless_boot_timing(const struct dc *dc, +bool dc_validate_boot_timing(const struct dc *dc, const struct dc_sink *sink, struct dc_crtc_timing *crtc_timing); @@ -1208,10 +1206,12 @@ struct dpcd_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; + bool is_dongle_type_one; /* branch device or sink device */ bool is_branch_dev; /* Dongle's downstream count. 
*/ union sink_count sink_count; + bool is_mst_capable; /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ struct dc_dongle_caps dongle_caps; @@ -1234,15 +1234,16 @@ struct dpcd_caps { union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; struct dc_lttpr_caps lttpr_caps; - struct psr_caps psr_caps; struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; union dp_main_line_channel_coding_cap channel_coding_cap; union dp_sink_video_fallback_formats fallback_formats; union dp_fec_capability1 fec_cap1; -#endif + union dp_cable_id cable_id; + uint8_t edp_rev; + union edp_alpm_caps alpm_caps; + struct edp_psr_info psr_info; }; union dpcd_sink_ext_caps { @@ -1448,8 +1449,11 @@ void dc_z10_restore(const struct dc *dc); void dc_z10_save_init(struct dc *dc); #endif +bool dc_is_dmub_outbox_supported(struct dc *dc); bool dc_enable_dmub_notifications(struct dc *dc); +void dc_enable_dmub_outbox(struct dc *dc); + bool dc_process_dmub_aux_transfer_async(struct dc *dc, uint32_t link_index, struct aux_payload *payload); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 353dac420f34..36ac2a8746bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -53,7 +53,6 @@ enum dc_link_rate { LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane -#if defined(CONFIG_DRM_AMD_DC_DCN) LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane /* Starting from DP2.0 link rate enum directly represents actual * link rate value in unit of 10 mbps @@ -61,9 +60,6 @@ enum dc_link_rate { LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane -#else - LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane -#endif }; enum dc_link_spread { @@ -100,7 +96,6 @@ enum dc_post_cursor2 { POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3, }; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_dp_ffe_preset_level { DP_FFE_PRESET_LEVEL0 = 0, DP_FFE_PRESET_LEVEL1, @@ -120,7 +115,6 @@ enum dc_dp_ffe_preset_level { DP_FFE_PRESET_LEVEL15, DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15, }; -#endif enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_1 = 0, @@ -128,19 +122,15 @@ enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_3, DP_TRAINING_PATTERN_SEQUENCE_4, DP_TRAINING_PATTERN_VIDEOIDLE, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_128b_132b_TPS1, DP_128b_132b_TPS2, DP_128b_132b_TPS2_CDS, -#endif }; enum dp_link_encoding { DP_UNKNOWN_ENCODING = 0, DP_8b_10b_ENCODING = 1, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_128b_132b_ENCODING = 2, -#endif }; struct dc_link_settings { @@ -152,7 +142,6 @@ struct dc_link_settings { bool dpcd_source_device_specific_field_support; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset { struct { uint8_t level : 4; @@ -163,24 +152,19 @@ union dc_dp_ffe_preset { } settings; uint8_t raw; }; -#endif struct dc_lane_settings { enum dc_voltage_swing VOLTAGE_SWING; enum dc_pre_emphasis PRE_EMPHASIS; enum dc_post_cursor2 POST_CURSOR2; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset FFE_PRESET; -#endif }; struct dc_link_training_overrides { enum dc_voltage_swing *voltage_swing; 
enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset *ffe_preset; -#endif uint16_t *cr_pattern_time; uint16_t *eq_pattern_time; @@ -194,7 +178,6 @@ struct dc_link_training_overrides { bool *fec_enable; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union payload_table_update_status { struct { uint8_t VC_PAYLOAD_TABLE_UPDATED:1; @@ -202,7 +185,6 @@ union payload_table_update_status { } bits; uint8_t raw; }; -#endif union dpcd_rev { struct { @@ -291,14 +273,10 @@ union lane_align_status_updated { struct { uint8_t INTERLANE_ALIGN_DONE:1; uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1; uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1; uint8_t LT_FAILED_128b_132b:1; uint8_t RESERVED:1; -#else - uint8_t RESERVED:4; -#endif uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1; uint8_t LINK_STATUS_UPDATED:1; } bits; @@ -311,12 +289,10 @@ union lane_adjust { uint8_t PRE_EMPHASIS_LANE:2; uint8_t RESERVED:4; } bits; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct { uint8_t PRESET_VALUE :4; uint8_t RESERVED :4; } tx_ffe; -#endif uint8_t raw; }; @@ -346,12 +322,10 @@ union dpcd_training_lane { uint8_t MAX_PRE_EMPHASIS_REACHED:1; uint8_t RESERVED:2; } bits; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct { uint8_t PRESET_VALUE :4; uint8_t RESERVED :4; } tx_ffe; -#endif uint8_t raw; }; @@ -665,18 +639,9 @@ union test_response { union phy_test_pattern { struct { -#if defined(CONFIG_DRM_AMD_DC_DCN) /* This field is 7 bits for DP2.0 */ uint8_t PATTERN :7; uint8_t RESERVED :1; -#else - /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 - * and 3 bits for DP1.2. - */ - uint8_t PATTERN :3; - /* BY speci, bit7:2 is 0 for DP1.1. */ - uint8_t RESERVED :5; -#endif } bits; uint8_t raw; }; @@ -754,14 +719,10 @@ union dpcd_fec_capability { uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t BIT_ERROR_COUNT_CAPABLE:1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1; uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1; uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1; -#else - uint8_t RESERVED:4; -#endif } bits; uint8_t raw; }; @@ -925,7 +886,6 @@ struct dpcd_usb4_dp_tunneling_info { uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) #ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP #define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006 #endif @@ -941,6 +901,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_LINK_SQUARE_PATTERN #define DP_LINK_SQUARE_PATTERN 0x10F #endif +#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX +#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110 +#endif #ifndef DP_DSC_CONFIGURATION #define DP_DSC_CONFIGURATION 0x161 #endif @@ -953,6 +916,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 #endif +#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX +#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217 +#endif #ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 #endif @@ -994,8 +960,8 @@ struct dpcd_usb4_dp_tunneling_info { #endif #ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION #define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) -#endif /* TODO - Use DRM header to replace above once available */ +#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION union dp_main_line_channel_coding_cap { struct { @@ -1052,6 +1018,16 @@ union dp_fec_capability1 { 
uint8_t raw; }; +union dp_cable_id { + struct { + uint8_t UHBR10_20_CAPABILITY :2; + uint8_t UHBR13_5_CAPABILITY :1; + uint8_t CABLE_TYPE :3; + uint8_t RESERVED :2; + } bits; + uint8_t raw; +}; + struct dp_color_depth_caps { uint8_t support_6bpc :1; uint8_t support_8bpc :1; @@ -1091,6 +1067,34 @@ union dp_128b_132b_training_aux_rd_interval { } bits; uint8_t raw; }; -#endif + +union edp_alpm_caps { + struct { + uint8_t AUX_WAKE_ALPM_CAP :1; + uint8_t PM_STATE_2A_SUPPORT :1; + uint8_t AUX_LESS_ALPM_CAP :1; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +union edp_psr_dpcd_caps { + struct { + uint8_t LINK_TRAINING_ON_EXIT_NOT_REQUIRED :1; + uint8_t PSR_SETUP_TIME :3; + uint8_t Y_COORDINATE_REQUIRED :1; + uint8_t SU_GRANULARITY_REQUIRED :1; + uint8_t FRAME_SYNC_IS_NOT_NEEDED_FOR_SU :1; + uint8_t RESERVED :1; + } bits; + uint8_t raw; +}; + +struct edp_psr_info { + uint8_t psr_version; + union edp_psr_dpcd_caps psr_dpcd_caps; + uint8_t psr2_su_y_granularity_cap; + uint8_t force_psrsu_cap; +}; #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index ab6bc5d79012..f43cce16bb6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -588,6 +588,66 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } + +uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx, + uint32_t index, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, + ...) +{ + uint32_t shift, mask, field_value; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t); + + reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); + i++; + } + + dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val); + va_end(ap); + + return reg_val; +} + +uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...) 
+{ + uint32_t shift, mask, *field_value; + uint32_t value = 0; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index); + *field_value1 = get_reg_field_value_ex(value, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t *); + + *field_value = get_reg_field_value_ex(value, mask, shift); + i++; + } + + va_end(ap); + + return value; +} + void reg_sequence_start_gather(const struct dc_context *ctx) { /* if reg sequence is supported and enabled, set flag to diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index eac34f591a3f..c964f598755a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -745,6 +745,7 @@ struct dc_dsc_config { bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ #endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ + uint32_t mst_pbn; /* pbn of display on dsc mst hub */ }; struct dc_crtc_timing { uint32_t h_total; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index c0e37ad0e26c..aa818bf840eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -43,14 +43,16 @@ struct dc_link_status { struct dpcd_caps *dpcd_caps; }; +struct dprx_states { + bool cable_id_written; +}; + /* DP MST stream allocation (payload bandwidth number) */ struct link_mst_stream_allocation { /* DIG front */ const struct stream_encoder *stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* HPO DP Stream Encoder */ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc; -#endif /* associate DRM payload table with DC stream encoder */ uint8_t vcp_id; /* number of slots required for the DP stream in transport packet */ @@ -74,6 +76,28 @@ struct link_trace { struct time_stamp time_stamp; }; +struct dp_trace_lt_counts { + unsigned int total; + unsigned int fail; +}; + +struct dp_trace_lt { + struct dp_trace_lt_counts counts; + struct dp_trace_timestamps { + unsigned long long start; + unsigned long long end; + } timestamps; + enum link_training_result result; + bool is_logged; +}; + +struct dp_trace { + struct dp_trace_lt detect_lt_trace; + struct dp_trace_lt commit_lt_trace; + unsigned int link_loss_count; + bool is_initialized; +}; + /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink @@ -119,6 +143,8 @@ struct dc_link { bool edp_sink_present; + struct dp_trace dp_trace; + /* caps is the same as reported_link_cap. link_traing use * reported_link_cap. Will clean up. TODO */ @@ -197,10 +223,13 @@ struct dc_link { bool dp_mot_reset_segment; /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ bool dpia_mst_dsc_always_on; + /* Forced DPIA into TBT3 compatibility mode. 
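The two *_sync indirect-register helpers added above take their field list as repeated (shift, mask, value-or-pointer) triplets, with n counting all fields including the first explicit one; the get variant also returns the raw register word so it can feed a follow-up update. A hedged usage sketch; EXAMPLE_PCIE_INDEX and the FIELD_A_*/FIELD_B_* macros are placeholders for whatever index and field definitions a real caller would have:

/* Read two fields from one indexed PCIE register, then write it back with
 * one field changed. n = 2 covers the explicit first triplet plus one
 * varargs triplet in both calls. */
static uint32_t example_bump_field_a(const struct dc_context *ctx)
{
	uint32_t field_a = 0, field_b = 0;
	uint32_t reg_val;

	reg_val = generic_indirect_reg_get_sync(ctx, EXAMPLE_PCIE_INDEX, 2,
			FIELD_A_SHIFT, FIELD_A_MASK, &field_a,
			FIELD_B_SHIFT, FIELD_B_MASK, &field_b);

	/* reg_val already holds FIELD_B, so re-passing it is redundant but
	 * shows the triplet format; FIELD_A is written back incremented. */
	return generic_indirect_reg_update_ex_sync(ctx, EXAMPLE_PCIE_INDEX,
			reg_val, 2,
			FIELD_A_SHIFT, FIELD_A_MASK, field_a + 1,
			FIELD_B_SHIFT, FIELD_B_MASK, field_b);
}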
*/ + bool dpia_forced_tbt3_mode; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; struct dc_link_status link_status; + struct dprx_states dprx_states; struct link_trace link_trace; struct gpio *hpd_gpio; @@ -307,6 +336,7 @@ void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); */ enum dc_detect_reason { DETECT_REASON_BOOT, + DETECT_REASON_RESUMEFROMS3S4, DETECT_REASON_HPD, DETECT_REASON_HPDRX, DETECT_REASON_FALLBACK, @@ -316,10 +346,8 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); -#endif /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). * Return: @@ -438,6 +466,11 @@ const struct dc_link_settings *dc_link_get_link_cap( void dc_link_overwrite_extended_receiver_cap( struct dc_link *link); +bool dc_is_oem_i2c_device_present( + struct dc *dc, + size_t slave_address +); + bool dc_submit_i2c( struct dc *dc, uint32_t link_index, @@ -453,14 +486,29 @@ uint32_t dc_bandwidth_in_kbps_from_timing( bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); -#endif -const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link); +void dc_link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res); /* take a snapshot of current link resource allocation state */ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); /* restore link resource allocation state from a snapshot */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); +void dc_link_clear_dprx_states(struct dc_link *link); +struct gpio *get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service); +void dp_trace_reset(struct dc_link *link); +bool dc_dp_trace_is_initialized(struct dc_link *link); +unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, + bool in_detection); +void dc_dp_trace_set_is_logged_flag(struct dc_link *link, + bool in_detection, + bool is_logged); +bool dc_dp_trace_is_logged(struct dc_link *link, + bool in_detection); +struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, + bool in_detection); +unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e37c4a10bfd5..99a750f561f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -115,12 +115,10 @@ struct periodic_interrupt_config { int lines_offset; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_mst_stream_bw_update { bool is_increase; // is bandwidth reduced or increased uint32_t mst_stream_bw; // new mst bandwidth in kbps }; -#endif union stream_update_flags { struct { @@ -132,9 +130,7 @@ union stream_update_flags { uint32_t gamut_remap:1; uint32_t wb_update:1; uint32_t dsc_changed : 1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t 
mst_bw : 1; -#endif } bits; uint32_t raw; @@ -288,9 +284,7 @@ struct dc_stream_update { struct dc_writeback_update *wb_update; struct dc_dsc_config *dsc_config; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_mst_stream_bw_update *mst_bw_update; -#endif struct dc_transfer_func *func_shaper; struct dc_3dlut *lut3d_func; @@ -530,4 +524,6 @@ bool dc_stream_get_crtc_position(struct dc *dc, unsigned int *v_pos, unsigned int *nom_v_pos); +struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream); + #endif /* DC_STREAM_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0285a4b38d05..2ba9f528c0fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -138,6 +138,7 @@ enum dc_edid_status { EDID_BAD_CHECKSUM, EDID_THE_SAME, EDID_FALL_BACK, + EDID_PARTIAL_VALID, }; enum act_return_status { @@ -395,14 +396,11 @@ struct dc_lttpr_caps { uint8_t max_link_rate; uint8_t phy_repeater_cnt; uint8_t max_ext_timeout; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; -#endif uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_dongle_dfp_cap_ext { bool supported; uint16_t max_pixel_rate_in_mps; @@ -414,7 +412,6 @@ struct dc_dongle_dfp_cap_ext { struct dp_color_depth_caps ycbcr422_color_depth_caps; struct dp_color_depth_caps ycbcr420_color_depth_caps; }; -#endif struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ @@ -429,10 +426,8 @@ struct dc_dongle_caps { bool is_dp_hdmi_ycbcr420_converter; uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t dp_hdmi_frl_max_link_bw_in_kbps; struct dc_dongle_dfp_cap_ext dfp_cap_ext; -#endif }; /* Scaling format */ enum scaling_transformation { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 6d42a9cc9916..8e814000db62 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -413,7 +413,8 @@ static bool acquire( return false; if (!acquire_engine(engine)) { - dal_ddc_close(ddc); + engine->ddc = ddc; + release_engine(engine); return false; } @@ -878,7 +879,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, default: DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, LOG_FLAG_Error_I2cAux, - "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case."); + "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case. 
Reply: %d", *payload->reply); goto fail; } break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 1435d7bc1f21..07359eb89efc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -450,6 +450,8 @@ void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } + if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss) + clk_mgr_dce->dprefclk_ss_percentage = 0; } } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 2c7eb982eabc..cc5128e67daf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -971,6 +971,98 @@ static bool dce112_program_pix_clk( return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool dcn31_program_pix_clk( + struct clock_source *clock_source, + struct pixel_clk_params *pix_clk_params, + struct pll_settings *pll_settings) +{ + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); + unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; + unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; + const struct pixel_rate_range_table_entry *e = + look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10); + struct bp_pixel_clock_parameters bp_pc_params = {0}; + enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; + // For these signal types Driver to program DP_DTO without calling VBIOS Command table + if (dc_is_dp_signal(pix_clk_params->signal_type)) { + if (e) { + /* Set DTO values: phase = target clock, modulo = reference clock*/ + REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor); + REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor); + } else { + /* Set DTO values: phase = target clock, modulo = reference clock*/ + REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100); + REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000); + } + REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); + } else { + if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { + unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; + unsigned dp_dto_ref_100hz = 7000000; + unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; + + /* Set DTO values: phase = target clock, modulo = reference clock */ + REG_WRITE(PHASE[inst], clock_100hz); + REG_WRITE(MODULO[inst], dp_dto_ref_100hz); + + /* Enable DTO */ + REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); + return true; + } + + /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ + bp_pc_params.controller_id = pix_clk_params->controller_id; + bp_pc_params.pll_id = clock_source->id; + bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; + bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; + bp_pc_params.signal_type = pix_clk_params->signal_type; + + // Make sure we send the correct color depth to DMUB for HDMI + if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { + switch (pix_clk_params->color_depth) { + case COLOR_DEPTH_888: + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; + break; + case COLOR_DEPTH_101010: + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_30; + break; + case COLOR_DEPTH_121212: + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_36; + break; + case COLOR_DEPTH_161616: + 
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_48; + break; + default: + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; + break; + } + bp_pc_params.color_depth = bp_pc_colour_depth; + } + + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { + bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = + pll_settings->use_external_clk; + bp_pc_params.flags.SET_XTALIN_REF_SRC = + !pll_settings->use_external_clk; + if (pix_clk_params->flags.SUPPORT_YCBCR420) { + bp_pc_params.flags.SUPPORT_YUV_420 = 1; + } + } + if (clk_src->bios->funcs->set_pixel_clock( + clk_src->bios, &bp_pc_params) != BP_RESULT_OK) + return false; + /* Resync deep color DTO */ + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) + dce112_program_pixel_clk_resync(clk_src, + pix_clk_params->signal_type, + pix_clk_params->color_depth, + pix_clk_params->flags.SUPPORT_YCBCR420); + } + + return true; +} +#endif static bool dce110_clock_source_power_down( struct clock_source *clk_src) @@ -1205,6 +1297,13 @@ static const struct clock_source_funcs dcn3_clk_src_funcs = { .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; + +static const struct clock_source_funcs dcn31_clk_src_funcs = { + .cs_power_down = dce110_clock_source_power_down, + .program_pix_clk = dcn31_program_pix_clk, + .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, + .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz +}; #endif /*****************************************/ /* Constructor */ @@ -1610,6 +1709,24 @@ bool dcn3_clk_src_construct( #endif #if defined(CONFIG_DRM_AMD_DC_DCN) +bool dcn31_clk_src_construct( + struct dce110_clk_src *clk_src, + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + const struct dce110_clk_src_shift *cs_shift, + const struct dce110_clk_src_mask *cs_mask) +{ + bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); + + clk_src->base.funcs = &dcn31_clk_src_funcs; + + return ret; +} +#endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) bool dcn301_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 692fa23ca02b..069de7649c8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -292,6 +292,15 @@ bool dcn301_clk_src_construct( const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); + +bool dcn31_clk_src_construct( + struct dce110_clk_src *clk_src, + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + const struct dce110_clk_src_shift *cs_shift, + const struct dce110_clk_src_mask *cs_mask); #endif /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c index dd41736bb5c4..f5cd2392fc5f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -25,6 +25,32 @@ #include "dce_i2c.h" #include "reg_helper.h" +bool dce_i2c_oem_device_present( + struct resource_pool *pool, + struct ddc_service *ddc, + size_t slave_address +) +{ + struct dc *dc = ddc->ctx->dc; + struct dc_bios *dcb = dc->ctx->dc_bios; + struct graphics_object_id id = {0}; + struct 
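The DP branch of dcn31_program_pix_clk() above programs the DP DTO as a plain ratio generator: the output clock is DPREFCLK * PHASE / MODULO, so writing PHASE = target pixel rate and MODULO = DPREFCLK (both ending up in Hz after the unit conversions in the hunk) reproduces the requested rate, and the video-optimized table entries just supply a pre-scaled phase/modulo pair for the *1.001-family rates. A standalone check of that relation; the 600 MHz reference and the pixel clock below are arbitrary example numbers:

/* DTO output = dprefclk * phase / modulo. */
#include <stdint.h>
#include <stdio.h>

static uint64_t dto_output_hz(uint64_t dprefclk_hz, uint64_t phase, uint64_t modulo)
{
	return dprefclk_hz * phase / modulo;
}

int main(void)
{
	uint64_t dprefclk_hz = 600000000ull;   /* example reference clock */
	uint64_t pix_clk_hz  = 533250000ull;   /* example target pixel clock */

	/* Mirrors PHASE = actual_pix_clk_100hz * 100, MODULO = dp_dto_ref_khz * 1000. */
	printf("DTO output = %llu Hz\n",
	       (unsigned long long)dto_output_hz(dprefclk_hz, pix_clk_hz, dprefclk_hz));
	return 0;
}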
graphics_object_i2c_info i2c_info; + + if (!dc->ctx->dc_bios->fw_info.oem_i2c_present) + return false; + + id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + id.enum_id = 0; + id.type = OBJECT_TYPE_GENERIC; + if (dcb->funcs->get_i2c_info(dcb, id, &i2c_info) != BP_RESULT_OK) + return false; + + if (i2c_info.i2c_slave_address != slave_address) + return false; + + return true; +} + bool dce_i2c_submit_command( struct resource_pool *pool, struct ddc *ddc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h index a171c5cd8439..535fd58de450 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h @@ -30,6 +30,12 @@ #include "dce_i2c_hw.h" #include "dce_i2c_sw.h" +bool dce_i2c_oem_device_present( + struct resource_pool *pool, + struct ddc_service *ddc, + size_t slave_address +); + bool dce_i2c_submit_command( struct resource_pool *pool, struct ddc *ddc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index f1c61d5aee6c..0bc41414481e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -1325,7 +1325,8 @@ void dce110_link_encoder_disable_output( void dce110_link_encoder_dp_set_lane_settings( struct link_encoder *enc, - const struct link_training_settings *link_settings) + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); union dpcd_training_lane_set training_lane_set = { { 0 } }; @@ -1340,26 +1341,26 @@ void dce110_link_encoder_dp_set_lane_settings( cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; cntl.transmitter = enc110->base.transmitter; cntl.connector_obj_id = enc110->base.connector; - cntl.lanes_number = link_settings->link_settings.lane_count; + cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc110->base.hpd_source; - cntl.pixel_clock = link_settings->link_settings.link_rate * + cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) { + for (lane = 0; lane < link_settings->lane_count; lane++) { /* translate lane settings */ training_lane_set.bits.VOLTAGE_SWING_SET = - link_settings->lane_settings[lane].VOLTAGE_SWING; + lane_settings[lane].VOLTAGE_SWING; training_lane_set.bits.PRE_EMPHASIS_SET = - link_settings->lane_settings[lane].PRE_EMPHASIS; + lane_settings[lane].PRE_EMPHASIS; /* post cursor 2 setting only applies to HBR2 link rate */ - if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) { + if (link_settings->link_rate == LINK_RATE_HIGH2) { /* this is passed to VBIOS * to program post cursor 2 level */ training_lane_set.bits.POST_CURSOR2_SET = - link_settings->lane_settings[lane].POST_CURSOR2; + lane_settings[lane].POST_CURSOR2; } cntl.lane_select = lane; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index fc6ade824c23..261c70e01e33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -279,7 +279,8 @@ void dce110_link_encoder_disable_output( /* set DP lane settings */ void dce110_link_encoder_dp_set_lane_settings( struct link_encoder *enc, - const struct link_training_settings *link_settings); + const struct dc_link_settings 
*link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); void dce110_link_encoder_dp_set_phy_pattern( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c index faad8555ddbb..fff1d07d865d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -22,20 +22,23 @@ * Authors: AMD */ -#include "dmub_outbox.h" +#include "dc.h" #include "dc_dmub_srv.h" +#include "dmub_outbox.h" #include "dmub/inc/dmub_cmd.h" -/** - * dmub_enable_outbox_notification - Sends inbox cmd to dmub to enable outbox1 - * messages with interrupt. Dmub sends outbox1 - * message and triggers outbox1 interrupt. - * @dc: dc structure +/* + * Function: dmub_enable_outbox_notification + * + * @brief + * Sends inbox cmd to dmub for enabling outbox notifications to x86. + * + * @param + * [in] dmub_srv: dmub_srv structure */ -void dmub_enable_outbox_notification(struct dc *dc) +void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) { union dmub_rb_cmd cmd; - struct dc_context *dc_ctx = dc->ctx; memset(&cmd, 0x0, sizeof(cmd)); cmd.outbox1_enable.header.type = DMUB_CMD__OUTBOX1_ENABLE; @@ -45,7 +48,7 @@ void dmub_enable_outbox_notification(struct dc *dc) sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; - dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc_ctx->dmub_srv); + dc_dmub_srv_cmd_queue(dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dmub_srv); + dc_dmub_srv_wait_idle(dmub_srv); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h index 4e0aa0d1a2d5..58ceabb9d497 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h @@ -26,8 +26,8 @@ #ifndef _DMUB_OUTBOX_H_ #define _DMUB_OUTBOX_H_ -#include "dc.h" +struct dc_dmub_srv; -void dmub_enable_outbox_notification(struct dc *dc); +void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv); #endif /* _DMUB_OUTBOX_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 87ed48d5530d..312c68172689 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -138,6 +138,10 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED; break; } + + if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED) + return false; + cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); @@ -316,6 +320,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->otg_inst = 0; // Misc + copy_settings_data->use_phy_fsm = link->ctx->dc->debug.psr_power_use_phy_fsm; copy_settings_data->psr_level = psr_context->psr_level.u32all; copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; copy_settings_data->multi_disp_optimizations_en = psr_context->allow_multi_disp_optimizations; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index eb2755bdb30e..248602c15f3a 100644 
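dce110_link_encoder_dp_set_lane_settings() above now takes the link settings plus a plain per-lane dc_lane_settings array, and for each lane packs the drive levels into a TRAINING_LANEx_SET-style byte before handing them to the VBIOS transmitter-control call. A simplified, self-contained model of that packing, using the bit layout of the dpcd_training_lane union shown earlier in this patch (swing in bits 1:0, pre-emphasis in bits 4:3); the HBR2-only POST_CURSOR2 handling is left out:

/* Pack one lane's 8b/10b drive settings into the DPCD TRAINING_LANEx_SET
 * byte layout: VOLTAGE_SWING_SET bits 1:0, PRE_EMPHASIS_SET bits 4:3. */
#include <stdint.h>
#include <stdio.h>

static uint8_t pack_training_lane_set(uint8_t voltage_swing, uint8_t pre_emphasis)
{
	return (uint8_t)((voltage_swing & 0x3) | ((pre_emphasis & 0x3) << 3));
}

int main(void)
{
	/* Swing level 2, pre-emphasis level 1 -> 0x0a. */
	printf("lane set byte = 0x%02x\n", pack_training_lane_set(2, 1));
	return 0;
}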
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -49,9 +49,7 @@ #include "link_enc_cfg.h" #include "link_hwss.h" #include "dc_link_dp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dccg.h" -#endif #include "clock_source.h" #include "clk_mgr.h" #include "abm.h" @@ -669,17 +667,12 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct dc_link *link = pipe_ctx->stream->link; const struct dc *dc = link->dc; - + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); uint32_t active_total_with_borders; uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; - /* For MST, there are multiply stream go to only one link. - * connect DIG back_end to front_end while enable_stream and - * disconnect them during disable_stream - * BY this, it is logic clean to separate stream and link */ - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + link_hwss->setup_stream_encoder(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -795,7 +788,7 @@ void dce110_edp_wait_for_hpd_ready( dal_gpio_destroy_irq(&hpd); if (false == edp_hpd_high) { - DC_LOG_ERROR( + DC_LOG_WARNING( "%s: wait timed out!\n", __func__); } } @@ -1112,17 +1105,12 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) clk_mgr->funcs->enable_pme_wa(clk_mgr); /* un-mute audio */ /* TODO: audio should be per stream rather than per link */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.hpo_dp_stream_enc, false); else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, false); -#else - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( - pipe_ctx->stream_res.stream_enc, false); -#endif if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.audio->enabled = true; } @@ -1145,32 +1133,22 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.hpo_dp_stream_enc, true); else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); -#else - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( - pipe_ctx->stream_res.stream_enc, true); -#endif if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.hpo_dp_stream_enc); else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( - pipe_ctx->stream_res.stream_enc); -#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1195,7 +1173,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dc *dc = pipe_ctx->stream->ctx->dc; - struct link_encoder *link_enc = NULL; + const struct link_hwss *link_hwss = 
get_link_hwss(link, &pipe_ctx->link_res); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1204,54 +1182,26 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#else - if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#endif pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc); dc->hwss.disable_audio_stream(pipe_ctx); - /* Link encoder may have been dynamically assigned to non-physical display endpoint. */ - if (link->ep_type == DISPLAY_ENDPOINT_PHY) - link_enc = link->link_enc; - else if (dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - ASSERT(link_enc); + link_hwss->reset_stream_encoder(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable( - pipe_ctx->stream_res.hpo_dp_stream_enc); - setup_dp_hpo_stream(pipe_ctx, false); - /* TODO - DP2.0 HW: unmap stream from link encoder here */ - } else { - if (link_enc) - link_enc->funcs->connect_dig_be_to_fe( - link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); - } -#else - if (link_enc) - link_enc->funcs->connect_dig_be_to_fe( - link->link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); -#endif - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx)) - dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false); -#endif - + /* TODO: This looks like a bug to me as we are disabling HPO IO when + * we are just disabling a single HPO stream. Shouldn't we disable HPO + * HW control only when HPOs for all streams are disabled? 
+ */ + if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) + pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( + pipe_ctx->stream->ctx->dc->hwseq, false); + } } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1285,15 +1235,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { -#else - if (dc_is_dp_signal(pipe_ctx->stream->signal)) { -#endif pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) { @@ -1535,7 +1481,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( build_audio_output(context, pipe_ctx, &audio_output); if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -1546,12 +1491,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.audio->inst, &pipe_ctx->stream->audio_info); -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.audio->inst, - &pipe_ctx->stream->audio_info); -#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( pipe_ctx->stream_res.stream_enc, @@ -1570,14 +1509,37 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic) check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx); -#if defined(CONFIG_DRM_AMD_DC_DCN) + pipe_ctx->stream_res.opp->funcs->opp_program_fmt( + pipe_ctx->stream_res.opp, + &stream->bit_depth_params, + &stream->clamping); + + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + stream->timing.display_color_depth, + stream->signal); + + while (odm_pipe) { + odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( + odm_pipe->stream_res.opp, + COLOR_SPACE_YCBCR601, + stream->timing.display_color_depth, + stream->signal); + + odm_pipe->stream_res.opp->funcs->opp_program_fmt( + odm_pipe->stream_res.opp, + &stream->bit_depth_params, + &stream->clamping); + odm_pipe = odm_pipe->next_odm_pipe; + } + /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) -#endif /* */ /* Do not touch stream timing on seamless boot optimization. 
*/ if (!pipe_ctx->stream->apply_seamless_boot_optimization) @@ -1611,34 +1573,9 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); - pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( - pipe_ctx->stream_res.opp, - COLOR_SPACE_YCBCR601, - stream->timing.display_color_depth, - stream->signal); - - pipe_ctx->stream_res.opp->funcs->opp_program_fmt( - pipe_ctx->stream_res.opp, - &stream->bit_depth_params, - &stream->clamping); - while (odm_pipe) { - odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( - odm_pipe->stream_res.opp, - COLOR_SPACE_YCBCR601, - stream->timing.display_color_depth, - stream->signal); - - odm_pipe->stream_res.opp->funcs->opp_program_fmt( - odm_pipe->stream_res.opp, - &stream->bit_depth_params, - &stream->clamping); - odm_pipe = odm_pipe->next_odm_pipe; - } - if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling @@ -1648,9 +1585,8 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); } -#endif - pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL; pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; @@ -1826,7 +1762,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) edp_link->link_status.link_active) { struct dc_stream_state *edp_stream = edp_streams[0]; - can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); + can_apply_edp_fast_boot = dc_validate_boot_timing(dc, + edp_stream->sink, &edp_stream->timing); edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); @@ -2248,8 +2185,6 @@ static void dce110_setup_audio_dto( build_audio_output(context, pipe_ctx, &audio_output); -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* For DCN3.1, audio to HPO FRL encoder is using audio DTBCLK DTO */ if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) { /* disable audio DTBCLK DTO */ dc->res_pool->dccg->funcs->set_audio_dtbclk_dto( @@ -2266,13 +2201,6 @@ static void dce110_setup_audio_dto( pipe_ctx->stream->signal, &audio_output.crtc_info, &audio_output.pll_info); -#else - pipe_ctx->stream_res.audio->funcs->wall_dto_setup( - pipe_ctx->stream_res.audio, - pipe_ctx->stream->signal, - &audio_output.crtc_info, - &audio_output.pll_info); -#endif break; } } @@ -2795,7 +2723,7 @@ static void dce110_program_front_end_for_pipe( pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); - pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL; program_scaler(dc, pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index ee55cda854bf..560da6cbcaa3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -862,7 +862,7 @@ static struct clock_source *find_matching_pll( return NULL; } - return 0; + return NULL; } static enum dc_status build_mapped_resource( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index f4f423d0b8c3..80595d7f060c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .program_watermarks = hubbub1_program_watermarks, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index ea185c877323..fbff6beb78be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1311,6 +1311,20 @@ void hubp1_set_flip_int(struct hubp *hubp) return; } +/** + * hubp1_wait_pipe_read_start - wait for hubp ret path starting read. + * + * @hubp: hubp struct reference. + */ +void hubp1_wait_pipe_read_start(struct hubp *hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_WAIT(HUBPRET_READ_LINE_STATUS, + PIPE_READ_VBLANK, 0, + 1, 1000); +} + void hubp1_init(struct hubp *hubp) { //do nothing @@ -1345,6 +1359,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_soft_reset = hubp1_soft_reset, .hubp_in_blank = hubp1_in_blank, .hubp_set_flip_int = hubp1_set_flip_int, + .hubp_wait_pipe_read_start = hubp1_wait_pipe_read_start, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 9cb8c383d673..0b17c2993ca5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -76,6 +76,7 @@ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\ SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\ SRI(HUBPRET_CONTROL, HUBPRET, id),\ + SRI(HUBPRET_READ_LINE_STATUS, HUBPRET, id),\ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\ SRI(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id),\ @@ -186,6 +187,7 @@ uint32_t DCSURF_SURFACE_CONTROL; \ uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \ uint32_t HUBPRET_CONTROL; \ + uint32_t HUBPRET_READ_LINE_STATUS; \ uint32_t DCN_EXPANSION_MODE; \ uint32_t DCHUBP_REQ_SIZE_CONFIG; \ uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \ @@ -338,6 +340,7 @@ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ + HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_STATUS, PIPE_READ_VBLANK, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\ @@ -538,6 +541,7 @@ type DET_BUF_PLANE1_BASE_ADDRESS;\ type CROSSBAR_SRC_CB_B;\ type CROSSBAR_SRC_CR_R;\ + type PIPE_READ_VBLANK;\ type DRQ_EXPANSION_MODE;\ type PRQ_EXPANSION_MODE;\ type MRQ_EXPANSION_MODE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
index 530a72e3eefe..c3e141c19a77 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -107,7 +107,7 @@ void dcn10_lock_all_pipes(struct dc *dc, * (un)locking. Also skip if pipe is disabled. */ if (pipe_ctx->top_pipe || - !pipe_ctx->stream || !pipe_ctx->plane_state || + !pipe_ctx->stream || !tg->funcs->is_tg_enabled(tg)) continue; @@ -1112,9 +1112,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) void dcn10_verify_allow_pstate_change_high(struct dc *dc) { + struct hubbub *hubbub = dc->res_pool->hubbub; static bool should_log_hw_state; /* prevent hw state log by default */ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) { + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { int i = 0; if (should_log_hw_state) @@ -1123,8 +1127,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); BREAK_TO_DEBUGGER(); if (dcn10_hw_wa_force_recovery(dc)) { - /*check again*/ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) + /*check again*/ + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) BREAK_TO_DEBUGGER(); } } @@ -1500,13 +1504,8 @@ void dcn10_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); } - /* Enable outbox notification feature of dmub */ - if (dc->debug.enable_dmub_aux_for_legacy_ddc) - dmub_enable_outbox_notification(dc); - /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1514,7 +1513,7 @@ void dcn10_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. 
*/ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { if (!is_optimized_init_done) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) @@ -2065,14 +2064,11 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, uint32_t embedded_pix_clk_100hz; uint16_t embedded_h_total; uint16_t embedded_v_total; - bool clamshell_closed = false; uint32_t dp_ref_clk_100hz = dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10; if (dc->config.vblank_alignment_dto_params && dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) { - clamshell_closed = - (dc->config.vblank_alignment_dto_params >> 63); embedded_h_total = (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF; embedded_v_total = @@ -3101,7 +3097,8 @@ static void dcn10_config_stereo_parameters( flags->PROGRAM_STEREO = 1; flags->PROGRAM_POLARITY = 1; - if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA || + if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE || + timing_3d_format == TIMING_3D_FORMAT_INBAND_FA || timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA || timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { enum display_dongle_type dongle = \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index f4b34c110eae..ca39361f71c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1101,7 +1101,8 @@ void dcn10_link_encoder_disable_output( void dcn10_link_encoder_dp_set_lane_settings( struct link_encoder *enc, - const struct link_training_settings *link_settings) + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); union dpcd_training_lane_set training_lane_set = { { 0 } }; @@ -1116,26 +1117,25 @@ void dcn10_link_encoder_dp_set_lane_settings( cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; cntl.transmitter = enc10->base.transmitter; cntl.connector_obj_id = enc10->base.connector; - cntl.lanes_number = link_settings->link_settings.lane_count; + cntl.lanes_number = link_settings->lane_count; cntl.hpd_sel = enc10->base.hpd_source; - cntl.pixel_clock = link_settings->link_settings.link_rate * - LINK_RATE_REF_FREQ_IN_KHZ; + cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) { + for (lane = 0; lane < link_settings->lane_count; lane++) { /* translate lane settings */ training_lane_set.bits.VOLTAGE_SWING_SET = - link_settings->lane_settings[lane].VOLTAGE_SWING; + lane_settings[lane].VOLTAGE_SWING; training_lane_set.bits.PRE_EMPHASIS_SET = - link_settings->lane_settings[lane].PRE_EMPHASIS; + lane_settings[lane].PRE_EMPHASIS; /* post cursor 2 setting only applies to HBR2 link rate */ - if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) { + if (link_settings->link_rate == LINK_RATE_HIGH2) { /* this is passed to VBIOS * to program post cursor 2 level */ training_lane_set.bits.POST_CURSOR2_SET = - link_settings->lane_settings[lane].POST_CURSOR2; + lane_settings[lane].POST_CURSOR2; } cntl.lane_select = lane; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h 
index c337588231ff..663aac0a164a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -581,7 +581,8 @@ void dcn10_link_encoder_disable_output( /* set DP lane settings */ void dcn10_link_encoder_dp_set_lane_settings( struct link_encoder *enc, - const struct link_training_settings *link_settings); + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); void dcn10_link_encoder_dp_set_phy_pattern( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 858b72149897..4048908dd265 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -70,68 +70,6 @@ #include "dce/dce_aux.h" #include "dce/dce_i2c.h" -const struct _vcs_dpi_ip_params_st dcn1_0_ip = { - .rob_buffer_size_kbytes = 64, - .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs_luma = 42, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .pte_chunk_size_kbytes = 2, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 2, - .line_buffer_size_bits = 589824, - .max_line_buffer_lines = 12, - .IsLineBufferBppFixed = 0, - .LineBufferFixedBpp = -1, - .writeback_luma_buffer_size_kbytes = 12, - .writeback_chroma_buffer_size_kbytes = 8, - .max_num_dpp = 4, - .max_num_wb = 2, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 4, - .max_vscl_ratio = 4, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.10, - .min_vblank_lines = 14, - .dppclk_delay_subtotal = 90, - .dispclk_delay_subtotal = 42, - .dcfclk_cstate_latency = 10, - .max_inter_dcn_tile_repeaters = 8, - .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, - .bug_forcing_LC_req_same_size_fixed = 0, -}; - -const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, - .urgent_latency_us = 4.0, - .writeback_latency_us = 12.0, - .ideal_dram_bw_after_urgent_percent = 80.0, - .max_request_size_bytes = 256, - .downspread_percent = 0.5, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 128, - .urgent_out_of_order_return_per_channel_bytes = 256, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 2, - .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 17.0, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, -}; - #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 @@ -925,6 +863,21 @@ static struct dce_hwseq *dcn10_hwseq_create( hws->wa.DEGVIDCN10_253 = true; hws->wa.false_optc_underflow = true; hws->wa.DEGVIDCN10_254 = true; + + if ((ctx->asic_id.chip_family == FAMILY_RV) && + ASICREV_IS_RAVEN2(ctx->asic_id.hw_internal_rev)) + switch (ctx->asic_id.pci_revision_id) { + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + hws->wa.wait_hubpret_read_start_during_mpo_transition = true; + break; + default: + 
hws->wa.wait_hubpret_read_start_during_mpo_transition = false; + break; + } } return hws; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h index 633025ccb870..bf8e33cd8147 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h @@ -27,6 +27,7 @@ #define __DC_RESOURCE_DCN10_H__ #include "core_types.h" +#include "dml/dcn10/dcn10_fpu.h" #define TO_DCN10_RES_POOL(pool)\ container_of(pool, struct dcn10_resource_pool, base) @@ -35,6 +36,9 @@ struct dc; struct resource_pool; struct _vcs_dpi_display_pipe_params_st; +extern struct _vcs_dpi_ip_params_st dcn1_0_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc; + struct dcn10_resource_pool { struct resource_pool base; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 5fcaf78334ff..abaed2121feb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -9,31 +9,6 @@ DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o d DCN20 += dcn20_dsc.o -ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 -else -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 -endif -endif - AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN20) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index f98aba308028..b3c9a9724efd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -183,15 +183,25 @@ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\ + type SYMCLK32_SE0_GATE_DISABLE;\ + type SYMCLK32_SE1_GATE_DISABLE;\ + type SYMCLK32_SE2_GATE_DISABLE;\ + type SYMCLK32_SE3_GATE_DISABLE;\ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\ + type SYMCLK32_LE0_GATE_DISABLE;\ + type SYMCLK32_LE1_GATE_DISABLE;\ type DPSTREAMCLK_ROOT_GATE_DISABLE;\ type DPSTREAMCLK_GATE_DISABLE;\ type HDMISTREAMCLK0_DTO_PHASE;\ type HDMISTREAMCLK0_DTO_MODULO;\ type HDMICHARCLK0_GATE_DISABLE;\ - type HDMICHARCLK0_ROOT_GATE_DISABLE; - + type HDMICHARCLK0_ROOT_GATE_DISABLE; \ + type PHYASYMCLK_GATE_DISABLE; \ + type PHYBSYMCLK_GATE_DISABLE; \ + type PHYCSYMCLK_GATE_DISABLE; \ + type PHYDSYMCLK_GATE_DISABLE; \ + type PHYESYMCLK_GATE_DISABLE; struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) @@ -233,6 +243,7 @@ struct dccg_registers { uint32_t DSCCLK2_DTO_PARAM; uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; uint32_t DPSTREAMCLK_GATE_DISABLE; + uint32_t DCCG_GATE_DISABLE_CNTL2; uint32_t DCCG_GATE_DISABLE_CNTL3; uint32_t HDMISTREAMCLK0_DTO_PARAM; uint32_t DCCG_GATE_DISABLE_CNTL4; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index dc1752e9f461..a665af19f201 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -707,17 +707,6 @@ bool hubp2_program_surface_flip_and_addr( REG_UPDATE(VMID_SETTINGS_0, VMID, address->vmid); - if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) { - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1); - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1); - - } else { - // turn off stereo if not in stereo - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0); - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0); - } - - /* HW automatically latch rest of address register on write to * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used @@ -942,10 +931,6 @@ void hubp2_set_blank_regs(struct hubp *hubp, bool blank) struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); uint32_t blank_en = blank ? 1 : 0; - REG_UPDATE_2(DCHUBP_CNTL, - HUBP_BLANK_EN, blank_en, - HUBP_TTU_DISABLE, blank_en); - if (blank) { uint32_t reg_val = REG_READ(DCHUBP_CNTL); @@ -958,9 +943,13 @@ void hubp2_set_blank_regs(struct hubp *hubp, bool blank) */ REG_WAIT(DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, 1, - 1, 200); + 1, 100000); } } + + REG_UPDATE_2(DCHUBP_CNTL, + HUBP_BLANK_EN, blank_en, + HUBP_TTU_DISABLE, 0); } void hubp2_cursor_set_position( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4991e93e5308..ab910deed481 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -55,6 +55,7 @@ #include "inc/link_dpcd.h" #include "dpcd_defs.h" #include "inc/link_enc_cfg.h" +#include "link_hwss.h" #define DC_LOGGER_INIT(logger) @@ -1738,6 +1739,16 @@ void dcn20_program_front_end_for_ctx( || pipe->stream->update_flags.raw) && hws->funcs.program_all_writeback_pipes_in_tree) hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + + /* Avoid underflow by check of pipe line read when adding 2nd plane. 
*/ + if (hws->wa.wait_hubpret_read_start_during_mpo_transition && + !pipe->top_pipe && + pipe->stream && + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && + dc->current_state->stream_status[0].plane_count == 1 && + context->stream_status[0].plane_count > 1) { + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); + } } } @@ -1817,6 +1828,7 @@ void dcn20_prepare_bandwidth( struct dc_state *context) { struct hubbub *hubbub = dc->res_pool->hubbub; + unsigned int compbuf_size_kb = 0; dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, @@ -1828,9 +1840,16 @@ void dcn20_prepare_bandwidth( &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); + /* decrease compbuf size */ - if (hubbub->funcs->program_compbuf_size) - hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, false); + if (hubbub->funcs->program_compbuf_size) { + if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) + compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; + else + compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; + + hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); + } } void dcn20_optimize_bandwidth( @@ -2390,46 +2409,22 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t active_total_with_borders; uint32_t early_control = 0; struct timing_generator *tg = pipe_ctx->stream_res.tg; - struct link_encoder *link_enc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dc *dc = pipe_ctx->stream->ctx->dc; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream); - else - link_enc = link->link_enc; - ASSERT(link_enc); - - /* For MST, there are multiply stream go to only one link. 
- * connect DIG back_end to front_end while enable_stream and - * disconnect them during disable_stream - * BY this, it is logic clean to separate stream and link - */ if (is_dp_128b_132b_signal(pipe_ctx)) { - if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) - pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( - pipe_ctx->stream->ctx->dc->hwseq, true); - setup_dp_hpo_stream(pipe_ctx, true); - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( - pipe_ctx->stream_res.hpo_dp_stream_enc); - pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link( - pipe_ctx->stream_res.hpo_dp_stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc->inst, - pipe_ctx->link_res.hpo_dp_link_enc->inst); + if (dc->hwseq->funcs.setup_hpo_hw_control) + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); } - if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc) - link_enc->funcs->connect_dig_be_to_fe( - link_enc, pipe_ctx->stream_res.stream_enc->id, true); - - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); + link_hwss->setup_stream_encoder(pipe_ctx); if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { - if (link->dc->hwss.program_dmdata_engine) - link->dc->hwss.program_dmdata_engine(pipe_ctx); + if (dc->hwss.program_dmdata_engine) + dc->hwss.program_dmdata_engine(pipe_ctx); } - link->dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2a72517e2b28..d473708d5399 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -63,7 +63,6 @@ #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "dc_link_ddc.h" -#include "dc_link_dp.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -93,367 +92,6 @@ #define DC_LOGGER_INIT(logger) -struct _vcs_dpi_ip_params_st dcn2_0_ip = { - .odm_capable = 1, - .gpuvm_enable = 0, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 6, - .rob_buffer_size_kbytes = 168, - .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs_luma = 84, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_chunk_size_kbytes = 2, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 2, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, - .line_buffer_fixed_bpp = 0, - .dcc_supported = true, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 12, - .writeback_max_vscl_taps = 12, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 6, - .max_num_dpp = 6, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - 
.max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 8, - .max_vscl_ratio = 8, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.10, - .min_vblank_lines = 32, // - .dppclk_delay_subtotal = 77, // - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 8, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 87, // - .dcfclk_cstate_latency = 10, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .xfc_supported = true, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .number_of_cursors = 1, -}; - -static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { - .odm_capable = 1, - .gpuvm_enable = 0, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .num_dsc = 5, - .rob_buffer_size_kbytes = 168, - .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs_luma = 84, - .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .max_page_table_levels = 4, - .pte_chunk_size_kbytes = 2, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 2, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, - .line_buffer_fixed_bpp = 0, - .dcc_supported = true, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 12, - .writeback_max_vscl_taps = 12, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 5, - .max_num_dpp = 5, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 8, - .max_vscl_ratio = 8, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.10, - .min_vblank_lines = 32, // - .dppclk_delay_subtotal = 77, // - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 8, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 87, // - .dcfclk_cstate_latency = 10, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .xfc_supported = true, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0, - .number_of_cursors = 1, -}; - -static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { - /* Defaults that get patched on driver load from firmware. 
*/ - .clock_limits = { - { - .state = 0, - .dcfclk_mhz = 560.0, - .fabricclk_mhz = 560.0, - .dispclk_mhz = 513.0, - .dppclk_mhz = 513.0, - .phyclk_mhz = 540.0, - .socclk_mhz = 560.0, - .dscclk_mhz = 171.0, - .dram_speed_mts = 8960.0, - }, - { - .state = 1, - .dcfclk_mhz = 694.0, - .fabricclk_mhz = 694.0, - .dispclk_mhz = 642.0, - .dppclk_mhz = 642.0, - .phyclk_mhz = 600.0, - .socclk_mhz = 694.0, - .dscclk_mhz = 214.0, - .dram_speed_mts = 11104.0, - }, - { - .state = 2, - .dcfclk_mhz = 875.0, - .fabricclk_mhz = 875.0, - .dispclk_mhz = 734.0, - .dppclk_mhz = 734.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 875.0, - .dscclk_mhz = 245.0, - .dram_speed_mts = 14000.0, - }, - { - .state = 3, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 1000.0, - .dispclk_mhz = 1100.0, - .dppclk_mhz = 1100.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1000.0, - .dscclk_mhz = 367.0, - .dram_speed_mts = 16000.0, - }, - { - .state = 4, - .dcfclk_mhz = 1200.0, - .fabricclk_mhz = 1200.0, - .dispclk_mhz = 1284.0, - .dppclk_mhz = 1284.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1200.0, - .dscclk_mhz = 428.0, - .dram_speed_mts = 16000.0, - }, - /*Extra state, no dispclk ramping*/ - { - .state = 5, - .dcfclk_mhz = 1200.0, - .fabricclk_mhz = 1200.0, - .dispclk_mhz = 1284.0, - .dppclk_mhz = 1284.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1200.0, - .dscclk_mhz = 428.0, - .dram_speed_mts = 16000.0, - }, - }, - .num_states = 5, - .sr_exit_time_us = 8.6, - .sr_enter_plus_exit_time_us = 10.9, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 40.0, - .max_avg_dram_bw_use_normal_percent = 40.0, - .writeback_latency_us = 12.0, - .ideal_dram_bw_after_urgent_percent = 40.0, - .max_request_size_bytes = 256, - .dram_channel_width_bytes = 2, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 131, - .urgent_out_of_order_return_per_channel_bytes = 256, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 16, - .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 404.0, - .dummy_pstate_latency_us = 5.0, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3850, - .xfc_bus_transport_time_us = 20, - .xfc_xbuf_latency_tolerance_us = 4, - .use_urgent_burst_bw = 0 -}; - -static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { - .clock_limits = { - { - .state = 0, - .dcfclk_mhz = 560.0, - .fabricclk_mhz = 560.0, - .dispclk_mhz = 513.0, - .dppclk_mhz = 513.0, - .phyclk_mhz = 540.0, - .socclk_mhz = 560.0, - .dscclk_mhz = 171.0, - .dram_speed_mts = 8960.0, - }, - { - .state = 1, - .dcfclk_mhz = 694.0, - .fabricclk_mhz = 694.0, - .dispclk_mhz = 642.0, - .dppclk_mhz = 642.0, - .phyclk_mhz = 600.0, - .socclk_mhz = 694.0, - .dscclk_mhz = 214.0, - .dram_speed_mts = 11104.0, - }, - { - .state = 2, - .dcfclk_mhz = 875.0, - .fabricclk_mhz = 
875.0, - .dispclk_mhz = 734.0, - .dppclk_mhz = 734.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 875.0, - .dscclk_mhz = 245.0, - .dram_speed_mts = 14000.0, - }, - { - .state = 3, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 1000.0, - .dispclk_mhz = 1100.0, - .dppclk_mhz = 1100.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1000.0, - .dscclk_mhz = 367.0, - .dram_speed_mts = 16000.0, - }, - { - .state = 4, - .dcfclk_mhz = 1200.0, - .fabricclk_mhz = 1200.0, - .dispclk_mhz = 1284.0, - .dppclk_mhz = 1284.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1200.0, - .dscclk_mhz = 428.0, - .dram_speed_mts = 16000.0, - }, - /*Extra state, no dispclk ramping*/ - { - .state = 5, - .dcfclk_mhz = 1200.0, - .fabricclk_mhz = 1200.0, - .dispclk_mhz = 1284.0, - .dppclk_mhz = 1284.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1200.0, - .dscclk_mhz = 428.0, - .dram_speed_mts = 16000.0, - }, - }, - .num_states = 5, - .sr_exit_time_us = 11.6, - .sr_enter_plus_exit_time_us = 13.9, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 40.0, - .max_avg_dram_bw_use_normal_percent = 40.0, - .writeback_latency_us = 12.0, - .ideal_dram_bw_after_urgent_percent = 40.0, - .max_request_size_bytes = 256, - .dram_channel_width_bytes = 2, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 131, - .urgent_out_of_order_return_per_channel_bytes = 256, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 8, - .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 404.0, - .dummy_pstate_latency_us = 5.0, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3850, - .xfc_bus_transport_time_us = 20, - .xfc_xbuf_latency_tolerance_us = 4, - .use_urgent_burst_bw = 0 -}; - -static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; - #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 @@ -1605,16 +1243,7 @@ static void get_pixel_clock_parameters( pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. 
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc); - } else - link_enc = stream->link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (link_enc) @@ -1819,69 +1448,6 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ return result; } - -static void swizzle_to_dml_params( - enum swizzle_mode_values swizzle, - unsigned int *sw_mode) -{ - switch (swizzle) { - case DC_SW_LINEAR: - *sw_mode = dm_sw_linear; - break; - case DC_SW_4KB_S: - *sw_mode = dm_sw_4kb_s; - break; - case DC_SW_4KB_S_X: - *sw_mode = dm_sw_4kb_s_x; - break; - case DC_SW_4KB_D: - *sw_mode = dm_sw_4kb_d; - break; - case DC_SW_4KB_D_X: - *sw_mode = dm_sw_4kb_d_x; - break; - case DC_SW_64KB_S: - *sw_mode = dm_sw_64kb_s; - break; - case DC_SW_64KB_S_X: - *sw_mode = dm_sw_64kb_s_x; - break; - case DC_SW_64KB_S_T: - *sw_mode = dm_sw_64kb_s_t; - break; - case DC_SW_64KB_D: - *sw_mode = dm_sw_64kb_d; - break; - case DC_SW_64KB_D_X: - *sw_mode = dm_sw_64kb_d_x; - break; - case DC_SW_64KB_D_T: - *sw_mode = dm_sw_64kb_d_t; - break; - case DC_SW_64KB_R_X: - *sw_mode = dm_sw_64kb_r_x; - break; - case DC_SW_VAR_S: - *sw_mode = dm_sw_var_s; - break; - case DC_SW_VAR_S_X: - *sw_mode = dm_sw_var_s_x; - break; - case DC_SW_VAR_D: - *sw_mode = dm_sw_var_d; - break; - case DC_SW_VAR_D_X: - *sw_mode = dm_sw_var_d_x; - break; - case DC_SW_VAR_R_X: - *sw_mode = dm_sw_var_r_x; - break; - default: - ASSERT(0); /* Not supported */ - break; - } -} - bool dcn20_split_stream_for_odm( const struct dc *dc, struct resource_context *res_ctx, @@ -1997,394 +1563,6 @@ void dcn20_split_stream_for_mpc( ASSERT(primary_pipe->plane_state); } -int dcn20_populate_dml_pipes_from_context( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int pipe_cnt, i; - bool synchronized_vblank = true; - struct resource_context *res_ctx = &context->res_ctx; - - for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { - if (!res_ctx->pipe_ctx[i].stream) - continue; - - if (pipe_cnt < 0) { - pipe_cnt = i; - continue; - } - - if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) - continue; - - if (dc->debug.disable_timing_sync || - (!resource_are_streams_timing_synchronizable( - res_ctx->pipe_ctx[pipe_cnt].stream, - res_ctx->pipe_ctx[i].stream) && - !resource_are_vblanks_synchronizable( - res_ctx->pipe_ctx[pipe_cnt].stream, - res_ctx->pipe_ctx[i].stream))) { - synchronized_vblank = false; - break; - } - } - - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; - unsigned int v_total; - unsigned int front_porch; - int output_bpc; - struct audio_check aud_check = {0}; - - if (!res_ctx->pipe_ctx[i].stream) - continue; - - v_total = timing->v_total; - front_porch = timing->v_front_porch; - - /* todo: - pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; - pipes[pipe_cnt].pipe.src.dcc = 0; - pipes[pipe_cnt].pipe.src.vm = 0;*/ - - pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - - pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; - /* todo: rotation?*/ - pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; - if 
(res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { - pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; - /* 1/2 vblank */ - pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = - (v_total - timing->v_addressable - - timing->v_border_top - timing->v_border_bottom) / 2; - /* 36 bytes dp, 32 hdmi */ - pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = - dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32; - } - pipes[pipe_cnt].pipe.src.dcc = false; - pipes[pipe_cnt].pipe.src.dcc_rate = 1; - pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; - pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; - pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start - - timing->h_addressable - - timing->h_border_left - - timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch; - pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - - timing->v_addressable - - timing->v_border_top - - timing->v_border_bottom; - pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; - pipes[pipe_cnt].pipe.dest.vtotal = v_total; - pipes[pipe_cnt].pipe.dest.hactive = - timing->h_addressable + timing->h_border_left + timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vactive = - timing->v_addressable + timing->v_border_top + timing->v_border_bottom; - pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; - pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0; - if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2; - pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; - pipes[pipe_cnt].dout.dp_lanes = 4; - pipes[pipe_cnt].dout.is_virtual = 0; - pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; - pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) { - case 1: - pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; - break; - case 3: - pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_4to1; - break; - default: - pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled; - } - pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; - if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state - == res_ctx->pipe_ctx[i].plane_state) { - struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe; - int split_idx = 0; - - while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state - == res_ctx->pipe_ctx[i].plane_state) { - first_pipe = first_pipe->top_pipe; - split_idx++; - } - /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ - if (split_idx == 0) - pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; - else if (split_idx == 1) - pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; - else if (split_idx == 2) - pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx; - } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { - struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe; - - while (first_pipe->prev_odm_pipe) - first_pipe = first_pipe->prev_odm_pipe; - pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; - } - - switch (res_ctx->pipe_ctx[i].stream->signal) { - case SIGNAL_TYPE_DISPLAY_PORT_MST: - case SIGNAL_TYPE_DISPLAY_PORT: - pipes[pipe_cnt].dout.output_type = dm_dp; - break; - case 
SIGNAL_TYPE_EDP: - pipes[pipe_cnt].dout.output_type = dm_edp; - break; - case SIGNAL_TYPE_HDMI_TYPE_A: - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - pipes[pipe_cnt].dout.output_type = dm_hdmi; - break; - default: - /* In case there is no signal, set dp with 4 lanes to allow max config */ - pipes[pipe_cnt].dout.is_virtual = 1; - pipes[pipe_cnt].dout.output_type = dm_dp; - pipes[pipe_cnt].dout.dp_lanes = 4; - } - - switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) { - case COLOR_DEPTH_666: - output_bpc = 6; - break; - case COLOR_DEPTH_888: - output_bpc = 8; - break; - case COLOR_DEPTH_101010: - output_bpc = 10; - break; - case COLOR_DEPTH_121212: - output_bpc = 12; - break; - case COLOR_DEPTH_141414: - output_bpc = 14; - break; - case COLOR_DEPTH_161616: - output_bpc = 16; - break; - case COLOR_DEPTH_999: - output_bpc = 9; - break; - case COLOR_DEPTH_111111: - output_bpc = 11; - break; - default: - output_bpc = 8; - break; - } - - switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - pipes[pipe_cnt].dout.output_format = dm_444; - pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; - break; - case PIXEL_ENCODING_YCBCR420: - pipes[pipe_cnt].dout.output_format = dm_420; - pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; - break; - case PIXEL_ENCODING_YCBCR422: - if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC && - !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple) - pipes[pipe_cnt].dout.output_format = dm_n422; - else - pipes[pipe_cnt].dout.output_format = dm_s422; - pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; - break; - default: - pipes[pipe_cnt].dout.output_format = dm_444; - pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; - } - - if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) - pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; - - /* todo: default max for now, until there is logic reflecting this in dc*/ - pipes[pipe_cnt].dout.dsc_input_bpc = 12; - /*fill up the audio sample rate (unit in kHz)*/ - get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); - pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; - /* - * For graphic plane, cursor number is 1, nv12 is 0 - * bw calculations due to cursor on/off - */ - if (res_ctx->pipe_ctx[i].plane_state && - res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) - pipes[pipe_cnt].pipe.src.num_cursors = 0; - else - pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; - - pipes[pipe_cnt].pipe.src.cur0_src_width = 256; - pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; - - if (!res_ctx->pipe_ctx[i].plane_state) { - pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; - pipes[pipe_cnt].pipe.src.source_scan = dm_horz; - pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s; - pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; - pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; - if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) - pipes[pipe_cnt].pipe.src.viewport_width = 1920; - pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; - if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) - pipes[pipe_cnt].pipe.src.viewport_height = 1080; - pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; - pipes[pipe_cnt].pipe.src.surface_width_y = 
pipes[pipe_cnt].pipe.src.viewport_width; - pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; - pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; - pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256; - pipes[pipe_cnt].pipe.src.source_format = dm_444_32; - pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ - pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ - pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/ - pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/ - pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; - pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0; - pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0; - pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/ - pipes[pipe_cnt].pipe.scale_taps.htaps = 1; - pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; - pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; - pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; - - if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) { - pipes[pipe_cnt].pipe.src.viewport_width /= 2; - pipes[pipe_cnt].pipe.dest.recout_width /= 2; - } else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) { - pipes[pipe_cnt].pipe.src.viewport_width /= 4; - pipes[pipe_cnt].pipe.dest.recout_width /= 4; - } - } else { - struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; - struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; - - pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate; - pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) - || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) - || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; - - /* stereo is not split */ - if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || - pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { - pipes[pipe_cnt].pipe.src.is_hsplit = false; - pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; - } - - pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 - || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; - pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; - pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y; - pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width; - pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; - pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; - pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; - pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width; - pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height; - pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; - pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; - pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width; - pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height; - if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA - || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; - pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; - pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; - pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; - } else { - pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; - pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; - } - pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; - pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; - pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height; - pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height; - pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; - if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) - pipes[pipe_cnt].pipe.dest.full_recout_width *= 2; - else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) - pipes[pipe_cnt].pipe.dest.full_recout_width *= 4; - else { - struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe; - - while (split_pipe && split_pipe->plane_state == pln) { - pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; - split_pipe = split_pipe->bottom_pipe; - } - split_pipe = res_ctx->pipe_ctx[i].top_pipe; - while (split_pipe && split_pipe->plane_state == pln) { - pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; - split_pipe = split_pipe->top_pipe; - } - } - - pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; - pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); - pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); - pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); - pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); - pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = - scl->ratios.vert.value != dc_fixpt_one.value - || scl->ratios.horz.value != dc_fixpt_one.value - || scl->ratios.vert_c.value != dc_fixpt_one.value - || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ - || dc->debug.always_scale; /*support always scale*/ - pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps; - pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c; - pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps; - pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; - - 
pipes[pipe_cnt].pipe.src.macro_tile_size = - swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); - swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, - &pipes[pipe_cnt].pipe.src.sw_mode); - - switch (pln->format) { - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - pipes[pipe_cnt].pipe.src.source_format = dm_420_8; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - pipes[pipe_cnt].pipe.src.source_format = dm_420_10; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pipes[pipe_cnt].pipe.src.source_format = dm_444_64; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pipes[pipe_cnt].pipe.src.source_format = dm_444_16; - break; - case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: - pipes[pipe_cnt].pipe.src.source_format = dm_444_8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: - pipes[pipe_cnt].pipe.src.source_format = dm_rgbe_alpha; - break; - default: - pipes[pipe_cnt].pipe.src.source_format = dm_444_32; - break; - } - } - - pipe_cnt++; - } - - /* populate writeback information */ - DC_FP_START(); - dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); - DC_FP_END(); - - return pipe_cnt; -} - unsigned int dcn20_calc_max_scaled_time( unsigned int time_per_pixel, enum mmhubbub_wbif_mode mode, @@ -2422,7 +1600,7 @@ void dcn20_set_mcif_arb_params( { enum mmhubbub_wbif_mode wbif_mode; struct mcif_arb_params *wb_arb_params; - int i, j, k, dwb_pipe; + int i, j, dwb_pipe; /* Writeback MCIF_WB arbitration parameters */ dwb_pipe = 0; @@ -2446,11 +1624,10 @@ void dcn20_set_mcif_arb_params( } else wbif_mode = PACKED_444; - for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) { - wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - } - wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */ + DC_FP_START(); + dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i); + DC_FP_END(); + wb_arb_params->slice_lines = 32; wb_arb_params->arbitration_slice = 2; wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel, @@ -2817,8 +1994,11 @@ int dcn20_validate_apply_pipe_split_flags( } /* Adjust dppclk when split is forced, do not bother with dispclk */ - if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) - v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) { + DC_FP_START(); + dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false); + DC_FP_END(); + } pipe_idx++; } @@ -2844,7 +2024,9 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); + DC_FP_START(); pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -2901,7 +2083,9 @@ bool dcn20_fast_validate_bw( hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); if (!hsplit_pipe) { - 
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; + DC_FP_START(); + dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); + DC_FP_END(); continue; } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { @@ -2943,363 +2127,6 @@ validate_out: return out; } -static void dcn20_calculate_wm( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *out_pipe_cnt, - int *pipe_split_from, - int vlevel, - bool fast_validate) -{ - int pipe_cnt, i, pipe_idx; - - for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - - if (pipe_split_from[i] < 0) { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; - else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipe_idx++; - } else { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; - else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; - } - - if (dc->config.forced_clocks) { - pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } - if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000) - pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; - if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000) - pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - - pipe_cnt++; - } - - if (pipe_cnt != pipe_idx) { - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); - else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); - } - - *out_pipe_cnt = pipe_cnt; - - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; - - /* only pipe 0 is read for voltage and dcf/soc clocks */ - if (vlevel < 1) { - pipes[0].clks_cfg.voltage = 1; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - 
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - if (vlevel < 2) { - pipes[0].clks_cfg.voltage = 2; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - if (vlevel < 3) { - pipes[0].clks_cfg.voltage = 3; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; - context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - 
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -} - -static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) -{ - int i; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) - return true; - } - return false; -} - -static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context) -{ - int plane_count; - int i; - - plane_count = 0; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].plane_state) - plane_count++; - } - - /* - * Zstate is allowed in following scenarios: - * 1. Single eDP with PSR enabled - * 2. 0 planes (No memory requests) - * 3. Single eDP without PSR but > 5ms stutter period - */ - if (plane_count == 0) - return DCN_ZSTATE_SUPPORT_ALLOW; - else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { - struct dc_link *link = context->streams[0]->sink->link; - - if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) - return DCN_ZSTATE_SUPPORT_ALLOW; - else - return DCN_ZSTATE_SUPPORT_DISALLOW; - } else - return DCN_ZSTATE_SUPPORT_DISALLOW; -} - -void dcn20_calculate_dlg_params( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel) -{ - int i, pipe_idx; - - /* Writeback MCIF_WB arbitration parameters */ - dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); - - context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; - context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; - context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; - context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; - - if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz) - context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz; - - context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; - context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; - context->bw_ctx.bw.dcn.clk.p_state_change_support = - context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] - != dm_dram_clock_change_unsupported; - context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - - context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context); - - context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); - - if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) - context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - pipes[pipe_idx].pipe.dest.vstartup_start = 
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; - context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; - - if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) - context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; - context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = - pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; - context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; - pipe_idx++; - } - /*save a original dppclock copy*/ - context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; - context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; - context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; - - context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; - - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - if (dc->ctx->dce_version == DCN_VERSION_2_01) - cstate_en = false; - - context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, - &context->res_ctx.pipe_ctx[i].dlg_regs, - &context->res_ctx.pipe_ctx[i].ttu_regs, - pipes, - pipe_cnt, - pipe_idx, - cstate_en, - context->bw_ctx.bw.dcn.clk.p_state_change_support, - false, false, true); - - context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, - &context->res_ctx.pipe_ctx[i].rq_regs, - &pipes[pipe_idx].pipe); - pipe_idx++; - } -} - -static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, - bool fast_validate) -{ - bool out = false; - - BW_VAL_TRACE_SETUP(); - - int vlevel = 0; - int pipe_split_from[MAX_PIPES]; - int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); - DC_LOGGER_INIT(dc->ctx->logger); - - BW_VAL_TRACE_COUNT(); - - out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); - - if (pipe_cnt == 0) - goto validate_out; - - if (!out) - goto validate_fail; - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - goto validate_out; - } - - dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); - - BW_VAL_TRACE_END_WATERMARKS(); - - goto validate_out; - -validate_fail: - DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", - dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); - - BW_VAL_TRACE_SKIP(fail); - out = false; - -validate_out: - kfree(pipes); - - 
BW_VAL_TRACE_FINISH(); - - return out; -} - -/* - * This must be noinline to ensure anything that deals with FP registers - * is contained within this call; previously our compiling with hard-float - * would result in fp instructions being emitted outside of the boundaries - * of the DC_FP_START/END macros, which makes sense as the compiler has no - * idea about what is wrapped and what is not - * - * This is largely just a workaround to avoid breakage introduced with 5.6, - * ideally all fp-using code should be moved into its own file, only that - * should be compiled with hard-float, and all code exported from there - * should be strictly wrapped with DC_FP_START/END - */ -static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, - struct dc_state *context, bool fast_validate) -{ - bool voltage_supported = false; - bool full_pstate_supported = false; - bool dummy_pstate_supported = false; - double p_state_latency_us; - - p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; - context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = - dc->debug.disable_dram_clock_change_vactive_support; - context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive = - dc->debug.enable_dram_clock_change_one_display_vactive; - - /*Unsafe due to current pipe merge and split logic*/ - ASSERT(context != dc->current_state); - - if (fast_validate) { - return dcn20_validate_bandwidth_internal(dc, context, true); - } - - // Best case, we support full UCLK switch latency - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); - full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; - - if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || - (voltage_supported && full_pstate_supported)) { - context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported; - goto restore_dml_state; - } - - // Fallback: Try to only support G6 temperature read latency - context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; - - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); - dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; - - if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; - goto restore_dml_state; - } - - // ERROR: fallback is supposed to always work. 
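/*
 * [Editor's note: illustrative sketch, not part of this patch.]
 * The recurring pattern in this series is that FPU-dependent math moves
 * into dml/dcn20/dcn20_fpu.c and is only reached between DC_FP_START()
 * and DC_FP_END() at the call site, e.g.
 *
 *	DC_FP_START();
 *	dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false);
 *	DC_FP_END();
 *
 * Judging from the call sites in this diff (they replace a "/= 2" and a
 * "*= 2" on RequiredDPPCLK), the helper presumably reduces to something
 * like the sketch below. The parameter name and the vba_vars_st type are
 * assumptions made for illustration, not taken from this diff.
 */
void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
			     int vlevel,
			     int max_mpc_comb,
			     int pipe_idx,
			     bool double_required_dppclk)
{
	if (double_required_dppclk)
		v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
	else
		v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
}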
- ASSERT(false); - -restore_dml_state: - context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; - return voltage_supported; -} - bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -3467,170 +2294,6 @@ static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) } } -void dcn20_cap_soc_clocks( - struct _vcs_dpi_soc_bounding_box_st *bb, - struct pp_smu_nv_clock_table max_clocks) -{ - int i; - - // First pass - cap all clocks higher than the reported max - for (i = 0; i < bb->num_states; i++) { - if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000)) - && max_clocks.dcfClockInKhz != 0) - bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000); - - if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16) - && max_clocks.uClockInKhz != 0) - bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; - - if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000)) - && max_clocks.fabricClockInKhz != 0) - bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000); - - if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000)) - && max_clocks.displayClockInKhz != 0) - bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000); - - if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000)) - && max_clocks.dppClockInKhz != 0) - bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000); - - if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000)) - && max_clocks.phyClockInKhz != 0) - bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000); - - if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000)) - && max_clocks.socClockInKhz != 0) - bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000); - - if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000)) - && max_clocks.dscClockInKhz != 0) - bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000); - } - - // Second pass - remove all duplicate clock states - for (i = bb->num_states - 1; i > 1; i--) { - bool duplicate = true; - - if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts) - duplicate = false; - if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz) - duplicate = false; - if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz) - duplicate = false; - - if (duplicate) - bb->num_states--; - } -} - -void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, - struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) -{ - struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES]; - int i; - int num_calculated_states = 0; - int min_dcfclk = 0; - - if (num_states == 0) - return; - - memset(calculated_states, 0, sizeof(calculated_states)); - - if (dc->bb_overrides.min_dcfclk_mhz > 0) - min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; - else { - if 
(ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) - min_dcfclk = 310; - else - // Accounting for SOC/DCF relationship, we can go as high as - // 506Mhz in Vmin. - min_dcfclk = 506; - } - - for (i = 0; i < num_states; i++) { - int min_fclk_required_by_uclk; - calculated_states[i].state = i; - calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000; - - // FCLK:UCLK ratio is 1.08 - min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080, - 1000000); - - calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ? - min_dcfclk : min_fclk_required_by_uclk; - - calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ? - max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; - - calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ? - max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; - - calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000; - calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000; - calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3); - - calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000; - - num_calculated_states++; - } - - calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000; - calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000; - calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000; - - memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits)); - bb->num_states = num_calculated_states; - - // Duplicate the last state, DML always an extra state identical to max state to work - memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st)); - bb->clock_limits[num_calculated_states].state = bb->num_states; -} - -void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) -{ - if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } - - if ((int)(bb->sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - bb->sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - - if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } - - if ((int)(bb->dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - bb->dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - - if ((int)(bb->dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - bb->dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } -} - static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { @@ -3913,8 +2576,9 @@ static bool dcn20_resource_construct( ranges.reader_wm_sets[i].wm_inst = i; ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; 
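/*
 * [Editor's note: illustrative sketch, not part of this patch.]
 * The hunk just below swaps the open-coded fill-clock math for a
 * DC_FP_START()/DC_FP_END()-wrapped call to dcn20_fpu_set_wm_ranges().
 * Based on the two assignments being removed there, the helper presumably
 * reduces to roughly the following; the struct types are assumptions made
 * for illustration (dram_speed_mts is a double, which is why this belongs
 * on the FPU-isolated side).
 */
void dcn20_fpu_set_wm_ranges(int i,
			     struct pp_smu_wm_range_sets *ranges,
			     struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
	ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ?
		(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
	ranges->reader_wm_sets[i].max_fill_clk_mhz =
		loaded_bb->clock_limits[i].dram_speed_mts / 16;
}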
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; - ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; + DC_FP_START(); + dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb); + DC_FP_END(); ranges.num_reader_wm_sets = i + 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 6ec8ff45f0f7..7cbe1e9daa36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -27,6 +27,7 @@ #define __DC_RESOURCE_DCN20_H__ #include "core_types.h" +#include "dml/dcn20/dcn20_fpu.h" #define TO_DCN20_RES_POOL(pool)\ container_of(pool, struct dcn20_resource_pool, base) @@ -35,6 +36,12 @@ struct dc; struct resource_pool; struct _vcs_dpi_display_pipe_params_st; +extern struct _vcs_dpi_ip_params_st dcn2_0_ip; +extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc; + struct dcn20_resource_pool { struct resource_pool base; }; @@ -49,11 +56,7 @@ unsigned int dcn20_calc_max_scaled_time( unsigned int time_per_pixel, enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); -int dcn20_populate_dml_pipes_from_context( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate); + struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, @@ -79,7 +82,6 @@ struct dpp *dcn20_dpp_create( struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst); - struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst); @@ -96,11 +98,6 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -void dcn20_cap_soc_clocks( - struct _vcs_dpi_soc_bounding_box_st *bb, - struct pp_smu_nv_clock_table max_clocks); -void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, - struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states); struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst); @@ -158,11 +155,6 @@ bool dcn20_fast_validate_bw( int *pipe_split_from, int *vlevel_out, bool fast_validate); -void dcn20_calculate_dlg_params( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); @@ -170,12 +162,5 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state * enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); -void dcn20_patch_bounding_box( - struct dc *dc, - struct _vcs_dpi_soc_bounding_box_st *bb); -void dcn20_cap_soc_clocks( - struct _vcs_dpi_soc_bounding_box_st *bb, - struct pp_smu_nv_clock_table max_clocks); - #endif /* 
__DC_RESOURCE_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c index f1f89f93603f..1826dd7f3da1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -27,6 +27,7 @@ #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20/dcn20_hwseq.h" #include "dcn201_hwseq.h" +#include "dcn201_init.h" static const struct hw_sequencer_funcs dcn201_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index bb8c95141082..0dc06e428999 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -5,31 +5,6 @@ DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ dcn21_hwseq.o dcn21_link_encoder.o dcn21_dccg.o -ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 -else -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 -endif -endif - AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN21) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index e5cc6bf45743..612732656772 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -34,6 +34,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" +#include "dcn21/dcn21_resource.h" #include "dml/dcn20/dcn20_fpu.h" @@ -89,230 +90,6 @@ #include "dce/dmub_psr.h" #include "dce/dmub_abm.h" -#define DC_LOGGER_INIT(logger) - - -struct _vcs_dpi_ip_params_st dcn2_1_ip = { - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 1, - .gpuvm_max_page_table_levels = 1, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 2, - .num_dsc = 3, - .rob_buffer_size_kbytes = 168, - .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs_luma = 44, - .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .max_page_table_levels = 4, - .pte_chunk_size_kbytes = 2, - .meta_chunk_size_kbytes = 2, - .min_meta_chunk_size_bytes = 256, - .writeback_chunk_size_kbytes = 2, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, - .line_buffer_fixed_bpp = 0, - .dcc_supported = true, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 12, - .writeback_max_vscl_taps = 12, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 
8, - .cursor_chunk_size = 2, - .max_num_otg = 4, - .max_num_dpp = 4, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 4, - .max_vscl_ratio = 4, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.10, - .min_vblank_lines = 32, // - .dppclk_delay_subtotal = 77, // - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 8, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 87, // - .dcfclk_cstate_latency = 10, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { - .clock_limits = { - { - .state = 0, - .dcfclk_mhz = 400.0, - .fabricclk_mhz = 400.0, - .dispclk_mhz = 600.0, - .dppclk_mhz = 400.00, - .phyclk_mhz = 600.0, - .socclk_mhz = 278.0, - .dscclk_mhz = 205.67, - .dram_speed_mts = 1600.0, - }, - { - .state = 1, - .dcfclk_mhz = 464.52, - .fabricclk_mhz = 800.0, - .dispclk_mhz = 654.55, - .dppclk_mhz = 626.09, - .phyclk_mhz = 600.0, - .socclk_mhz = 278.0, - .dscclk_mhz = 205.67, - .dram_speed_mts = 1600.0, - }, - { - .state = 2, - .dcfclk_mhz = 514.29, - .fabricclk_mhz = 933.0, - .dispclk_mhz = 757.89, - .dppclk_mhz = 685.71, - .phyclk_mhz = 600.0, - .socclk_mhz = 278.0, - .dscclk_mhz = 287.67, - .dram_speed_mts = 1866.0, - }, - { - .state = 3, - .dcfclk_mhz = 576.00, - .fabricclk_mhz = 1067.0, - .dispclk_mhz = 847.06, - .dppclk_mhz = 757.89, - .phyclk_mhz = 600.0, - .socclk_mhz = 715.0, - .dscclk_mhz = 318.334, - .dram_speed_mts = 2134.0, - }, - { - .state = 4, - .dcfclk_mhz = 626.09, - .fabricclk_mhz = 1200.0, - .dispclk_mhz = 900.00, - .dppclk_mhz = 847.06, - .phyclk_mhz = 810.0, - .socclk_mhz = 953.0, - .dscclk_mhz = 489.0, - .dram_speed_mts = 2400.0, - }, - { - .state = 5, - .dcfclk_mhz = 685.71, - .fabricclk_mhz = 1333.0, - .dispclk_mhz = 1028.57, - .dppclk_mhz = 960.00, - .phyclk_mhz = 810.0, - .socclk_mhz = 278.0, - .dscclk_mhz = 287.67, - .dram_speed_mts = 2666.0, - }, - { - .state = 6, - .dcfclk_mhz = 757.89, - .fabricclk_mhz = 1467.0, - .dispclk_mhz = 1107.69, - .dppclk_mhz = 1028.57, - .phyclk_mhz = 810.0, - .socclk_mhz = 715.0, - .dscclk_mhz = 318.334, - .dram_speed_mts = 3200.0, - }, - { - .state = 7, - .dcfclk_mhz = 847.06, - .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1395.0, - .dppclk_mhz = 1285.00, - .phyclk_mhz = 1325.0, - .socclk_mhz = 953.0, - .dscclk_mhz = 489.0, - .dram_speed_mts = 4266.0, - }, - /*Extra state, no dispclk ramping*/ - { - .state = 8, - .dcfclk_mhz = 847.06, - .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1395.0, - .dppclk_mhz = 1285.0, - .phyclk_mhz = 1325.0, - .socclk_mhz = 953.0, - .dscclk_mhz = 489.0, - .dram_speed_mts = 4266.0, - }, - - }, - - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, - 
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 100.0, - .writeback_latency_us = 12.0, - .max_request_size_bytes = 256, - .dram_channel_width_bytes = 4, - .fabric_datapath_to_dcn_data_return_bytes = 32, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 128, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 4, - .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 23.84, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3600, - .xfc_bus_transport_time_us = 4, - .xfc_xbuf_latency_tolerance_us = 4, - .use_urgent_burst_bw = 1, - .num_states = 8 -}; - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - /* begin ********************* * macros to expend register list macro defined in HW object header file */ @@ -705,12 +482,6 @@ static const struct dcn10_stream_encoder_mask se_mask = { static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); -static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate); - static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -1029,163 +800,13 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) dcn21_pp_smu_destroy(&pool->base.pp_smu); } - -static void calculate_wm_set_for_vlevel( - int vlevel, - struct wm_range_table_entry *table_entry, - struct dcn_watermarks *wm_set, - struct display_mode_lib *dml, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - - ASSERT(vlevel < dml->soc.num_states); - /* only pipe 0 is read for voltage and dcf/soc clocks */ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; - - dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; - dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; - dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - - wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; - wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; - wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; - dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) -{ - int i; - - if (dc->bb_overrides.sr_exit_time_ns) { - for (i = 0; i < WM_SET_COUNT; i++) { - dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = - 
dc->bb_overrides.sr_exit_time_ns / 1000.0; - } - } - - if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { - for (i = 0; i < WM_SET_COUNT; i++) { - dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - } - - if (dc->bb_overrides.urgent_latency_ns) { - bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } - - if (dc->bb_overrides.dram_clock_change_latency_ns) { - for (i = 0; i < WM_SET_COUNT; i++) { - dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - } -} - -static void dcn21_calculate_wm( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *out_pipe_cnt, - int *pipe_split_from, - int vlevel_req, - bool fast_validate) -{ - int pipe_cnt, i, pipe_idx; - int vlevel, vlevel_max; - struct wm_range_table_entry *table_entry; - struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - - ASSERT(bw_params); - - patch_bounding_box(dc, &context->bw_ctx.dml.soc); - - for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb]; - - if (pipe_split_from[i] < 0) { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_idx]; - else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipe_idx++; - } else { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_split_from[i]]; - else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; - } - pipe_cnt++; - } - - if (pipe_cnt != pipe_idx) { - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); - else - pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); - } - - *out_pipe_cnt = pipe_cnt; - - vlevel_max = bw_params->clk_table.num_entries - 1; - - - /* WM Set D */ - table_entry = &bw_params->wm_table.entries[WM_D]; - if (table_entry->wm_type == WM_TYPE_RETRAINING) - vlevel = 0; - else - vlevel = vlevel_max; - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set C */ - table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = MIN(MAX(vlevel_req, 3), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set B */ - table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = MIN(MAX(vlevel_req, 2), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, - &context->bw_ctx.dml, pipes, pipe_cnt); - - /* WM Set A */ - table_entry = &bw_params->wm_table.entries[WM_A]; - vlevel = 
MIN(vlevel_req, vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, - &context->bw_ctx.dml, pipes, pipe_cnt); -} - - -static bool dcn21_fast_validate_bw( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out, - bool fast_validate) +bool dcn21_fast_validate_bw(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -1197,7 +818,9 @@ static bool dcn21_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); + DC_FP_START(); pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -1287,7 +910,9 @@ static bool dcn21_fast_validate_bw( hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); if (!hsplit_pipe) { - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; + DC_FP_START(); + dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); + DC_FP_END(); continue; } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { @@ -1329,61 +954,6 @@ validate_out: return out; } -static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc, - struct dc_state *context, bool fast_validate) -{ - bool out = false; - - BW_VAL_TRACE_SETUP(); - - int vlevel = 0; - int pipe_split_from[MAX_PIPES]; - int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); - DC_LOGGER_INIT(dc->ctx->logger); - - BW_VAL_TRACE_COUNT(); - - /*Unsafe due to current pipe merge and split logic*/ - ASSERT(context != dc->current_state); - - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); - - if (pipe_cnt == 0) - goto validate_out; - - if (!out) - goto validate_fail; - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - goto validate_out; - } - - dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); - - BW_VAL_TRACE_END_WATERMARKS(); - - goto validate_out; - -validate_fail: - DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", - dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); - - BW_VAL_TRACE_SKIP(fail); - out = false; - -validate_out: - kfree(pipes); - - BW_VAL_TRACE_FINISH(); - - return out; -} - /* * Some of the functions further below use the FPU, so we need to wrap this * with DC_FP_START()/DC_FP_END(). 
Use the same approach as for @@ -1558,94 +1128,6 @@ static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx return &dsc->base; } -static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl) -{ - struct _vcs_dpi_voltage_scaling_st low_pstate_lvl; - int i; - - low_pstate_lvl.state = 1; - low_pstate_lvl.dcfclk_mhz = clk_table->entries[0].dcfclk_mhz; - low_pstate_lvl.fabricclk_mhz = clk_table->entries[0].fclk_mhz; - low_pstate_lvl.socclk_mhz = clk_table->entries[0].socclk_mhz; - low_pstate_lvl.dram_speed_mts = clk_table->entries[0].memclk_mhz * 2; - - low_pstate_lvl.dispclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dispclk_mhz; - low_pstate_lvl.dppclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dppclk_mhz; - low_pstate_lvl.dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[high_voltage_lvl].dram_bw_per_chan_gbps; - low_pstate_lvl.dscclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dscclk_mhz; - low_pstate_lvl.dtbclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dtbclk_mhz; - low_pstate_lvl.phyclk_d18_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_d18_mhz; - low_pstate_lvl.phyclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_mhz; - - for (i = clk_table->num_entries; i > 1; i--) - clk_table->entries[i] = clk_table->entries[i-1]; - clk_table->entries[1] = clk_table->entries[0]; - clk_table->num_entries++; - - return low_pstate_lvl; -} - -static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); - struct clk_limit_table *clk_table = &bw_params->clk_table; - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - unsigned int i, closest_clk_lvl = 0, k = 0; - int j; - - dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn2_1_ip.max_num_dpp = pool->base.pipe_count; - dcn2_1_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); - /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ - for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { - clock_limits[i] = dcn2_1_soc.clock_limits[i]; - } - - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - - /* clk_table[1] is reserved for min DF PState. skip here to fill in later. 
*/ - if (i == 1) - k++; - - clock_limits[k].state = k; - clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz; - clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - - k++; - } - for (i = 0; i < clk_table->num_entries + 1; i++) - dcn2_1_soc.clock_limits[i] = clock_limits[i]; - if (clk_table->num_entries) { - dcn2_1_soc.num_states = clk_table->num_entries + 1; - /* fill in min DF PState */ - dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl); - /* duplicate last level */ - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; - } - - dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); -} - static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); @@ -1896,24 +1378,6 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - int i; - - for (i = 0; i < pipe_cnt; i++) { - - pipes[i].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; - pipes[i].pipe.src.gpuvm = 1; - } - - return pipe_cnt; -} - static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -1941,7 +1405,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = { .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .update_bw_bounding_box = update_bw_bounding_box + .update_bw_bounding_box = dcn21_update_bw_bounding_box, }; static bool dcn21_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h index a27355171bca..f7ecc002c2f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h @@ -35,11 +35,22 @@ struct dc; struct resource_pool; struct _vcs_dpi_display_pipe_params_st; +extern struct _vcs_dpi_ip_params_st dcn2_1_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc; + struct dcn21_resource_pool { struct resource_pool base; }; struct resource_pool *dcn21_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); +bool dcn21_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int 
*pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate); #endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index 387eec616162..87dbeca18984 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -408,6 +408,6 @@ void dpp3_cm_set_gamut_remap( gamut_mode = 1; //follow dcn2 approach for now - using only coefficient set A - program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); + program_gamut_remap(dpp, arr_reg_val, gamut_mode); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index f4414de96acc..152c9c5733f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 1db1ca19411d..ed0a0e5fd805 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -545,8 +545,7 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -554,7 +553,7 @@ void dcn30_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. 
*/ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, @@ -566,7 +565,7 @@ void dcn30_init_hw(struct dc *dc) * To avoid this, power down hardware on boot * if DIG is turned on and seamless boot not enabled */ - if (dc->config.power_down_display_on_boot) { + if (!dc->config.seamless_boot_edp_requested) { struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 8ca26383b568..e6a62cc75139 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -72,8 +72,8 @@ #include "nbio/nbio_7_4_offset.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" @@ -1462,7 +1462,9 @@ int dcn30_populate_dml_pipes_from_context( int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; + DC_FP_START(); dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1731,7 +1733,10 @@ static bool init_soc_bounding_box(struct dc *dc, loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = {0}; @@ -2261,7 +2266,9 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( pipe_idx++; } + DC_FP_START(); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); if (!pstate_en) /* Restore full p-state latency */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index 1e3bd2e9cdcc..a046664e2031 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .hubbub_read_state = hubbub2_read_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5d9637b07429..4daf8931aa7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -73,8 +73,8 @@ #include "nbio/nbio_7_2_0_offset.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "reg_helper.h" #include 
"dce/dmub_abm.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile index 101620a8867a..f9561d7f97a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile @@ -1,11 +1,6 @@ # # (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved # -# All rights reserved. This notice is intended as a precaution against -# inadvertent publication and does not imply publication or any waiver -# of confidentiality. The year included in the foregoing notice is the -# year of creation of the work. -# # Authors: AMD # # Makefile for dcn302. @@ -20,13 +15,6 @@ ifdef CONFIG_PPC64 CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec endif -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float -endif - ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 2e9cbfa7663b..88318e8ffca8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -61,150 +61,18 @@ #include "resource.h" #include "vm_helper.h" +#include "dml/dcn302/dcn302_fpu.h" + #include "dimgrey_cavefish_ip_offset.h" #include "dcn/dcn_3_0_2_offset.h" #include "dcn/dcn_3_0_2_sh_mask.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "nbio/nbio_7_4_offset.h" #include "amdgpu_socbb.h" #define DC_LOGGER_INIT(logger) -struct _vcs_dpi_ip_params_st dcn3_02_ip = { - .use_min_dcfclk = 0, - .clamp_min_dcfclk = 0, - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 5, - .rob_buffer_size_kbytes = 184, - .det_buffer_size_kbytes = 184, - .dpte_buffer_size_in_pte_reqs_luma = 64, - .dpte_buffer_size_in_pte_reqs_chroma = 34, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .max_page_table_levels = 2, - .pte_chunk_size_kbytes = 2, // ? - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, // ? - .line_buffer_fixed_bpp = 0, // ? 
- .dcc_supported = true, - .writeback_interface_buffer_size_kbytes = 90, - .writeback_line_buffer_buffer_size = 0, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 5, - .max_num_dpp = 5, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.11, - .min_vblank_lines = 32, - .dppclk_delay_subtotal = 46, - .dynamic_metadata_vm_enabled = true, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 27, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 119, - .dcfclk_cstate_latency = 5.2, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .max_num_hdmi_frl_outputs = 1, - .odm_combine_4to1_supported = true, - - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .gfx7_compat_tiling_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = { - .clock_limits = { - { - .state = 0, - .dispclk_mhz = 562.0, - .dppclk_mhz = 300.0, - .phyclk_mhz = 300.0, - .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 405.6, - }, - }, - - .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ - .num_states = 1, - .sr_exit_time_us = 26.5, - .sr_enter_plus_exit_time_us = 31, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 40.0, - .writeback_latency_us = 12.0, - .max_request_size_bytes = 256, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 156, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .gpuvm_min_page_size_bytes = 4096, - .hostvm_min_page_size_bytes = 4096, - .dram_clock_change_latency_us = 404, - .dummy_pstate_latency_us = 5, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3650, - .xfc_bus_transport_time_us = 20, // ? - .xfc_xbuf_latency_tolerance_us = 4, // ? - .use_urgent_burst_bw = 1, // ? 
- .do_urgent_latency_adjustment = true, - .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, -}; - static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, @@ -1105,24 +973,19 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) loaded_ip->max_num_otg = pool->pipe_count; loaded_ip->max_num_dpp = pool->pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = { 0 }; if (dc->ctx->dc_bios->funcs->get_soc_bb_info( dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_02_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_02_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_02_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; + DC_FP_START(); + dcn302_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); } } @@ -1257,170 +1120,11 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool) *pool = NULL; } -static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, - unsigned int *optimal_dcfclk, - unsigned int *optimal_fclk) -{ - double bw_from_dram, bw_from_dram1, bw_from_dram2; - - bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans * - dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100); - bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans * - dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100); - - bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? 
bw_from_dram1 : bw_from_dram2; - - if (optimal_fclk) - *optimal_fclk = bw_from_dram / - (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); - - if (optimal_dcfclk) - *optimal_dcfclk = bw_from_dram / - (dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); -} - void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - unsigned int i, j; - unsigned int num_states = 0; - - unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; - unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; - - unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; - unsigned int num_dcfclk_sta_targets = 4; - unsigned int num_uclk_states; - - - if (dc->ctx->dc_bios->vram_info.num_chans) - dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - if (bw_params->clk_table.entries[0].memclk_mhz) { - int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; - - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - } - if (!max_dcfclk_mhz) - max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz; - if (!max_dispclk_mhz) - max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz; - if (!max_dppclk_mhz) - max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz; - if (!max_phyclk_mhz) - max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz; - - if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */ - dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; - num_dcfclk_sta_targets++; - } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */ - for (i = 0; i < num_dcfclk_sta_targets; i++) { - if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { - dcfclk_sta_targets[i] = max_dcfclk_mhz; - break; - } - } - /* Update size of array since we "removed" duplicates */ - num_dcfclk_sta_targets = i + 1; - } - - num_uclk_states = bw_params->clk_table.num_entries; - - /* Calculate optimal dcfclk for each uclk */ - for (i = 0; i < num_uclk_states; i++) { - dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, - &optimal_dcfclk_for_uclk[i], NULL); - if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { - optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; - } - } - - /* 
Calculate optimal uclk for each dcfclk sta target */ - for (i = 0; i < num_dcfclk_sta_targets; i++) { - for (j = 0; j < num_uclk_states; j++) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { - optimal_uclk_for_dcfclk_sta_targets[i] = - bw_params->clk_table.entries[j].memclk_mhz * 16; - break; - } - } - } - - i = 0; - j = 0; - /* create the final dcfclk and uclk table */ - while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } else { - if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } else { - j = num_uclk_states; - } - } - } - - while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } - - while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && - optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } - - dcn3_02_soc.num_states = num_states; - for (i = 0; i < dcn3_02_soc.num_states; i++) { - dcn3_02_soc.clock_limits[i].state = i; - dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; - dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; - dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; - - /* Fill all states with max values of all other clocks */ - dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; - dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; - dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - /* Populate from bw_params for DTBCLK, SOCCLK */ - if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) - dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz; - else - dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) - dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz; - else - dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; - /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */ - /* FCLK, PHYCLK_D18, DSCCLK */ - dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz; - } - /* re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); - if (dc->current_state) - dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); - } + DC_FP_START(); + dcn302_fpu_update_bw_bounding_box(dc, bw_params); + DC_FP_END(); } static struct resource_funcs dcn302_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h index 42d2c73e30bc..9f24e73b92b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h @@ -28,6 +28,9 @@ #include "core_types.h" +extern struct 
_vcs_dpi_ip_params_st dcn3_02_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc; + struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile index 6f7a1f2b49f0..8702e0b7fda3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile @@ -8,32 +8,6 @@ DCN3_03 = dcn303_init.o dcn303_hwseq.o dcn303_resource.o -ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -msse -endif - -ifdef CONFIG_PPC64 -CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mhard-float -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mpreferred-stack-boundary=4 -else -CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -msse2 -endif -endif - AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 2de687f64cf6..4fcbc0502808 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -48,144 +48,14 @@ #include "sienna_cichlid_ip_offset.h" #include "dcn/dcn_3_0_3_offset.h" #include "dcn/dcn_3_0_3_sh_mask.h" -#include "dcn/dpcs_3_0_3_offset.h" -#include "dcn/dpcs_3_0_3_sh_mask.h" +#include "dpcs/dpcs_3_0_3_offset.h" +#include "dpcs/dpcs_3_0_3_sh_mask.h" #include "nbio/nbio_2_3_offset.h" -#define DC_LOGGER_INIT(logger) +#include "dml/dcn303/dcn303_fpu.h" -struct _vcs_dpi_ip_params_st dcn3_03_ip = { - .use_min_dcfclk = 0, - .clamp_min_dcfclk = 0, - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 2, - .rob_buffer_size_kbytes = 184, - .det_buffer_size_kbytes = 184, - .dpte_buffer_size_in_pte_reqs_luma = 64, - .dpte_buffer_size_in_pte_reqs_chroma = 34, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .max_page_table_levels = 2, - .pte_chunk_size_kbytes = 2, // ? - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, // ? - .line_buffer_fixed_bpp = 0, // ? 
- .dcc_supported = true, - .writeback_interface_buffer_size_kbytes = 90, - .writeback_line_buffer_buffer_size = 0, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 2, - .max_num_dpp = 2, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.11, - .min_vblank_lines = 32, - .dppclk_delay_subtotal = 46, - .dynamic_metadata_vm_enabled = true, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 27, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 119, - .dcfclk_cstate_latency = 5.2, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .max_num_hdmi_frl_outputs = 1, - .odm_combine_4to1_supported = false, - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .gfx7_compat_tiling_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = { - .clock_limits = { - { - .state = 0, - .dispclk_mhz = 1217.0, - .dppclk_mhz = 1217.0, - .phyclk_mhz = 810.0, - .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 405.6, - }, - }, +#define DC_LOGGER_INIT(logger) - .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ - .num_states = 1, - .sr_exit_time_us = 35.5, - .sr_enter_plus_exit_time_us = 40, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 40.0, - .writeback_latency_us = 12.0, - .max_request_size_bytes = 256, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 156, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .gpuvm_min_page_size_bytes = 4096, - .hostvm_min_page_size_bytes = 4096, - .dram_clock_change_latency_us = 404, - .dummy_pstate_latency_us = 5, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3650, - .xfc_bus_transport_time_us = 20, // ? - .xfc_xbuf_latency_tolerance_us = 4, // ? - .use_urgent_burst_bw = 1, // ? 
- .do_urgent_latency_adjustment = true, - .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, -}; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, @@ -1031,24 +901,18 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) loaded_ip->max_num_otg = pool->pipe_count; loaded_ip->max_num_dpp = pool->pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = { 0 }; if (dc->ctx->dc_bios->funcs->get_soc_bb_info( dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_03_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_03_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_03_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; + DC_FP_START(); + dcn303_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); } } @@ -1186,183 +1050,12 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool) *pool = NULL; } -static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, - unsigned int *optimal_dcfclk, - unsigned int *optimal_fclk) -{ - double bw_from_dram, bw_from_dram1, bw_from_dram2; - - bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans * - dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100); - bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans * - dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100); - - bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? 
bw_from_dram1 : bw_from_dram2; - - if (optimal_fclk) - *optimal_fclk = bw_from_dram / - (dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes * - (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); - - if (optimal_dcfclk) - *optimal_dcfclk = bw_from_dram / - (dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); -} void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - unsigned int i, j; - unsigned int num_states = 0; - - unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; - unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; - - unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; - unsigned int num_dcfclk_sta_targets = 4; - unsigned int num_uclk_states; - - - if (dc->ctx->dc_bios->vram_info.num_chans) - dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - if (bw_params->clk_table.entries[0].memclk_mhz) { - int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; - - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - } - if (!max_dcfclk_mhz) - max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz; - if (!max_dispclk_mhz) - max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz; - if (!max_dppclk_mhz) - max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz; - if (!max_phyclk_mhz) - max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz; - - if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; - num_dcfclk_sta_targets++; - } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - for (i = 0; i < num_dcfclk_sta_targets; i++) { - if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { - dcfclk_sta_targets[i] = max_dcfclk_mhz; - break; - } - } - /* Update size of array since we "removed" duplicates */ - num_dcfclk_sta_targets = i + 1; - } - - num_uclk_states = bw_params->clk_table.num_entries; - - /* Calculate optimal dcfclk for each uclk */ - for (i = 0; i < num_uclk_states; i++) { - dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, - &optimal_dcfclk_for_uclk[i], NULL); - if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) - optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; - } - - /* Calculate optimal uclk for each dcfclk sta target */ - for (i = 0; i < num_dcfclk_sta_targets; i++) { - for (j = 0; j < num_uclk_states; j++) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { - 
optimal_uclk_for_dcfclk_sta_targets[i] = - bw_params->clk_table.entries[j].memclk_mhz * 16; - break; - } - } - } - - i = 0; - j = 0; - /* create the final dcfclk and uclk table */ - while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } else { - if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = - bw_params->clk_table.entries[j++].memclk_mhz * 16; - } else { - j = num_uclk_states; - } - } - } - - while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } - - while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && - optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } - - dcn3_03_soc.num_states = num_states; - for (i = 0; i < dcn3_03_soc.num_states; i++) { - dcn3_03_soc.clock_limits[i].state = i; - dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; - dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; - dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; - - /* Fill all states with max values of all other clocks */ - dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; - dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; - dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - /* Populate from bw_params for DTBCLK, SOCCLK */ - if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) - dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz; - else - dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) - dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz; - else - dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; - /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */ - /* FCLK, PHYCLK_D18, DSCCLK */ - dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; - } - - // WA: patch strobe modes to compensate for DCN303 BW issue - if (dcn3_03_soc.num_chans <= 4) { - for (i = 0; i < dcn3_03_soc.num_states; i++) { - if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) - break; - - if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { - dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; - dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; - } - } - } - - /* re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); - if (dc->current_state) - dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); - } + DC_FP_START(); + dcn303_fpu_update_bw_bounding_box(dc, bw_params); + DC_FP_END(); } static struct resource_funcs dcn303_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h index 5b590c169763..9c7d79540900 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h @@ -10,6 +10,9 @@ #include "core_types.h" +extern struct _vcs_dpi_ip_params_st dcn3_03_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc; + struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index ea4f8e06b07c..287a1066b547 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -121,7 +121,8 @@ static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst) return; } if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_GATE_DISABLE, 1, DPSTREAMCLK_ROOT_GATE_DISABLE, 1); } @@ -130,8 +131,9 @@ static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst) struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, - DPSTREAMCLK_ROOT_GATE_DISABLE, 0); + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 0, + DPSTREAMCLK_GATE_DISABLE, 0); switch (otg_inst) { case 0: @@ -180,7 +182,8 @@ void dccg31_enable_symclk32_se( switch (hpo_se_inst) { case 0: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 1, SYMCLK32_ROOT_SE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, phyd32clk, @@ -188,7 +191,8 @@ void dccg31_enable_symclk32_se( break; case 1: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 1, SYMCLK32_ROOT_SE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, phyd32clk, @@ -196,7 +200,8 @@ void dccg31_enable_symclk32_se( break; case 2: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 1, SYMCLK32_ROOT_SE2_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, phyd32clk, @@ -204,7 +209,8 @@ void dccg31_enable_symclk32_se( break; case 3: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 1, SYMCLK32_ROOT_SE3_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, phyd32clk, @@ -229,7 +235,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE0_SRC_SEL, 0, SYMCLK32_SE0_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 0, SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); break; case 1: @@ -237,7 +244,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE1_SRC_SEL, 0, SYMCLK32_SE1_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 0, SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); break; case 2: @@ -245,7 +253,8 @@ void dccg31_disable_symclk32_se( 
SYMCLK32_SE2_SRC_SEL, 0, SYMCLK32_SE2_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 0, SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); break; case 3: @@ -253,7 +262,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE3_SRC_SEL, 0, SYMCLK32_SE3_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 0, SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); break; default: @@ -275,7 +285,8 @@ void dccg31_enable_symclk32_le( switch (hpo_le_inst) { case 0: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, 1, SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, @@ -283,7 +294,8 @@ void dccg31_enable_symclk32_le( break; case 1: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, 1, SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, @@ -308,7 +320,8 @@ void dccg31_disable_symclk32_le( SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, 0, SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: @@ -316,7 +329,8 @@ void dccg31_disable_symclk32_le( SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, 0, SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); break; default: @@ -406,54 +420,89 @@ void dccg31_set_physymclk( /* Force PHYSYMCLK on and Select phyd32clk as the source of clock which is output to PHY through DCIO */ switch (phy_inst) { case 0: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 1, PHYASYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 0, PHYASYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_GATE_DISABLE, 0); + } break; case 1: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 1, PHYBSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 0, PHYBSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_GATE_DISABLE, 0); + } break; case 2: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, 1, PHYCSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, 
PHYCSYMCLK_FORCE_EN, 0, PHYCSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_GATE_DISABLE, 0); + } break; case 3: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 1, PHYDSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 0, PHYDSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_GATE_DISABLE, 0); + } break; case 4: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 1, PHYESYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 0, PHYESYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_GATE_DISABLE, 0); + } break; default: BREAK_TO_DEBUGGER(); @@ -615,6 +664,13 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_dpstreamclk(dccg, 3); } + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) { + dccg31_set_physymclk(dccg, 0, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 1, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 2, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 3, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 4, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + } } static const struct dccg_funcs dccg31_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index a013a32bbaf7..269cabbea72a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -66,6 +66,7 @@ SR(DSCCLK1_DTO_PARAM),\ SR(DSCCLK2_DTO_PARAM),\ SR(DSCCLK_DTO_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL2),\ SR(DCCG_GATE_DISABLE_CNTL3),\ SR(HDMISTREAMCLK0_DTO_PARAM) @@ -135,6 +136,11 @@ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 8b9b1a5309ba..d94fd1010deb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -240,18 +240,9 @@ static void enc31_hw_init(struct link_encoder *enc) // 100MHz -> 0x32 // 48MHz -> 0x18 -#ifdef CLEANUP_FIXME - /*from display_init*/ - 
REG_WRITE(RDPCSTX_DEBUG_CONFIG, 0); -#endif - // Set TMDS_CTL0 to 1. This is a legacy setting. REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1); - /*HW default is 5*/ - REG_UPDATE(RDPCSTX_CNTL, - RDPCS_TX_FIFO_RD_START_DELAY, 4); - dcn10_aux_initialize(enc10); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 5065904c7833..23621ff08c90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -710,6 +710,16 @@ static void dcn31_hpo_dp_stream_enc_read_state( } } +static void dcn31_set_hblank_min_symbol_width( + struct hpo_dp_stream_encoder *enc, + uint16_t width) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0, + HBLANK_MINIMUM_SYMBOL_WIDTH, width); +} + static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, @@ -725,6 +735,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, .read_state = dcn31_hpo_dp_stream_enc_read_state, + .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width, }; void dcn31_hpo_dp_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h index 70b94fc25304..7c77c71591a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h @@ -80,7 +80,8 @@ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id) + SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ + SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) #define DCN3_1_HPO_DP_STREAM_ENC_REGS \ uint32_t DP_STREAM_MAPPER_CONTROL0;\ @@ -116,7 +117,8 @@ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ - uint32_t DP_SYM32_ENC_VID_CRC_CONTROL + uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\ + uint32_t DP_SYM32_ENC_HBLANK_CONTROL #define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ @@ -202,7 +204,8 @@ type GSP_SOF_REFERENCE;\ type METADATA_PACKET_ENABLE;\ type CRC_ENABLE;\ - type CRC_CONT_MODE_ENABLE + type CRC_CONT_MODE_ENABLE;\ + type HBLANK_MINIMUM_SYMBOL_WIDTH struct dcn31_hpo_dp_stream_encoder_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 5e3bcaf12cac..3e6d6ebd199e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -949,6 +949,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, } } +static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* + * Pstate latency is ~20us so if we wait over 40us and pstate allow + * still not asserted, we are probably stuck and going to hang + */ + const unsigned int pstate_wait_timeout_us = 100; + const 
unsigned int pstate_wait_expected_timeout_us = 40; + + static unsigned int max_sampled_pstate_wait_us; /* data collection */ + static bool forced_pstate_allow; /* help with revert wa */ + + unsigned int debug_data = 0; + unsigned int i; + + if (forced_pstate_allow) { + /* we hacked to force pstate allow to prevent hang last time + * we verify_allow_pstate_change_high. so disable force + * here so we can check status + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); + forced_pstate_allow = false; + } + + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); + + for (i = 0; i < pstate_wait_timeout_us; i++) { + debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + + /* Debug bit is specific to ASIC. */ + if (debug_data & (1 << 26)) { + if (i > pstate_wait_expected_timeout_us) + DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); + return true; + } + if (max_sampled_pstate_wait_us < i) + max_sampled_pstate_wait_us = i; + + udelay(1); + } + + /* force pstate allow to prevent system hang + * and break to debugger to investigate + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); + forced_pstate_allow = true; + + DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", + debug_data); + + return false; +} + static const struct hubbub_funcs hubbub31_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, @@ -961,6 +1020,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_watermarks = hubbub31_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 4206ce5bf9a9..4be228680909 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -194,11 +194,10 @@ void dcn31_init_hw(struct dc *dc) /* Enables outbox notifications for usb4 dpia */ if (dc->res_pool->usb4_dpia_count) - dmub_enable_outbox_notification(dc); + dmub_enable_outbox_notification(dc->ctx->dmub_srv); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -206,7 +205,7 @@ void dcn31_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. 
*/ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, @@ -340,21 +339,31 @@ void dcn31_enable_power_gating_plane( { bool force_on = true; /* disable power gating */ - if (enable) + if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate) force_on = false; /* DCHUBP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - + REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); /* DPP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); + + force_on = true; /* disable power gating */ + if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate) + force_on = false; /* DCS0/1/2/3/4/5 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); + REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); } void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) @@ -420,12 +429,15 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p { uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
0 : 2; - + uint32_t org_ip_request_cntl; if (hws->ctx->dc->debug.disable_hubp_power_gate) return; if (REG(DOMAIN0_PG_CONFIG) == 0) return; + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); switch (hubp_inst) { case 0: @@ -448,6 +460,8 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p BREAK_TO_DEBUGGER(); break; } + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index e8562fa11366..8afe2130d7c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -161,7 +161,7 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc) return true; } -static void optc31_set_drr( +void optc31_set_drr( struct timing_generator *optc, const struct drr_params *params) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h index d8ef2f0d0c95..a37b16040c1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h @@ -256,4 +256,6 @@ void dcn31_timing_generator_init(struct optc *optc1); +void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params); + #endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 83ece02380a8..11ea9d13e312 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -83,7 +83,8 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL; cmd.panel_cntl.data.bl_pwm_ref_div1 = panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; - + cmd.panel_cntl.data.bl_pwm_ref_div2 = + panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) return 0; @@ -92,6 +93,8 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = cmd.panel_cntl.data.bl_pwm_period_cntl; panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = cmd.panel_cntl.data.bl_pwm_ref_div1; + panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2 = + cmd.panel_cntl.data.bl_pwm_ref_div2; return cmd.panel_cntl.data.current_backlight; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 8d64187478e4..89b7b6b7254a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1011,7 +1011,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = false, + .sanity_checks = true, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1033,6 +1033,7 @@ static const struct dc_debug_options debug_defaults_drv = { 
.optimize_edp_link_rate = true, .enable_sw_cntl_psr = true, .apply_vendor_specific_lttpr_wa = true, + .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ }; static const struct dc_debug_options debug_defaults_diags = { @@ -1776,7 +1777,7 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } -static int dcn31_populate_dml_pipes_from_context( +int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) @@ -1786,7 +1787,9 @@ static int dcn31_populate_dml_pipes_from_context( struct pipe_ctx *pipe; bool upscaled = false; + DC_FP_START(); dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; @@ -1809,6 +1812,7 @@ static int dcn31_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.immediate_flip = true; pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; pipes[pipe_cnt].pipe.src.gpuvm = true; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; @@ -1841,7 +1845,8 @@ static int dcn31_populate_dml_pipes_from_context( if (is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { dc->config.enable_4to1MPC = true; - } else if (!is_dual_plane(pipe->plane_state->format)) { + } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { + /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; pipes[0].pipe.src.unbounded_req_mode = true; } @@ -1996,7 +2001,9 @@ static void dcn31_calculate_wm_and_dlg_fp( pipe_idx++; } + DC_FP_START(); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); } void dcn31_calculate_wm_and_dlg( @@ -2066,7 +2073,7 @@ static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; -static void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { struct clk_limit_table *clk_table = &bw_params->clk_table; struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; @@ -2174,7 +2181,7 @@ static struct clock_source *dcn30_clock_source_create( if (!clk_src) return NULL; - if (dcn3_clk_src_construct(clk_src, ctx, bios, id, + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index a513363b3326..4b7ab21ea15b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -43,6 +43,11 @@ void dcn31_calculate_wm_and_dlg( display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel); +int dcn31_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); +void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void 
dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context); struct resource_pool *dcn31_create_resource_pool( diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile index f3c00f479e1c..c831ad46e81c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile @@ -1,6 +1,5 @@ # -# Copyright 2017 Advanced Micro Devices, Inc. -# Copyright 2019 Raptor Engineering, LLC +# Copyright © 2021 Advanced Micro Devices, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -20,23 +19,25 @@ # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # +# Authors: AMD # -# Makefile for the 'calcs' sub-component of DAL. -# It calculates Bandwidth and Watermarks values for HW programming -# +# Makefile for dcn315. + +DCN315 = dcn315_resource.o ifdef CONFIG_X86 -calcs_ccflags := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -msse endif ifdef CONFIG_PPC64 -calcs_ccflags := -mhard-float -maltivec +CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -mhard-float -maltivec endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mhard-float endif ifdef CONFIG_X86 @@ -44,25 +45,12 @@ ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 # (8B stack alignment). -calcs_ccflags += -mpreferred-stack-boundary=4 +CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mpreferred-stack-boundary=4 else -calcs_ccflags += -msse2 -endif +CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -msse2 endif - -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_rcflags) - -BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o - -ifdef CONFIG_DRM_AMD_DC_DCN -BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o endif -AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS)) +AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315)) -AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS) +AMD_DISPLAY_FILES += $(AMD_DAL_DCN315) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c new file mode 100644 index 000000000000..06adb77c206b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -0,0 +1,2284 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn315_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn315/irq_service_dcn315.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn31/dcn31_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn31/dcn31_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_1_5_offset.h" +#include "dcn/dcn_3_1_5_sh_mask.h" +#include "dpcs/dpcs_4_2_2_offset.h" +#include "dpcs/dpcs_4_2_2_sh_mask.h" + +#define NBIO_BASE__INST0_SEG0 0x00000000 +#define NBIO_BASE__INST0_SEG1 0x00000014 +#define NBIO_BASE__INST0_SEG2 0x00000D20 +#define NBIO_BASE__INST0_SEG3 0x00010400 +#define NBIO_BASE__INST0_SEG4 0x0241B000 +#define NBIO_BASE__INST0_SEG5 0x04040000 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 +#define DPCS_BASE__INST0_SEG5 0 + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define regBIF_BX_PF2_RSMU_INDEX 0x0000 +#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 
+#define regBIF_BX_PF2_RSMU_DATA 0x0001 +#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1 +#define regBIF_BX2_BIOS_SCRATCH_6 0x003e +#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL +#define regBIF_BX2_BIOS_SCRATCH_2 0x003a +#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL +#define regBIF_BX2_BIOS_SCRATCH_3 0x003b +#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL + +#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 +#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DC_LOGGER_INIT(logger) + +#define DCN3_15_DEFAULT_DET_SIZE 192 +#define DCN3_15_MAX_DET_SIZE 384 +#define DCN3_15_MIN_COMPBUF_SIZE_KB 128 +#define DCN3_15_CRB_SEGMENT_SIZE_KB 64 + +struct _vcs_dpi_ip_params_st dcn3_15_ip = { + .gpuvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_enable = 1, + .hostvm_max_page_table_levels = 2, + .rob_buffer_size_kbytes = 64, + .det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE, + .min_comp_buffer_size_kbytes = DCN3_15_MIN_COMPBUF_SIZE_KB, + .config_return_buffer_size_in_kbytes = 1024, + .compressed_buffer_segment_size_in_kbytes = 64, + .meta_fifo_size_in_kentries = 32, + .zero_size_buffer_entries = 512, + .compbuf_reserved_space_64b = 256, + .compbuf_reserved_space_zs = 64, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .meta_chunk_size_kbytes = 2, + .min_meta_chunk_size_bytes = 256, + .writeback_chunk_size_kbytes = 8, + .ptoi_supported = false, + .num_dsc = 3, + .maximum_dsc_bits_per_component = 10, + .dsc422_native_support = false, + .is_line_buffer_bpp_fixed = true, + .line_buffer_fixed_bpp = 49, + .line_buffer_size_bits = 789504, + .max_line_buffer_lines = 12, + .writeback_interface_buffer_size_kbytes = 90, + .max_num_dpp = 4, + .max_num_otg = 4, + .max_num_hdmi_frl_outputs = 1, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .dispclk_ramp_margin_percent = 1, + .max_inter_dcn_tile_repeaters = 9, + .cursor_buffer_size = 16, + .cursor_chunk_size = 2, + .writeback_line_buffer_buffer_size = 0, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .dppclk_delay_subtotal = 46, + .dppclk_delay_scl = 50, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dynamic_metadata_vm_enabled = false, + .odm_combine_4to1_supported = false, + .dcc_supported = true, +}; + +struct _vcs_dpi_soc_bounding_box_st 
dcn3_15_soc = { + /*TODO: correct dispclk/dppclk voltage level determination*/ + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 1372.0, + .dppclk_mhz = 1372.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 1, + .dispclk_mhz = 1372.0, + .dppclk_mhz = 1372.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 2, + .dispclk_mhz = 1372.0, + .dppclk_mhz = 1372.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 3, + .dispclk_mhz = 1372.0, + .dppclk_mhz = 1372.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 4, + .dispclk_mhz = 1372.0, + .dppclk_mhz = 1372.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 600.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_z8_time_us = 50.0, + .sr_enter_plus_exit_z8_time_us = 50.0, + .writeback_latency_us = 12.0, + .dram_channel_width_bytes = 4, + .round_trip_ping_latency_dcfclk_cycles = 106, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_sdp_bw_after_urgent = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 60.0, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .return_bus_width_bytes = 64, + .downspread_percent = 0.38, + .dcn_downspread_percent = 0.38, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .do_urgent_latency_adjustment = false, + .urgent_latency_adjustment_fabric_clock_component_us = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, +}; + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand register list macro defined in HW object header file + */ + +/* DCN */ +/* TODO awful hack. 
fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + 
vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN31_REG_LIST(id), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ 
+ DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + 
OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN31() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN31(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + 
HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn31 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .blends_with_above = true, + .blends_with_below = true, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_z10 = true, /*hw not support it*/ + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + 
.disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096,/*upto true 4k*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .optimize_edp_link_rate = true, + .enable_sw_cntl_psr = true, + .psr_power_use_phy_fsm = 0, +}; + +static const struct dc_debug_options debug_defaults_diags = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = true, + .clock_trace = true, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, + .disable_clock_gate = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, + .disable_stutter = false, + .scl_reset_length10 = true, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .enable_tri_buf = true, + .use_max_lb = true +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + 
&mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_15_ip.det_buffer_size_kbytes, + dcn3_15_ip.pixel_chunk_size_kbytes, + dcn3_15_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn31.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn31_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
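+ * Under the dynamic link encoder assignment scheme used here (see the + * link_enc_cfg_link_encs_assign hook in the resource funcs below), such an + * encoder can be matched to a stream at commit time rather than being tied + * to a specific connector.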
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn315_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /*PHYB is wired off in HW, allow front end to remap, otherwise needs more changes*/ + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= 
ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn31_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
+ * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + hws->wa.dp_hpo_and_otg_sequence = true; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn315_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static const struct resource_create_funcs res_create_maximus_funcs = { + .read_dce_straps = NULL, + .create_audio = NULL, + .create_stream_encoder = NULL, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static void dcn315_resource_destruct(struct dcn315_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < 
pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn31_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static void 
dcn315_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn315_resource_pool *dcn31_pool = TO_DCN315_RES_POOL(*pool); + + dcn315_resource_destruct(dcn31_pool); + kfree(dcn31_pool); + *pool = NULL; +} + +static struct clock_source *dcn31_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static int dcn315_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; + + DC_FP_START(); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. 
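+ * In other words, every pipe is validated with immediate_flip asserted so + * that DML accounts for the flip bandwidth up front; a flip enabled later + * at runtime then stays within the limits this validation produced.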
+ */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + pipe_cnt++; + } + + if (pipe_cnt) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; + if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; + ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB; + } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { + /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, closest_clk_lvl; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + int j; + + // Default clock levels are used for diags, which may lead to overclocking. + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + + dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_15_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + + /* Prepass to find max clocks independent of voltage level. 
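+ * Each SMU clock-table entry is then matched to the highest SoC state whose + * dcfclk does not exceed that entry (closest_clk_lvl, searched from the top); + * voltage-dependent clocks come from the SMU entry itself, dispclk and dppclk + * are pinned to the maxima found in this prepass, and the remaining clocks + * are copied from the matched SoC state.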
*/ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } + + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_15_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_15_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_15_soc.num_states - 1; + } + + clock_limits[i].state = i; + + /* Clocks dependent on voltage level. */ + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + clock_limits[i].dcfclk_mhz < dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + clock_limits[i].dcfclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; + } + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_15_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : + dcn3_15_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + clock_limits[i].dram_bw_per_chan_gbps = dcn3_15_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + for (i = 0; i < clk_table->num_entries; i++) + dcn3_15_soc.clock_limits[i] = clock_limits[i]; + if (clk_table->num_entries) { + dcn3_15_soc.num_states = clk_table->num_entries; + } + } + + if (max_dispclk_mhz) { + dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + } + + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31); + else + dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA); +} + +static struct resource_funcs dcn315_res_pool_funcs = { + .destroy = dcn315_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn31_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn315_populate_dml_pipes_from_context, + .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = 
dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn315_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +}; + +static bool dcn315_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn315_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn31; + + pool->base.funcs = &dcn315_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.dp_hpo = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN301 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + 
dc->debug = debug_defaults_drv; + else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { + dc->debug = debug_defaults_diags; + } else + dc->debug = debug_defaults_diags; + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* TODO: DCCG */ + pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn315_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, 
+ &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn31_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? + &res_create_funcs : &res_create_maximus_funcs))) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn31_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; + + return true; + +create_fail: + + dcn315_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn315_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn315_resource_pool *pool = + kzalloc(sizeof(struct dcn315_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn315_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h new file mode 100644 index 000000000000..f3a36820a31f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h @@ -0,0 +1,42 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN315_RESOURCE_H_ +#define _DCN315_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN315_RES_POOL(pool)\ + container_of(pool, struct dcn315_resource_pool, base) + +struct dcn315_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn315_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN315_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile new file mode 100644 index 000000000000..cd87b687c5e2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile @@ -0,0 +1,56 @@ +# +# Copyright 2021 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Authors: AMD +# +# Makefile for dcn316. + +DCN316 = dcn316_resource.o + +ifdef CONFIG_X86 +CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -mhard-float -maltivec +endif + +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mhard-float +endif + +ifdef CONFIG_X86 +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mpreferred-stack-boundary=4 +else +CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -msse2 +endif +endif + +AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCN316) diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c new file mode 100644 index 000000000000..8decc3ccf8ca --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -0,0 +1,2285 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn316_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn31/irq_service_dcn31.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn31/dcn31_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn31/dcn31_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_1_6_offset.h" +#include "dcn/dcn_3_1_6_sh_mask.h" +#include "dpcs/dpcs_4_2_3_offset.h" +#include "dpcs/dpcs_4_2_3_sh_mask.h" + +#define regBIF_BX1_BIOS_SCRATCH_2 0x003a +#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 1 +#define regBIF_BX1_BIOS_SCRATCH_3 0x003b +#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 1 +#define regBIF_BX1_BIOS_SCRATCH_6 0x003e +#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 1 + +#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 +#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 
0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 +#define DPCS_BASE__INST0_SEG5 0 + +#define NBIO_BASE__INST0_SEG0 0x00000000 +#define NBIO_BASE__INST0_SEG1 0x00000014 +#define NBIO_BASE__INST0_SEG2 0x00000D20 +#define NBIO_BASE__INST0_SEG3 0x00010400 +#define NBIO_BASE__INST0_SEG4 0x0241B000 +#define NBIO_BASE__INST0_SEG5 0x04040000 + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DC_LOGGER_INIT(logger) + +#define DCN3_16_DEFAULT_DET_SIZE 192 +#define DCN3_16_MAX_DET_SIZE 384 +#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 +#define DCN3_16_CRB_SEGMENT_SIZE_KB 64 + +struct _vcs_dpi_ip_params_st dcn3_16_ip = { + .gpuvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_enable = 1, + .hostvm_max_page_table_levels = 2, + .rob_buffer_size_kbytes = 64, + .det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE, + .config_return_buffer_size_in_kbytes = 1024, + .compressed_buffer_segment_size_in_kbytes = 64, + .meta_fifo_size_in_kentries = 32, + .zero_size_buffer_entries = 512, + .compbuf_reserved_space_64b = 256, + .compbuf_reserved_space_zs = 64, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .meta_chunk_size_kbytes = 2, + .min_meta_chunk_size_bytes = 256, + .writeback_chunk_size_kbytes = 8, + .ptoi_supported = false, + .num_dsc = 3, + .maximum_dsc_bits_per_component = 10, + .dsc422_native_support = false, + .is_line_buffer_bpp_fixed = true, + .line_buffer_fixed_bpp = 48, + .line_buffer_size_bits = 789504, + .max_line_buffer_lines = 12, + .writeback_interface_buffer_size_kbytes = 90, + .max_num_dpp = 4, + .max_num_otg = 4, + .max_num_hdmi_frl_outputs = 1, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .dispclk_ramp_margin_percent = 1, + .max_inter_dcn_tile_repeaters = 8, + .cursor_buffer_size = 16, + .cursor_chunk_size = 2, + .writeback_line_buffer_buffer_size = 0, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .dppclk_delay_subtotal = 46, + .dppclk_delay_scl = 50, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dynamic_metadata_vm_enabled = false, + .odm_combine_4to1_supported = false, + .dcc_supported = true, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = { + /*TODO: correct dispclk/dppclk voltage level determination*/ + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 556.0, + .dppclk_mhz = 556.0, + .phyclk_mhz = 600.0, + .phyclk_d18_mhz = 445.0, + .dscclk_mhz = 186.0, + .dtbclk_mhz = 625.0, + }, + { + .state = 1, + .dispclk_mhz = 625.0, + .dppclk_mhz = 625.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 209.0, + .dtbclk_mhz = 625.0, + }, + { + .state = 2, + .dispclk_mhz = 625.0, + .dppclk_mhz = 625.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 209.0, + .dtbclk_mhz = 625.0, + }, + { + .state 
= 3, + .dispclk_mhz = 1112.0, + .dppclk_mhz = 1112.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 371.0, + .dtbclk_mhz = 625.0, + }, + { + .state = 4, + .dispclk_mhz = 1250.0, + .dppclk_mhz = 1250.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 417.0, + .dtbclk_mhz = 625.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_z8_time_us = 442.0, + .sr_enter_plus_exit_z8_time_us = 560.0, + .writeback_latency_us = 12.0, + .dram_channel_width_bytes = 4, + .round_trip_ping_latency_dcfclk_cycles = 106, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_sdp_bw_after_urgent = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 60.0, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .return_bus_width_bytes = 64, + .downspread_percent = 0.38, + .dcn_downspread_percent = 0.5, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .do_urgent_latency_adjustment = false, + .urgent_latency_adjustment_fabric_clock_component_us = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, +}; + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +/* TODO awful hack. 
fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + 
vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN31_REG_LIST(id), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + + + +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + 
DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + //MPC_RMU_REG_LIST_DCN3AG(2), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) 
+}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN31() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN31(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, 
DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn31 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .blends_with_above = true, + .blends_with_below = true, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_z10 = true, /*hw not support it*/ + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = 
false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096,/*upto true 4k*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .optimize_edp_link_rate = true, + .enable_sw_cntl_psr = true, +}; + +static const struct dc_debug_options debug_defaults_diags = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = true, + .clock_trace = true, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, + .disable_clock_gate = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, + .disable_stutter = false, + .scl_reset_length10 = true, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .enable_tri_buf = true, + .use_max_lb = true +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct 
dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_16_ip.det_buffer_size_kbytes, + dcn3_16_ip.pixel_chunk_size_kbytes, + dcn3_16_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn31.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn31_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + + +static struct stream_encoder *dcn316_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register 
blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + + +static struct dce_hwseq *dcn31_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + hws->wa.dp_hpo_and_otg_sequence = true; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn316_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static const struct resource_create_funcs res_create_maximus_funcs = { + .read_dce_straps = NULL, + .create_audio = NULL, + .create_stream_encoder = NULL, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static void dcn316_resource_destruct(struct dcn316_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + 
pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + 
struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn31_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static void dcn316_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn316_resource_pool *dcn31_pool = TO_DCN316_RES_POOL(*pool); + + dcn316_resource_destruct(dcn31_pool); + kfree(dcn31_pool); + *pool = NULL; +} + +static struct clock_source *dcn31_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static int dcn316_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; + + DC_FP_START(); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + /* + * Immediate flip can be set dynamically after enabling the plane. 
+ * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. + */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + pipe_cnt++; + } + + if (pipe_cnt) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_16_CRB_SEGMENT_SIZE_KB; + if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE; + ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB; + } else if (!is_dual_plane(pipe->plane_state->format)) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, closest_clk_lvl; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + int j; + + // Default clock levels are used for diags, which may lead to overclocking. + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + + dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_16_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + + /* Prepass to find max clocks independent of voltage level. 
*/ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } + + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + // Ported from DCN315 + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_16_soc.num_states - 1; + } + + clock_limits[i].state = i; + + /* Clocks dependent on voltage level. */ + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + clock_limits[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + clock_limits[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; + } + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + clock_limits[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + for (i = 0; i < clk_table->num_entries; i++) + dcn3_16_soc.clock_limits[i] = clock_limits[i]; + if (clk_table->num_entries) { + dcn3_16_soc.num_states = clk_table->num_entries; + } + } + + if (max_dispclk_mhz) { + dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + } + + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); + else + dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA); +} + +static struct resource_funcs dcn316_res_pool_funcs = { + .destroy = dcn316_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn31_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn316_populate_dml_pipes_from_context, + .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context 
= dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn316_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +}; + +static bool dcn316_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn316_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn31; + + pool->base.funcs = &dcn316_res_pool_funcs; + + /************************************************* + * Resource + asic cap harcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.dp_hpo = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN301 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + 
dc->debug = debug_defaults_drv; + else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { + dc->debug = debug_defaults_diags; + } else + dc->debug = debug_defaults_diags; + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* TODO: DCCG */ + pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn31_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, 
+ &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn31_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? + &res_create_funcs : &res_create_maximus_funcs))) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn31_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; + + return true; + +create_fail: + + dcn316_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn316_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn316_resource_pool *pool = + kzalloc(sizeof(struct dcn316_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn316_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h new file mode 100644 index 000000000000..9d0d60cb9482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h @@ -0,0 +1,42 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN316_RESOURCE_H_ +#define _DCN316_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN316_RES_POOL(pool)\ + container_of(pool, struct dcn316_resource_pool, base) + +struct dcn316_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn316_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN316_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7f94e3f70d7f..31109db02e93 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -113,7 +113,7 @@ bool dm_helpers_dp_mst_start_top_mgr( const struct dc_link *link, bool boot); -void dm_helpers_dp_mst_stop_top_mgr( +bool dm_helpers_dp_mst_stop_top_mgr( struct dc_context *ctx, struct dc_link *link); /** @@ -170,9 +170,9 @@ bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enabl void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us); -// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd +// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd, 0x0 = Status_Busy #define IS_SMU_TIMEOUT(result) \ - (!(result == 0x1 || result == 0xFE)) + (result == 0x0) int dm_helper_dmub_aux_transfer_sync( struct dc_context *ctx, @@ -184,4 +184,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, const struct dc_link *link, struct set_config_cmd_payload *payload, enum set_config_status *operation_result); + +enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid); + #endif /* __DM_HELPERS__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index eee6672bd32d..28978ce62f87 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -58,6 +58,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) @@ -71,8 +72,13 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags) 
+CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags) @@ -93,10 +99,15 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ +DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o ifdef CONFIG_DRM_AMD_DC_DCN +DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o +DML += dcn10/dcn10_fpu.o DML += dcn20/dcn20_fpu.o DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o @@ -104,7 +115,10 @@ DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o DML += dcn301/dcn301_fpu.o +DML += dcn302/dcn302_fpu.o +DML += dcn303/dcn303_fpu.o DML += dsc/rc_calc_fpu.o +DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c index 6ca288fb5fb9..6ca288fb5fb9 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h index 62435bfc274d..62435bfc274d 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c index 31d167bc548f..31d167bc548f 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c index e6ef36de0825..0100a6053ab6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -3411,35 +3411,33 @@ bool bw_calcs(struct dc_context *ctx, calcs_output->stutter_exit_wm_ns[5].c_mark = bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_entry_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if 
(ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].c_mark = + calcs_output->stutter_entry_wm_ns[0].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].c_mark = + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[0], + bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[1], + bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[7], + bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[8], + bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].c_mark = bw_fixed_to_int(bw_mul(data-> urgent_watermark[4], bw_int_to_fixed(1000))); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c index 41284e263325..41284e263325 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h index ce35de79a6c7..ce35de79a6c7 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c index 07d18e78de49..07d18e78de49 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index e447c74be713..e447c74be713 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c new file mode 100644 index 000000000000..99644d896222 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn10/dcn10_resource.h" + +#include "dcn10_fpu.h" + +/** + * DOC: DCN10 FPU manipulation Overview + * + * The DCN architecture relies on FPU operations, which require special + * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we + * want to avoid spreading FPU access across multiple files. With this idea in + * mind, this file aims to centralize DCN10 functions that require FPU access + * in a single place. Code in this file follows the following code pattern: + * + * 1. Functions that use FPU operations should be isolated in static functions. + * 2. The FPU functions should have the noinline attribute to ensure anything + * that deals with FP register is contained within this call. + * 3. All function that needs to be accessed outside this file requires a + * public interface that not uses any FPU reference. + * 4. Developers **must not** use DC_FP_START/END in this file, but they need + * to ensure that the caller invokes it before access any function available + * in this file. For this reason, public functions in this file must invoke + * dc_assert_fp_enabled(); + * + * Let's expand a little bit more the idea in the code pattern. To fully + * isolate FPU operations in a single place, we must avoid situations where + * compilers spill FP values to registers due to FP enable in a specific C + * file. Note that even if we isolate all FPU functions in a single file and + * call its interface from other files, the compiler might enable the use of + * FPU before we call DC_FP_START. Nevertheless, it is the programmer's + * responsibility to invoke DC_FP_START/END in the correct place. To highlight + * situations where developers forgot to use the FP protection before calling + * the DC FPU interface functions, we introduce a helper that checks if the + * function is invoked under FP protection. If not, it will trigger a kernel + * warning. 
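+ *
+ * As an illustration only (the helper name below is hypothetical and not
+ * part of this patch), a caller outside this file is expected to follow
+ * this pattern:
+ *
+ *	DC_FP_START();
+ *	dcn10_some_fpu_helper(dc);
+ *	DC_FP_END();
+ *
+ * while dcn10_some_fpu_helper(), exposed through dcn10_fpu.h, begins by
+ * calling dc_assert_fp_enabled() and keeps the actual floating-point math
+ * inside static noinline functions in this file.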
+ */ + +struct _vcs_dpi_ip_params_st dcn1_0_ip = { + .rob_buffer_size_kbytes = 64, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 42, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 589824, + .max_line_buffer_lines = 12, + .IsLineBufferBppFixed = 0, + .LineBufferFixedBpp = -1, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .max_num_dpp = 4, + .max_num_wb = 2, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 4, + .max_vscl_ratio = 4, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 14, + .dppclk_delay_subtotal = 90, + .dispclk_delay_subtotal = 42, + .dcfclk_cstate_latency = 10, + .max_inter_dcn_tile_repeaters = 8, + .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, + .bug_forcing_LC_req_same_size_fixed = 0, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 80.0, + .max_request_size_bytes = 256, + .downspread_percent = 0.5, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 128, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 2, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 17.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, +}; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h new file mode 100644 index 000000000000..e74ed4b4ce5b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN10_FPU_H__ +#define __DCN10_FPU_H__ + +#endif /* __DCN20_FPU_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index d590dc917363..2f6122153bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -25,9 +25,23 @@ */ #include "resource.h" +#include "clk_mgr.h" +#include "dc_link_dp.h" +#include "dchubbub.h" +#include "dcn20/dcn20_resource.h" +#include "dcn21/dcn21_resource.h" #include "dcn20_fpu.h" +#define DC_LOGGER_INIT(logger) + +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + /** * DOC: DCN2x FPU manipulation Overview * @@ -61,6 +75,581 @@ * warning. */ +struct _vcs_dpi_ip_params_st dcn2_0_ip = { + .odm_capable = 1, + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 6, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 6, + .max_num_dpp = 6, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 32, // + .dppclk_delay_subtotal = 77, // + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, // + .dcfclk_cstate_latency = 10, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .xfc_supported = true, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { + .odm_capable = 1, + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .num_dsc = 5, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 4, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + 
.writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 5, + .max_num_dpp = 5, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 32, // + .dppclk_delay_subtotal = 77, // + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, // + .dcfclk_cstate_latency = 10, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .xfc_supported = true, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .ptoi_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { + /* Defaults that get patched on driver load from firmware. */ + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + 
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 11.6, + .sr_enter_plus_exit_time_us = 13.9, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + 
.dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 8, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; + +struct _vcs_dpi_ip_params_st dcn2_1_ip = { + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 2, + .num_dsc = 3, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 44, + .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 4, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .min_meta_chunk_size_bytes = 256, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 4, + .max_num_dpp = 4, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 4, + .max_vscl_ratio = 4, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 32, // + .dppclk_delay_subtotal = 77, // + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, // + .dcfclk_cstate_latency = 10, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .ptoi_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 400.0, + .fabricclk_mhz = 400.0, + .dispclk_mhz = 600.0, + .dppclk_mhz = 400.00, + .phyclk_mhz = 600.0, + .socclk_mhz = 278.0, + .dscclk_mhz = 205.67, + .dram_speed_mts = 1600.0, + }, + { + .state = 1, + .dcfclk_mhz = 464.52, + .fabricclk_mhz = 800.0, + .dispclk_mhz = 654.55, + .dppclk_mhz = 626.09, + .phyclk_mhz = 600.0, + .socclk_mhz = 278.0, + .dscclk_mhz = 205.67, + .dram_speed_mts = 1600.0, + }, + { + .state = 2, + .dcfclk_mhz = 514.29, + .fabricclk_mhz = 933.0, + .dispclk_mhz = 757.89, + .dppclk_mhz = 685.71, + .phyclk_mhz = 600.0, + .socclk_mhz = 278.0, + .dscclk_mhz = 287.67, + .dram_speed_mts = 
1866.0, + }, + { + .state = 3, + .dcfclk_mhz = 576.00, + .fabricclk_mhz = 1067.0, + .dispclk_mhz = 847.06, + .dppclk_mhz = 757.89, + .phyclk_mhz = 600.0, + .socclk_mhz = 715.0, + .dscclk_mhz = 318.334, + .dram_speed_mts = 2134.0, + }, + { + .state = 4, + .dcfclk_mhz = 626.09, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 900.00, + .dppclk_mhz = 847.06, + .phyclk_mhz = 810.0, + .socclk_mhz = 953.0, + .dscclk_mhz = 489.0, + .dram_speed_mts = 2400.0, + }, + { + .state = 5, + .dcfclk_mhz = 685.71, + .fabricclk_mhz = 1333.0, + .dispclk_mhz = 1028.57, + .dppclk_mhz = 960.00, + .phyclk_mhz = 810.0, + .socclk_mhz = 278.0, + .dscclk_mhz = 287.67, + .dram_speed_mts = 2666.0, + }, + { + .state = 6, + .dcfclk_mhz = 757.89, + .fabricclk_mhz = 1467.0, + .dispclk_mhz = 1107.69, + .dppclk_mhz = 1028.57, + .phyclk_mhz = 810.0, + .socclk_mhz = 715.0, + .dscclk_mhz = 318.334, + .dram_speed_mts = 3200.0, + }, + { + .state = 7, + .dcfclk_mhz = 847.06, + .fabricclk_mhz = 1600.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.00, + .phyclk_mhz = 1325.0, + .socclk_mhz = 953.0, + .dscclk_mhz = 489.0, + .dram_speed_mts = 4266.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 8, + .dcfclk_mhz = 847.06, + .fabricclk_mhz = 1600.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, + .socclk_mhz = 953.0, + .dscclk_mhz = 489.0, + .dram_speed_mts = 4266.0, + }, + + }, + + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 100.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 4, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 128, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 4, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 23.84, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3600, + .xfc_bus_transport_time_us = 4, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 1, + .num_states = 8 +}; + void dcn20_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) @@ -100,3 +689,1340 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc, pipe_cnt++; } } + +void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, int i) +{ + int k; + + dc_assert_fp_enabled(); + + for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) { + wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + wb_arb_params->pstate_watermark[k] = 
get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + } + wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */ +} + +static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) +{ + int i; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; + } + return false; +} + +static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context) +{ + int plane_count; + int i; + + plane_count = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* + * Z9 and Z10 allowed cases: + * 1. 0 Planes enabled + * 2. single eDP, on link 0, 1 plane and stutter period > 5ms + * Z10 only cases: + * 1. single eDP, on link 0, 1 plane and stutter period >= 5ms + * Zstate not allowed cases: + * 1. Everything else + */ + if (plane_count == 0) + return DCN_ZSTATE_SUPPORT_ALLOW; + else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { + struct dc_link *link = context->streams[0]->sink->link; + struct dc_stream_status *stream_status = &context->stream_status[0]; + + /* zstate only supported on PWRSEQ0 and when there's <2 planes*/ + if (link->link_index != 0 || stream_status->plane_count > 1) + return DCN_ZSTATE_SUPPORT_DISALLOW; + + if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) + return DCN_ZSTATE_SUPPORT_ALLOW; + else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr) + return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + else + return DCN_ZSTATE_SUPPORT_DISALLOW; + } else + return DCN_ZSTATE_SUPPORT_DISALLOW; +} + +void dcn20_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx; + + dc_assert_fp_enabled(); + + /* Writeback MCIF_WB arbitration parameters */ + dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; + context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; + context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; + context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + + if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz) + context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz; + + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; + context->bw_ctx.bw.dcn.clk.p_state_change_support = + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] + != dm_dram_clock_change_unsupported; + context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; + + context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context); + + context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + pipes[pipe_idx].pipe.dest.vstartup_start = 
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; + context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; + + if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = + pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + pipe_idx++; + } + /*save a original dppclock copy*/ + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes + - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + if (dc->ctx->dce_version == DCN_VERSION_2_01) + cstate_en = false; + + context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, + &context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs, + pipes, + pipe_cnt, + pipe_idx, + cstate_en, + context->bw_ctx.bw.dcn.clk.p_state_change_support, + false, false, true); + + context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, + &context->res_ctx.pipe_ctx[i].rq_regs, + &pipes[pipe_idx].pipe); + pipe_idx++; + } +} + +static void swizzle_to_dml_params( + enum swizzle_mode_values swizzle, + unsigned int *sw_mode) +{ + switch (swizzle) { + case DC_SW_LINEAR: + *sw_mode = dm_sw_linear; + break; + case DC_SW_4KB_S: + *sw_mode = dm_sw_4kb_s; + break; + case DC_SW_4KB_S_X: + *sw_mode = dm_sw_4kb_s_x; + break; + case DC_SW_4KB_D: + *sw_mode = dm_sw_4kb_d; + break; + case DC_SW_4KB_D_X: + *sw_mode = dm_sw_4kb_d_x; + break; + case DC_SW_64KB_S: + *sw_mode = dm_sw_64kb_s; + break; + case DC_SW_64KB_S_X: + *sw_mode = dm_sw_64kb_s_x; + break; + case DC_SW_64KB_S_T: + *sw_mode = dm_sw_64kb_s_t; + break; + case DC_SW_64KB_D: + *sw_mode = dm_sw_64kb_d; + break; + case DC_SW_64KB_D_X: + *sw_mode = dm_sw_64kb_d_x; + break; + case DC_SW_64KB_D_T: + *sw_mode = dm_sw_64kb_d_t; + break; + case DC_SW_64KB_R_X: + *sw_mode = dm_sw_64kb_r_x; + break; + case DC_SW_VAR_S: + *sw_mode = dm_sw_var_s; + break; + case DC_SW_VAR_S_X: + *sw_mode = dm_sw_var_s_x; + break; + case DC_SW_VAR_D: + *sw_mode = dm_sw_var_d; + break; + case DC_SW_VAR_D_X: + *sw_mode = dm_sw_var_d_x; + break; + case DC_SW_VAR_R_X: + *sw_mode = dm_sw_var_r_x; + break; + default: + ASSERT(0); /* Not supported */ + break; + } +} + +int dcn20_populate_dml_pipes_from_context( + struct dc *dc, 
+ struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int pipe_cnt, i; + bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; + + dc_assert_fp_enabled(); + + for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + if (pipe_cnt < 0) { + pipe_cnt = i; + continue; + } + + if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) + continue; + + if (dc->debug.disable_timing_sync || + (!resource_are_streams_timing_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream) && + !resource_are_vblanks_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream))) { + synchronized_vblank = false; + break; + } + } + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + unsigned int v_total; + unsigned int front_porch; + int output_bpc; + struct audio_check aud_check = {0}; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + v_total = timing->v_total; + front_porch = timing->v_front_porch; + + /* todo: + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; + pipes[pipe_cnt].pipe.src.dcc = 0; + pipes[pipe_cnt].pipe.src.vm = 0;*/ + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; + /* todo: rotation?*/ + pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; + if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; + /* 1/2 vblank */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = + (v_total - timing->v_addressable + - timing->v_border_top - timing->v_border_bottom) / 2; + /* 36 bytes dp, 32 hdmi */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = + dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 
36 : 32; + } + pipes[pipe_cnt].pipe.src.dcc = false; + pipes[pipe_cnt].pipe.src.dcc_rate = 1; + pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; + pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start + - timing->h_addressable + - timing->h_border_left + - timing->h_border_right; + pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch; + pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start + - timing->v_addressable + - timing->v_border_top + - timing->v_border_bottom; + pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; + pipes[pipe_cnt].pipe.dest.vtotal = v_total; + pipes[pipe_cnt].pipe.dest.hactive = + timing->h_addressable + timing->h_border_left + timing->h_border_right; + pipes[pipe_cnt].pipe.dest.vactive = + timing->v_addressable + timing->v_border_top + timing->v_border_bottom; + pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; + pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0; + if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2; + pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; + pipes[pipe_cnt].dout.dp_lanes = 4; + pipes[pipe_cnt].dout.is_virtual = 0; + pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; + switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) { + case 1: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; + break; + case 3: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled; + } + pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; + if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state + == res_ctx->pipe_ctx[i].plane_state) { + struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe; + int split_idx = 0; + + while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state + == res_ctx->pipe_ctx[i].plane_state) { + first_pipe = first_pipe->top_pipe; + split_idx++; + } + /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ + if (split_idx == 0) + pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; + else if (split_idx == 1) + pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; + else if (split_idx == 2) + pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx; + } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { + struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; + } + + switch (res_ctx->pipe_ctx[i].stream->signal) { + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_DISPLAY_PORT: + pipes[pipe_cnt].dout.output_type = dm_dp; + break; + case SIGNAL_TYPE_EDP: + pipes[pipe_cnt].dout.output_type = dm_edp; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + pipes[pipe_cnt].dout.output_type = dm_hdmi; + break; + default: + /* In case there is no signal, set dp with 4 lanes to allow max config */ + pipes[pipe_cnt].dout.is_virtual = 1; + pipes[pipe_cnt].dout.output_type = dm_dp; + pipes[pipe_cnt].dout.dp_lanes 
= 4; + } + + switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + output_bpc = 6; + break; + case COLOR_DEPTH_888: + output_bpc = 8; + break; + case COLOR_DEPTH_101010: + output_bpc = 10; + break; + case COLOR_DEPTH_121212: + output_bpc = 12; + break; + case COLOR_DEPTH_141414: + output_bpc = 14; + break; + case COLOR_DEPTH_161616: + output_bpc = 16; + break; + case COLOR_DEPTH_999: + output_bpc = 9; + break; + case COLOR_DEPTH_111111: + output_bpc = 11; + break; + default: + output_bpc = 8; + break; + } + + switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + pipes[pipe_cnt].dout.output_format = dm_444; + pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; + break; + case PIXEL_ENCODING_YCBCR420: + pipes[pipe_cnt].dout.output_format = dm_420; + pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; + break; + case PIXEL_ENCODING_YCBCR422: + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC && + !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple) + pipes[pipe_cnt].dout.output_format = dm_n422; + else + pipes[pipe_cnt].dout.output_format = dm_s422; + pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; + break; + default: + pipes[pipe_cnt].dout.output_format = dm_444; + pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; + } + + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + + /* todo: default max for now, until there is logic reflecting this in dc */ + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + /* fill in the audio sample rate (unit in kHz) */ + get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); + pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; + /* + * For a graphics plane the cursor number is 1, for nv12 it is 0; + * this affects the bw calculations for cursor on/off + */ + if (res_ctx->pipe_ctx[i].plane_state && + res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + pipes[pipe_cnt].pipe.src.num_cursors = 0; + else + pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; + + pipes[pipe_cnt].pipe.src.cur0_src_width = 256; + pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; + + if (!res_ctx->pipe_ctx[i].plane_state) { + pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + pipes[pipe_cnt].pipe.src.source_scan = dm_horz; + pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s; + pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; + pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; + if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) + pipes[pipe_cnt].pipe.src.viewport_width = 1920; + pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; + if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) + pipes[pipe_cnt].pipe.src.viewport_height = 1080; + pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; + pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; + pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256; + pipes[pipe_cnt].pipe.src.source_format = dm_444_32; + pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width;
/*vp_width/hratio*/ + pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ + pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/ + pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/ + pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; + pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0; + pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0; + pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/ + pipes[pipe_cnt].pipe.scale_taps.htaps = 1; + pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; + pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; + pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; + + if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) { + pipes[pipe_cnt].pipe.src.viewport_width /= 2; + pipes[pipe_cnt].pipe.dest.recout_width /= 2; + } else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) { + pipes[pipe_cnt].pipe.src.viewport_width /= 4; + pipes[pipe_cnt].pipe.dest.recout_width /= 4; + } + } else { + struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; + struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; + + pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate; + pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) + || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) + || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + + /* stereo is not split */ + if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + pipes[pipe_cnt].pipe.src.is_hsplit = false; + pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; + } + + pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 + || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; + pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; + pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y; + pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width; + pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; + pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; + pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width; + pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height; + pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; + pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; + pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width; + pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height; + if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA + || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; + pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; + } else { + pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; + } + pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; + pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; + pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height; + pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height; + pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; + if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) + pipes[pipe_cnt].pipe.dest.full_recout_width *= 2; + else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) + pipes[pipe_cnt].pipe.dest.full_recout_width *= 4; + else { + struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe; + + while (split_pipe && split_pipe->plane_state == pln) { + pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->bottom_pipe; + } + split_pipe = res_ctx->pipe_ctx[i].top_pipe; + while (split_pipe && split_pipe->plane_state == pln) { + pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->top_pipe; + } + } + + pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; + pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); + pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); + pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); + pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); + pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = + scl->ratios.vert.value != dc_fixpt_one.value + || scl->ratios.horz.value != dc_fixpt_one.value + || scl->ratios.vert_c.value != dc_fixpt_one.value + || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ + || dc->debug.always_scale; /*support always scale*/ + pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps; + pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c; + pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps; + pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; + + 
pipes[pipe_cnt].pipe.src.macro_tile_size = + swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); + swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, + &pipes[pipe_cnt].pipe.src.sw_mode); + + switch (pln->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + pipes[pipe_cnt].pipe.src.source_format = dm_420_8; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + pipes[pipe_cnt].pipe.src.source_format = dm_420_10; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pipes[pipe_cnt].pipe.src.source_format = dm_444_64; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pipes[pipe_cnt].pipe.src.source_format = dm_444_16; + break; + case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: + pipes[pipe_cnt].pipe.src.source_format = dm_444_8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + pipes[pipe_cnt].pipe.src.source_format = dm_rgbe_alpha; + break; + default: + pipes[pipe_cnt].pipe.src.source_format = dm_444_32; + break; + } + } + + pipe_cnt++; + } + + /* populate writeback information */ + DC_FP_START(); + dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); + DC_FP_END(); + + return pipe_cnt; +} + +void dcn20_calculate_wm( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *out_pipe_cnt, + int *pipe_split_from, + int vlevel, + bool fast_validate) +{ + int pipe_cnt, i, pipe_idx; + + dc_assert_fp_enabled(); + + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + + if (pipe_split_from[i] < 0) { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + pipe_idx++; + } else { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + } + + if (dc->config.forced_clocks) { + pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_cnt++; + } + + if (pipe_cnt != pipe_idx) { + if (dc->res_pool->funcs->populate_dml_pipes) + pipe_cnt = 
dc->res_pool->funcs->populate_dml_pipes(dc, + context, pipes, fast_validate); + else + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, + context, pipes, fast_validate); + } + + *out_pipe_cnt = pipe_cnt; + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + + /* only pipe 0 is read for voltage and dcf/soc clocks */ + if (vlevel < 1) { + pipes[0].clks_cfg.voltage = 1; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; + } + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (vlevel < 2) { + pipes[0].clks_cfg.voltage = 2; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; + } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (vlevel < 3) { + pipes[0].clks_cfg.voltage = 3; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; + } + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = 
get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +} + +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) +{ + struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES]; + int i; + int num_calculated_states = 0; + int min_dcfclk = 0; + + dc_assert_fp_enabled(); + + if (num_states == 0) + return; + + memset(calculated_states, 0, sizeof(calculated_states)); + + if (dc->bb_overrides.min_dcfclk_mhz > 0) + min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; + else { + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + min_dcfclk = 310; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. + min_dcfclk = 506; + } + + for (i = 0; i < num_states; i++) { + int min_fclk_required_by_uclk; + calculated_states[i].state = i; + calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000; + + // FCLK:UCLK ratio is 1.08 + min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080, + 1000000); + + calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ? + min_dcfclk : min_fclk_required_by_uclk; + + calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ? + max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; + + calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ? 
+ max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; + + calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000; + calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000; + calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3); + + calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000; + + num_calculated_states++; + } + + calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000; + calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000; + calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000; + + memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits)); + bb->num_states = num_calculated_states; + + // Duplicate the last state; DML always needs an extra state identical to the max state to work + memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st)); + bb->clock_limits[num_calculated_states].state = bb->num_states; +} + +void dcn20_cap_soc_clocks( + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks) +{ + int i; + + dc_assert_fp_enabled(); + + // First pass - cap all clocks higher than the reported max + for (i = 0; i < bb->num_states; i++) { + if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000)) + && max_clocks.dcfClockInKhz != 0) + bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000); + + if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16) + && max_clocks.uClockInKhz != 0) + bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; + + if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000)) + && max_clocks.fabricClockInKhz != 0) + bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000); + + if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000)) + && max_clocks.displayClockInKhz != 0) + bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000); + + if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000)) + && max_clocks.dppClockInKhz != 0) + bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000); + + if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000)) + && max_clocks.phyClockInKhz != 0) + bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000); + + if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000)) + && max_clocks.socClockInKhz != 0) + bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000); + + if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000)) + && max_clocks.dscClockInKhz != 0) + bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000); + } + + // Second pass - remove all duplicate clock states + for (i = bb->num_states - 1; i > 1; i--) { + bool duplicate = true; + + if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts) + duplicate = false; + if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].fabricclk_mhz !=
bb->clock_limits[i].fabricclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz) + duplicate = false; + if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz) + duplicate = false; + + if (duplicate) + bb->num_states--; + } +} + +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + dc_assert_fp_enabled(); + + if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + + if ((int)(bb->sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + bb->sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if ((int)(bb->dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + bb->dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + + if ((int)(bb->dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + bb->dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } +} + +static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_split_from[MAX_PIPES]; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +bool dcn20_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported = false; + bool full_pstate_supported = false; + bool dummy_pstate_supported = false; + double p_state_latency_us; + + dc_assert_fp_enabled(); + + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = + dc->debug.disable_dram_clock_change_vactive_support; + context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive = + dc->debug.enable_dram_clock_change_one_display_vactive; + + /*Unsafe due to current pipe merge and split logic*/ + ASSERT(context != dc->current_state); + + if (fast_validate) { + return 
dcn20_validate_bandwidth_internal(dc, context, true); + } + + // Best case, we support full UCLK switch latency + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); + full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; + + if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || + (voltage_supported && full_pstate_supported)) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported; + goto restore_dml_state; + } + + // Fallback: Try to only support G6 temperature read latency + context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; + + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); + dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; + + if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + goto restore_dml_state; + } + + // ERROR: fallback is supposed to always work. + ASSERT(false); + +restore_dml_state: + context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + return voltage_supported; +} + +void dcn20_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb) +{ + dc_assert_fp_enabled(); + + ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; + ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; +} + +void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, + int vlevel, + int max_mpc_comb, + int pipe_idx, + bool is_validating_bw) +{ + dc_assert_fp_enabled(); + + if (is_validating_bw) + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2; + else + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; +} + +int dcn21_populate_dml_pipes_from_context(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + uint32_t pipe_cnt; + int i; + + dc_assert_fp_enabled(); + + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + for (i = 0; i < pipe_cnt; i++) { + + pipes[i].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; + pipes[i].pipe.src.gpuvm = 1; + } + + return pipe_cnt; +} + +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + int i; + + if (dc->bb_overrides.sr_exit_time_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = + dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + } + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + } + + if (dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + } +} + +static void calculate_wm_set_for_vlevel(int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < 
dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; +} + +static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *out_pipe_cnt, + int *pipe_split_from, + int vlevel_req, + bool fast_validate) +{ + int pipe_cnt, i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + patch_bounding_box(dc, &context->bw_ctx.dml.soc); + + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb]; + + if (pipe_split_from[i] < 0) { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_idx]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + pipe_idx++; + } else { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_split_from[i]]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + } + pipe_cnt++; + } + + if (pipe_cnt != pipe_idx) { + if (dc->res_pool->funcs->populate_dml_pipes) + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, + context, pipes, fast_validate); + else + pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, + context, pipes, fast_validate); + } + + *out_pipe_cnt = pipe_cnt; + + vlevel_max = bw_params->clk_table.num_entries - 1; + + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, 
&context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = MIN(MAX(vlevel_req, 3), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = MIN(MAX(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = MIN(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); +} + +bool dcn21_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_split_from[MAX_PIPES]; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + dc_assert_fp_enabled(); + + /*Unsafe due to current pipe merge and split logic*/ + ASSERT(context != dc->current_state); + + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl) +{ + struct _vcs_dpi_voltage_scaling_st low_pstate_lvl; + int i; + + low_pstate_lvl.state = 1; + low_pstate_lvl.dcfclk_mhz = clk_table->entries[0].dcfclk_mhz; + low_pstate_lvl.fabricclk_mhz = clk_table->entries[0].fclk_mhz; + low_pstate_lvl.socclk_mhz = clk_table->entries[0].socclk_mhz; + low_pstate_lvl.dram_speed_mts = clk_table->entries[0].memclk_mhz * 2; + + low_pstate_lvl.dispclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dispclk_mhz; + low_pstate_lvl.dppclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dppclk_mhz; + low_pstate_lvl.dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[high_voltage_lvl].dram_bw_per_chan_gbps; + low_pstate_lvl.dscclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dscclk_mhz; + low_pstate_lvl.dtbclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dtbclk_mhz; + low_pstate_lvl.phyclk_d18_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_d18_mhz; + low_pstate_lvl.phyclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_mhz; + + for (i = clk_table->num_entries; i > 1; i--) + clk_table->entries[i] = clk_table->entries[i-1]; + clk_table->entries[1] = clk_table->entries[0]; + clk_table->num_entries++; + + return low_pstate_lvl; +} + +void 
dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, closest_clk_lvl = 0, k = 0; + int j; + + dc_assert_fp_enabled(); + + dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn2_1_ip.max_num_dpp = pool->base.pipe_count; + dcn2_1_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ + for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { + clock_limits[i] = dcn2_1_soc.clock_limits[i]; + } + + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + + /* clk_table[1] is reserved for min DF PState. skip here to fill in later. */ + if (i == 1) + k++; + + clock_limits[k].state = k; + clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + + k++; + } + for (i = 0; i < clk_table->num_entries + 1; i++) + dcn2_1_soc.clock_limits[i] = clock_limits[i]; + if (clk_table->num_entries) { + dcn2_1_soc.num_states = clk_table->num_entries + 1; + /* fill in min DF PState */ + dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl); + /* duplicate last level */ + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; + } + + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index 36f26126d574..aa892193e485 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -23,6 +23,7 @@ * Authors: AMD * */ +#include "core_types.h" #ifndef __DCN20_FPU_H__ #define __DCN20_FPU_H__ @@ -31,4 +32,54 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); +void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, int i); +void dcn20_calculate_dlg_params(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); +int 
dcn20_populate_dml_pipes_from_context(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); +void dcn20_calculate_wm(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *out_pipe_cnt, + int *pipe_split_from, + int vlevel, + bool fast_validate); +void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks); +void dcn20_update_bounding_box(struct dc *dc, + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table *max_clocks, + unsigned int *uclk_states, + unsigned int num_states); +void dcn20_patch_bounding_box(struct dc *dc, + struct _vcs_dpi_soc_bounding_box_st *bb); +bool dcn20_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, + bool fast_validate); +void dcn20_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb); +void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, + int vlevel, + int max_mpc_comb, + int pipe_idx, + bool is_validating_bw); + +int dcn21_populate_dml_pipes_from_context(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); +bool dcn21_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, + bool fast_validate); +void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + #endif /* __DCN20_FPU_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c new file mode 100644 index 000000000000..e2bcd205aa93 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c @@ -0,0 +1,357 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "resource.h" +#include "clk_mgr.h" +#include "dcn20/dcn20_resource.h" +#include "dcn302/dcn302_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" +#include "dcn302_fpu.h" + +struct _vcs_dpi_ip_params_st dcn3_02_ip = { + .use_min_dcfclk = 0, + .clamp_min_dcfclk = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 5, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 2, + .pte_chunk_size_kbytes = 2, // ? + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 0, // ? + .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 0, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 5, + .max_num_dpp = 5, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 1, + .odm_combine_4to1_supported = true, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = { + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 562.0, + .dppclk_mhz = 300.0, + .phyclk_mhz = 300.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 405.6, + }, + }, + + .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ + .num_states = 1, + .sr_exit_time_us = 26.5, + .sr_enter_plus_exit_time_us = 31, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + 
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 156, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404, + .dummy_pstate_latency_us = 5, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3650, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? + .do_urgent_latency_adjustment = true, + .urgent_latency_adjustment_fabric_clock_component_us = 1.0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, +}; + +static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, + unsigned int *optimal_dcfclk, + unsigned int *optimal_fclk) +{ + + double bw_from_dram, bw_from_dram1, bw_from_dram2; + + bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans * + dcn3_02_soc.dram_channel_width_bytes * + (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100); + bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans * + dcn3_02_soc.dram_channel_width_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100); + + bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2; + + if (optimal_fclk) + *optimal_fclk = bw_from_dram / + (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); + + if (optimal_dcfclk) + *optimal_dcfclk = bw_from_dram / + (dcn3_02_soc.return_bus_width_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); +} + +void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + unsigned int i, j; + unsigned int num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; + unsigned int num_dcfclk_sta_targets = 4; + unsigned int num_uclk_states; + + dc_assert_fp_enabled(); + + if (dc->ctx->dc_bios->vram_info.num_chans) + dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + + dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + if (bw_params->clk_table.entries[0].memclk_mhz) { + int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + 
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz; + + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */ + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; + } + } + /* Update size of array since we "removed" duplicates */ + num_dcfclk_sta_targets = i + 1; + } + + num_uclk_states = bw_params->clk_table.num_entries; + + /* Calculate optimal dcfclk for each uclk */ + for (i = 0; i < num_uclk_states; i++) { + dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + + /* Calculate optimal uclk for each dcfclk sta target */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + /* create the final dcfclk and uclk table */ + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_02_soc.num_states = num_states; + for (i = 0; i < dcn3_02_soc.num_states; i++) { + dcn3_02_soc.clock_limits[i].state = i; + dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + + /* 
Fill all states with max values of all other clocks */ + dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz; + else + dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; + /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */ + /* FCLK, PHYCLK_D18, DSCCLK */ + dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz; + } + /* re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); + } +} + +void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) +{ + + dc_assert_fp_enabled(); + + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_02_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_02_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_02_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; +} + + diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h new file mode 100644 index 000000000000..548305d96cee --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN302_FPU_H__ +#define __DCN302_FPU_H__ + +void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); +void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +#endif /* __DCN302_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c new file mode 100644 index 000000000000..8fb14baed208 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c @@ -0,0 +1,362 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "resource.h" +#include "clk_mgr.h" +#include "dcn20/dcn20_resource.h" +#include "dcn303/dcn303_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" +#include "dcn303_fpu.h" + +struct _vcs_dpi_ip_params_st dcn3_03_ip = { + .use_min_dcfclk = 0, + .clamp_min_dcfclk = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 2, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 2, + .pte_chunk_size_kbytes = 2, // ? + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 0, // ? 
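+	/* DCC support and writeback buffer limits */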
+ .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 0, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 2, + .max_num_dpp = 2, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 1, + .odm_combine_4to1_supported = false, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = { + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 562.0, + .dppclk_mhz = 300.0, + .phyclk_mhz = 300.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 405.6, + }, + }, + + .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ + .num_states = 1, + .sr_exit_time_us = 35.5, + .sr_enter_plus_exit_time_us = 40, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 156, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404, + .dummy_pstate_latency_us = 5, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3650, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? 
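+	/* urgent latency is further adjusted as a function of the fabric clock (fields below) */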
+ .do_urgent_latency_adjustment = true, + .urgent_latency_adjustment_fabric_clock_component_us = 1.0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, +}; + +static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, + unsigned int *optimal_dcfclk, + unsigned int *optimal_fclk) +{ + double bw_from_dram, bw_from_dram1, bw_from_dram2; + + bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans * + dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100); + bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans * + dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100); + + bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2; + + if (optimal_fclk) + *optimal_fclk = bw_from_dram / + (dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes * + (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); + + if (optimal_dcfclk) + *optimal_dcfclk = bw_from_dram / + (dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); +} + + +void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + unsigned int i, j; + unsigned int num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; + unsigned int num_dcfclk_sta_targets = 4; + unsigned int num_uclk_states; + + dc_assert_fp_enabled(); + + if (dc->ctx->dc_bios->vram_info.num_chans) + dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + + dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + if (bw_params->clk_table.entries[0].memclk_mhz) { + int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz; + + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; + } 
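+				/* Same scheme as the dcn302 variant above: cap any STA target above the reported max DCFCLK, then truncate the list at the first capped entry to drop duplicates. */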
+ } + /* Update size of array since we "removed" duplicates */ + num_dcfclk_sta_targets = i + 1; + } + + num_uclk_states = bw_params->clk_table.num_entries; + + /* Calculate optimal dcfclk for each uclk */ + for (i = 0; i < num_uclk_states; i++) { + dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + + /* Calculate optimal uclk for each dcfclk sta target */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + /* create the final dcfclk and uclk table */ + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = + bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_03_soc.num_states = num_states; + for (i = 0; i < dcn3_03_soc.num_states; i++) { + dcn3_03_soc.clock_limits[i].state = i; + dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + + /* Fill all states with max values of all other clocks */ + dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) + dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz; + else + dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; + /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */ + /* FCLK, PHYCLK_D18, DSCCLK */ + dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; + } + + if (dcn3_03_soc.num_chans <= 4) { + for (i = 0; i < dcn3_03_soc.num_states; i++) { + if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) + break; + + if 
(dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { + dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; + } + } + } + + /* re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + } +} + +void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) +{ + dc_assert_fp_enabled(); + + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_03_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_03_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_03_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.h new file mode 100644 index 000000000000..92ec833fa528 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN303_FPU_H__ +#define __DCN303_FPU_H__ + +void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); +void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); + +#endif /* __DCN303_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 6feb23432f8d..e4b9fd31223c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -64,6 +64,8 @@ typedef struct { double DCFCLKDeepSleep; unsigned int DPPPerPlane; bool ScalerEnabled; + double VRatio; + double VRatioChroma; enum scan_direction_class SourceScan; unsigned int BlockWidth256BytesY; unsigned int BlockHeight256BytesY; @@ -942,6 +944,7 @@ static bool CalculatePrefetchSchedule( double dst_y_prefetch_equ; double Tsw_oto; double prefetch_bw_oto; + double prefetch_bw_pr; double Tvm_oto; double Tr0_oto; double Tvm_oto_lines; @@ -971,6 +974,7 @@ static bool CalculatePrefetchSchedule( double min_Lsw; double Tsw_est1 = 0; double Tsw_est3 = 0; + double max_Tsw = 0; if (GPUVMEnable == true && HostVMEnable == true) { HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; @@ -1111,11 +1115,14 @@ static bool CalculatePrefetchSchedule( bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4; else bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC; - + /*rev 99*/ + prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane); + max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime; prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC; prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime)); + prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw); - min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre; + min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre); Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; Tsw_oto = Lsw_oto * LineTime; @@ -1389,7 +1396,7 @@ static bool CalculatePrefetchSchedule( dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC); dml_print("DML::%s: VInitPreFillC = %f\n", __func__, VInitPreFillC); #endif - if ((SwathHeightC > 4)) { + if ((SwathHeightC > 4) || VInitPreFillC > 3) { if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) { *VRatioPrefetchC = dml_max( *VRatioPrefetchC, @@ -2663,6 +2670,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep; myPipe.DPPPerPlane = v->DPPPerPlane[k]; myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.VRatio = v->VRatio[k]; + myPipe.VRatioChroma = v->VRatioChroma[k]; myPipe.SourceScan = v->SourceScan[k]; myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k]; myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k]; @@ -3911,6 +3920,9 @@ static noinline void CalculatePrefetchSchedulePerPlane( myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.VRatio = mode_lib->vba.VRatio[k]; + myPipe.VRatioChroma = 
mode_lib->vba.VRatioChroma[k]; + myPipe.SourceScan = v->SourceScan[k]; myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; @@ -4987,6 +4999,17 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &v->meta_row_bandwidth[i][j][k], &v->dpte_row_bandwidth[i][j][k]); } + /*DCCMetaBufferSizeSupport(i, j) = True + For k = 0 To NumberOfActivePlanes - 1 + If MetaRowBytes(i, j, k) > 24064 Then + DCCMetaBufferSizeSupport(i, j) = False + End If + Next k*/ + v->DCCMetaBufferSizeSupport[i][j] = true; + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (v->MetaRowBytes[i][j][k] > 24064) + v->DCCMetaBufferSizeSupport[i][j] = false; + } v->UrgLatency[i] = CalculateUrgentLatency( v->UrgentLatencyPixelDataOnly, v->UrgentLatencyPixelMixedWithVMData, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 8f9f1d607f7c..59f0a61c33cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -141,6 +141,7 @@ struct _vcs_dpi_ip_params_st { unsigned int odm_capable; unsigned int rob_buffer_size_kbytes; unsigned int det_buffer_size_kbytes; + unsigned int min_comp_buffer_size_kbytes; unsigned int dpte_buffer_size_in_pte_reqs_luma; unsigned int dpte_buffer_size_in_pte_reqs_chroma; unsigned int pde_proc_buffer_size_64k_reqs; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 0fad15020c74..c0740dbdcc2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -47,6 +47,7 @@ static void recalculate_params( unsigned int num_pipes); static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp); +static void cache_debug_params(struct display_mode_lib *mode_lib); unsigned int dml_get_voltage_level( struct display_mode_lib *mode_lib, @@ -73,6 +74,7 @@ unsigned int dml_get_voltage_level( PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib); } mode_lib->funcs.validate(mode_lib); + cache_debug_params(mode_lib); return mode_lib->vba.VoltageLevel; } @@ -745,6 +747,28 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable && !!ip->hostvm_enable; } +/** + * ******************************************************************************************** + * cache_debug_params: Cache any params that needed to be maintained from the initial validation + * for debug purposes. 
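+ * Currently only the per-plane ActiveDRAMClockChangeLatencyMargin values are cached.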
+ * + * The DML getters can modify some of the VBA params that we are interested in (for example when + * calculating with dummy p-state latency), so cache any params here that we want for debugging + * + * @param [in] mode_lib: mode_lib input/output of validate call + * + * @return: void + * + * ******************************************************************************************** + */ +static void cache_debug_params(struct display_mode_lib *mode_lib) +{ + int k = 0; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) + mode_lib->vba.CachedActiveDRAMClockChangeLatencyMargin[k] = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]; +} + // in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs // rather than working them out as in recalculate_ms static void recalculate_params( diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 90e87961fe3e..0603b32971a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -195,6 +195,7 @@ struct vba_vars_st { unsigned int LBLatencyHidingSourceLinesY; unsigned int LBLatencyHidingSourceLinesC; double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; + double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only double MinActiveDRAMClockChangeMargin; double InitFillLevel; double FinalFillMargin; @@ -544,6 +545,8 @@ struct vba_vars_st { bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES]; double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES]; bool ROBSupport[DC__VOLTAGE_STATES][2]; + //based on rev 99: Dim DCCMetaBufferSizeSupport(NumberOfStates, 1) As Boolean + bool DCCMetaBufferSizeSupport[DC__VOLTAGE_STATES][2]; bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES][2]; bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES][2]; double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES][2]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c index ec636d06e18c..ef75eb7d5adc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -68,7 +68,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, int sel = table_hash(mode, bpc, max_min); int table_size = 0; int index; - const struct qp_entry *table = 0L; + const struct qp_entry *table = NULL; // alias enum enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 9c74564cbd8d..efc2339f1fa0 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -25,7 +25,7 @@ #include <drm/drm_dsc.h> #include "dc_hw_types.h" #include "dsc.h" -#include <drm/drm_dp_helper.h> +#include <drm/dp/drm_dp_helper.h> #include "dc.h" #include "rc_calc.h" #include "fixed31_32.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index c5ddade8b187..d1c2ec58676d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -106,6 +106,14 @@ GPIO_DCN30 = hw_translate_dcn30.o hw_factory_dcn30.o AMD_DAL_GPIO_DCN30 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn30/,$(GPIO_DCN30)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN30) 
+############################################################################### +# DCN 3.15 +############################################################################### +GPIO_DCN315 = hw_translate_dcn315.o hw_factory_dcn315.o + +AMD_DAL_GPIO_DCN315 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn315/,$(GPIO_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN315) endif ############################################################################### # Diagnostics on FPGA diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c index 5f6ae3edb755..3b7df1ac26be 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c @@ -42,8 +42,8 @@ #include "nbio/nbio_7_4_offset.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c index 0046219a1cc7..6b6b7c7bd12f 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c @@ -40,8 +40,8 @@ #include "nbio/nbio_7_4_offset.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.c new file mode 100644 index 000000000000..5feebb3b95ca --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.c @@ -0,0 +1,260 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "include/gpio_types.h" +#include "../hw_factory.h" + + +#include "../hw_gpio.h" +#include "../hw_ddc.h" +#include "../hw_hpd.h" +#include "../hw_generic.h" + +#include "hw_factory_dcn315.h" + +#include "dcn/dcn_3_1_5_offset.h" +#include "dcn/dcn_3_1_5_sh_mask.h" + +#include "reg_helper.h" +#include "../hpd_regs.h" +/* begin ********************* + * macros to expend register list macro defined in HW object header file */ + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +/* DCN */ +#define block HPD +#define reg_num 0 + +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + + + +#define REG(reg_name)\ + BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name + +#define SF_HPD(reg_name, field_name, post_fix)\ + .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix + +#define REGI(reg_name, block, id)\ + BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +/* macros to expend register list macro defined in HW object header file + * end *********************/ + + + +#define hpd_regs(id) \ +{\ + HPD_REG_LIST(id)\ +} + +static const struct hpd_registers hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), +}; + +static const struct hpd_sh_mask hpd_shift = { + HPD_MASK_SH_LIST(__SHIFT) +}; + +static const struct hpd_sh_mask hpd_mask = { + HPD_MASK_SH_LIST(_MASK) +}; + +#include "../ddc_regs.h" + + /* set field name */ +#define SF_DDC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +static const struct ddc_registers ddc_data_regs_dcn[] = { + ddc_data_regs_dcn2(1), + ddc_data_regs_dcn2(2), + ddc_data_regs_dcn2(3), + ddc_data_regs_dcn2(4), + ddc_data_regs_dcn2(5), + { + DDC_GPIO_VGA_REG_LIST(DATA), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } +}; + +static const struct ddc_registers ddc_clk_regs_dcn[] = { + ddc_clk_regs_dcn2(1), + ddc_clk_regs_dcn2(2), + ddc_clk_regs_dcn2(3), + ddc_clk_regs_dcn2(4), + ddc_clk_regs_dcn2(5), + { + DDC_GPIO_VGA_REG_LIST(CLK), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } +}; + +static const struct ddc_sh_mask ddc_shift[] = { + DDC_MASK_SH_LIST_DCN2(__SHIFT, 1), + DDC_MASK_SH_LIST_DCN2(__SHIFT, 2), + DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), + DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), + DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) +}; + +static const struct ddc_sh_mask ddc_mask[] = { + DDC_MASK_SH_LIST_DCN2(_MASK, 1), + DDC_MASK_SH_LIST_DCN2(_MASK, 2), + DDC_MASK_SH_LIST_DCN2(_MASK, 3), + DDC_MASK_SH_LIST_DCN2(_MASK, 4), + DDC_MASK_SH_LIST_DCN2(_MASK, 5), + DDC_MASK_SH_LIST_DCN2(_MASK, 6) +}; + +#include "../generic_regs.h" + +/* set field name */ +#define SF_GENERIC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define generic_regs(id) \ +{\ + GENERIC_REG_LIST(id)\ +} + +static const struct generic_registers generic_regs[] = { + generic_regs(A), + generic_regs(B), +}; + +static const struct generic_sh_mask generic_shift[] = { + GENERIC_MASK_SH_LIST(__SHIFT, A), + GENERIC_MASK_SH_LIST(__SHIFT, B), +}; + +static const struct 
generic_sh_mask generic_mask[] = { + GENERIC_MASK_SH_LIST(_MASK, A), + GENERIC_MASK_SH_LIST(_MASK, B), +}; + +static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin); + + generic->regs = &generic_regs[en]; + generic->shifts = &generic_shift[en]; + generic->masks = &generic_mask[en]; + generic->base.regs = &generic_regs[en].gpio; +} + +static void define_ddc_registers( + struct hw_gpio_pin *pin, + uint32_t en) +{ + struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin); + + switch (pin->id) { + case GPIO_ID_DDC_DATA: + ddc->regs = &ddc_data_regs_dcn[en]; + ddc->base.regs = &ddc_data_regs_dcn[en].gpio; + break; + case GPIO_ID_DDC_CLOCK: + ddc->regs = &ddc_clk_regs_dcn[en]; + ddc->base.regs = &ddc_clk_regs_dcn[en].gpio; + break; + default: + ASSERT_CRITICAL(false); + return; + } + + ddc->shifts = &ddc_shift[en]; + ddc->masks = &ddc_mask[en]; + +} + +static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin); + + hpd->regs = &hpd_regs[en]; + hpd->shifts = &hpd_shift; + hpd->masks = &hpd_mask; + hpd->base.regs = &hpd_regs[en].gpio; +} + + +/* fucntion table */ +static const struct hw_factory_funcs funcs = { + .init_ddc_data = dal_hw_ddc_init, + .init_generic = dal_hw_generic_init, + .init_hpd = dal_hw_hpd_init, + .get_ddc_pin = dal_hw_ddc_get_pin, + .get_hpd_pin = dal_hw_hpd_get_pin, + .get_generic_pin = dal_hw_generic_get_pin, + .define_hpd_registers = define_hpd_registers, + .define_ddc_registers = define_ddc_registers, + .define_generic_registers = define_generic_registers +}; +/* + * dal_hw_factory_dcn10_init + * + * @brief + * Initialize HW factory function pointers and pin info + * + * @param + * struct hw_factory *factory - [out] struct of function pointers + */ +void dal_hw_factory_dcn315_init(struct hw_factory *factory) +{ + /*TODO check ASIC CAPs*/ + factory->number_of_pins[GPIO_ID_DDC_DATA] = 8; + factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8; + factory->number_of_pins[GPIO_ID_GENERIC] = 4; + factory->number_of_pins[GPIO_ID_HPD] = 6; + factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28; + factory->number_of_pins[GPIO_ID_VIP_PAD] = 0; + factory->number_of_pins[GPIO_ID_SYNC] = 0; + factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/ + + factory->funcs = &funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.h new file mode 100644 index 000000000000..ffa71fa41cd4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.h @@ -0,0 +1,31 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __DAL_HW_FACTORY_DCN315_H__ +#define __DAL_HW_FACTORY_DCN315_H__ + +/* Initialize HW factory function pointers and pin info */ +void dal_hw_factory_dcn315_init(struct hw_factory *factory); + +#endif /* __DAL_HW_FACTORY_DCN315_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.c new file mode 100644 index 000000000000..fbdaba57f718 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.c @@ -0,0 +1,374 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "hw_translate_dcn315.h" + +#include "dm_services.h" +#include "include/gpio_types.h" +#include "../hw_translate.h" + +#include "dcn/dcn_3_1_5_offset.h" +#include "dcn/dcn_3_1_5_sh_mask.h" + +/* begin ********************* + * macros to expend register list macro defined in HW object header file */ + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +/* DCN */ +#define block HPD +#define reg_num 0 + +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#undef REG +#define REG(reg_name)\ + BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name +#define SF_HPD(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + + +/* macros to expend register list macro defined in HW object header file + * end *********************/ + + +static bool offset_to_id( + uint32_t offset, + uint32_t mask, + enum gpio_id *id, + uint32_t *en) +{ + switch (offset) { + /* GENERIC */ + case REG(DC_GPIO_GENERIC_A): + *id = GPIO_ID_GENERIC; + switch (mask) { + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK: + *en = GPIO_GENERIC_A; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK: + *en = GPIO_GENERIC_B; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK: + *en = GPIO_GENERIC_C; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK: + *en = GPIO_GENERIC_D; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK: + *en = GPIO_GENERIC_E; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK: + *en = GPIO_GENERIC_F; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK: + *en = GPIO_GENERIC_G; + return true; + default: + ASSERT_CRITICAL(false); + return false; + } + break; + /* HPD */ + case REG(DC_GPIO_HPD_A): + *id = GPIO_ID_HPD; + switch (mask) { + case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK: + *en = GPIO_HPD_1; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK: + *en = GPIO_HPD_2; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK: + *en = GPIO_HPD_3; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK: + *en = GPIO_HPD_4; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK: + *en = GPIO_HPD_5; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK: + *en = GPIO_HPD_6; + return true; + default: + ASSERT_CRITICAL(false); + return false; + } + break; + /* REG(DC_GPIO_GENLK_MASK */ + case REG(DC_GPIO_GENLK_A): + *id = GPIO_ID_GSL; + switch (mask) { + case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK: + *en = GPIO_GSL_GENLOCK_CLOCK; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK: + *en = GPIO_GSL_GENLOCK_VSYNC; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK: + *en = GPIO_GSL_SWAPLOCK_A; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK: + *en = GPIO_GSL_SWAPLOCK_B; + return true; + default: + ASSERT_CRITICAL(false); + return false; + } + break; + /* DDC */ + /* we don't care about the GPIO_ID for DDC + * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK + * directly in the create method + */ + case REG(DC_GPIO_DDC1_A): + *en = GPIO_DDC_LINE_DDC1; + return true; + case REG(DC_GPIO_DDC2_A): + *en = GPIO_DDC_LINE_DDC2; + return true; + case REG(DC_GPIO_DDC3_A): + *en = GPIO_DDC_LINE_DDC3; + return true; + case REG(DC_GPIO_DDC4_A): + *en = GPIO_DDC_LINE_DDC4; + 
return true; + case REG(DC_GPIO_DDC5_A): + *en = GPIO_DDC_LINE_DDC5; + return true; + case REG(DC_GPIO_DDCVGA_A): + *en = GPIO_DDC_LINE_DDC_VGA; + return true; + +/* + * case REG(DC_GPIO_I2CPAD_A): not exit + * case REG(DC_GPIO_PWRSEQ_A): + * case REG(DC_GPIO_PAD_STRENGTH_1): + * case REG(DC_GPIO_PAD_STRENGTH_2): + * case REG(DC_GPIO_DEBUG): + */ + /* UNEXPECTED */ + default: +/* case REG(DC_GPIO_SYNCA_A): not exist */ + ASSERT_CRITICAL(false); + return false; + } +} + +static bool id_to_offset( + enum gpio_id id, + uint32_t en, + struct gpio_pin_info *info) +{ + bool result = true; + + switch (id) { + case GPIO_ID_DDC_DATA: + info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK; + switch (en) { + case GPIO_DDC_LINE_DDC1: + info->offset = REG(DC_GPIO_DDC1_A); + break; + case GPIO_DDC_LINE_DDC2: + info->offset = REG(DC_GPIO_DDC2_A); + break; + case GPIO_DDC_LINE_DDC3: + info->offset = REG(DC_GPIO_DDC3_A); + break; + case GPIO_DDC_LINE_DDC4: + info->offset = REG(DC_GPIO_DDC4_A); + break; + case GPIO_DDC_LINE_DDC5: + info->offset = REG(DC_GPIO_DDC5_A); + break; + case GPIO_DDC_LINE_DDC_VGA: + info->offset = REG(DC_GPIO_DDCVGA_A); + break; + case GPIO_DDC_LINE_I2C_PAD: + default: + ASSERT_CRITICAL(false); + result = false; + } + break; + case GPIO_ID_DDC_CLOCK: + info->mask = DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK; + switch (en) { + case GPIO_DDC_LINE_DDC1: + info->offset = REG(DC_GPIO_DDC1_A); + break; + case GPIO_DDC_LINE_DDC2: + info->offset = REG(DC_GPIO_DDC2_A); + break; + case GPIO_DDC_LINE_DDC3: + info->offset = REG(DC_GPIO_DDC3_A); + break; + case GPIO_DDC_LINE_DDC4: + info->offset = REG(DC_GPIO_DDC4_A); + break; + case GPIO_DDC_LINE_DDC5: + info->offset = REG(DC_GPIO_DDC5_A); + break; + case GPIO_DDC_LINE_DDC_VGA: + info->offset = REG(DC_GPIO_DDCVGA_A); + break; + case GPIO_DDC_LINE_I2C_PAD: + default: + ASSERT_CRITICAL(false); + result = false; + } + break; + case GPIO_ID_GENERIC: + info->offset = REG(DC_GPIO_GENERIC_A); + switch (en) { + case GPIO_GENERIC_A: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK; + break; + case GPIO_GENERIC_B: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK; + break; + case GPIO_GENERIC_C: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK; + break; + case GPIO_GENERIC_D: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK; + break; + case GPIO_GENERIC_E: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK; + break; + case GPIO_GENERIC_F: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK; + break; + case GPIO_GENERIC_G: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK; + break; + default: + ASSERT_CRITICAL(false); + result = false; + } + break; + case GPIO_ID_HPD: + info->offset = REG(DC_GPIO_HPD_A); + switch (en) { + case GPIO_HPD_1: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK; + break; + case GPIO_HPD_2: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK; + break; + case GPIO_HPD_3: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK; + break; + case GPIO_HPD_4: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK; + break; + case GPIO_HPD_5: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK; + break; + case GPIO_HPD_6: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK; + break; + default: + ASSERT_CRITICAL(false); + result = false; + } + break; + case GPIO_ID_GSL: + switch (en) { + case GPIO_GSL_GENLOCK_CLOCK: + /*not implmented*/ + ASSERT_CRITICAL(false); + result = false; + break; + case GPIO_GSL_GENLOCK_VSYNC: + /*not implmented*/ + ASSERT_CRITICAL(false); + result = false; + break; + case 
GPIO_GSL_SWAPLOCK_A: + /*not implmented*/ + ASSERT_CRITICAL(false); + result = false; + break; + case GPIO_GSL_SWAPLOCK_B: + /*not implmented*/ + ASSERT_CRITICAL(false); + result = false; + + break; + default: + ASSERT_CRITICAL(false); + result = false; + } + break; + case GPIO_ID_SYNC: + case GPIO_ID_VIP_PAD: + default: + ASSERT_CRITICAL(false); + result = false; + } + + if (result) { + info->offset_y = info->offset + 2; + info->offset_en = info->offset + 1; + info->offset_mask = info->offset - 1; + + info->mask_y = info->mask; + info->mask_en = info->mask; + info->mask_mask = info->mask; + } + + return result; +} + +/* function table */ +static const struct hw_translate_funcs funcs = { + .offset_to_id = offset_to_id, + .id_to_offset = id_to_offset, +}; + +/* + * dal_hw_translate_dcn30_init + * + * @brief + * Initialize Hw translate function pointers. + * + * @param + * struct hw_translate *tr - [out] struct of function pointers + * + */ +void dal_hw_translate_dcn315_init(struct hw_translate *tr) +{ + tr->funcs = &funcs; +} + diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.h new file mode 100644 index 000000000000..d58def4884b2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.h @@ -0,0 +1,33 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __DAL_HW_TRANSLATE_DCN315_H__ +#define __DAL_HW_TRANSLATE_DCN315_H__ + +struct hw_translate; + +/* Initialize Hw translate function pointers */ +void dal_hw_translate_dcn315_init(struct hw_translate *tr); + +#endif /* __DAL_HW_TRANSLATE_DCN315_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index dae8e489c8cf..778c206f754d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -647,7 +647,9 @@ enum gpio_result dal_ddc_set_config( void dal_ddc_close( struct ddc *ddc) { - dal_gpio_close(ddc->pin_clock); - dal_gpio_close(ddc->pin_data); + if (ddc != NULL) { + dal_gpio_close(ddc->pin_clock); + dal_gpio_close(ddc->pin_data); + } } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 5029d4e42dbf..5c00ffde3996 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -53,6 +53,7 @@ #include "dcn20/hw_factory_dcn20.h" #include "dcn21/hw_factory_dcn21.h" #include "dcn30/hw_factory_dcn30.h" +#include "dcn315/hw_factory_dcn315.h" #endif #include "diagnostics/hw_factory_diag.h" @@ -114,8 +115,12 @@ bool dal_hw_factory_init( case DCN_VERSION_3_02: case DCN_VERSION_3_03: case DCN_VERSION_3_1: + case DCN_VERSION_3_16: dal_hw_factory_dcn30_init(factory); return true; + case DCN_VERSION_3_15: + dal_hw_factory_dcn315_init(factory); + return true; #endif default: ASSERT_CRITICAL(false); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 904bd30bed68..7a39cbc01f63 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -51,6 +51,7 @@ #include "dcn20/hw_translate_dcn20.h" #include "dcn21/hw_translate_dcn21.h" #include "dcn30/hw_translate_dcn30.h" +#include "dcn315/hw_translate_dcn315.h" #endif #include "diagnostics/hw_translate_diag.h" @@ -109,8 +110,12 @@ bool dal_hw_translate_init( case DCN_VERSION_3_02: case DCN_VERSION_3_03: case DCN_VERSION_3_1: + case DCN_VERSION_3_16: dal_hw_translate_dcn30_init(translate); return true; + case DCN_VERSION_3_15: + dal_hw_translate_dcn315_init(translate); + return true; #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 943240e2809e..951c9b60917d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -54,6 +54,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #ifdef CONFIG_DRM_AMD_DC_HDCP #include "dm_cp_psp.h" #endif +#include "link_hwss.h" /************ link *****************/ struct link_init_data { @@ -249,12 +250,10 @@ struct resource_pool { /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created.*/ unsigned int usb4_dpia_count; -#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned int hpo_dp_stream_enc_count; struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_count; struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; @@ -307,9 +306,7 @@ struct stream_resource { struct display_stream_compressor *dsc; struct timing_generator *tg; struct stream_encoder 
*stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *hpo_dp_stream_enc; -#endif struct audio *audio; struct pixel_clk_params pix_clk_params; @@ -334,18 +331,12 @@ struct plane_resource { struct dcn_fe_bandwidth bw; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) #define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF #define LINK_RES_HPO_DP_REC_MAP__SHIFT 0 -#endif /* all mappable hardware resources used to enable a link */ struct link_resource { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_enc; -#else - void *dummy; -#endif }; union pipe_update_flags { @@ -425,11 +416,9 @@ struct resource_context { uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; struct link_enc_cfg_context link_enc_cfg_ctx; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_mpc_3dlut_acquired[MAX_PIPES]; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index cd52813a8432..ab9939db8cea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -54,22 +54,13 @@ enum { PEAK_FACTOR_X1000 = 1006, }; -bool dp_verify_link_cap( - struct dc_link *link, - const struct link_resource *link_res, - struct dc_link_settings *known_limit_link_setting, - int *fail_count); +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); bool dp_verify_link_cap_with_retries( struct dc_link *link, - const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int attempts); -bool dp_verify_mst_link_cap( - struct dc_link *link, - const struct link_resource *link_res); - bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); @@ -114,6 +105,9 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool dp_overwrite_extended_receiver_cap(struct dc_link *link); void dpcd_set_source_specific_data(struct dc_link *link); + +void dpcd_write_cable_id_to_dprx(struct dc_link *link); + /* Write DPCD link configuration data. 
*/ enum dc_status dpcd_set_link_settings( struct dc_link *link, @@ -173,7 +167,6 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); -struct link_encoder *dp_get_link_enc(struct dc_link *link); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); @@ -222,8 +215,47 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); -void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); bool dp_retrieve_lttpr_cap(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link); +void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); +void edp_add_delay_for_T9(struct dc_link *link); +bool edp_receiver_ready_T9(struct dc_link *link); +bool edp_receiver_ready_T7(struct dc_link *link); + +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal); + +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal); + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset); + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size); + +void dp_retrain_link_dp_test(struct dc_link *link, + struct dc_link_settings *link_setting, + bool skip_video_pattern); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index c940fdfda144..b2fa4de47734 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -79,7 +79,7 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); -#if defined(CONFIG_DRM_AMD_DC_DCN) + void (*set_dpstreamclk)( struct dccg *dccg, enum hdmistreamclk_source src, @@ -102,7 +102,7 @@ struct dccg_funcs { void (*disable_symclk32_le)( struct dccg *dccg, int hpo_le_inst); -#endif + void (*set_physymclk)( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 713f5558f5e1..9195dec294c2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -154,6 +154,8 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub); + void 
(*apply_DEDCN21_147_wa)(struct hubbub *hubbub); void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 2c031586f4e6..e45b7993c5c5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -195,6 +195,7 @@ struct hubp_funcs { void (*hubp_set_flip_int)(struct hubp *hubp); + void (*hubp_wait_pipe_read_start)(struct hubp *hubp); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 10ecbc667ffa..d89bd55f110f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -38,10 +38,8 @@ #define MAX_PIPES 6 #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 -#if defined(CONFIG_DRM_AMD_DC_DCN) #define MAX_HPO_DP2_ENCODERS 4 #define MAX_HPO_DP2_LINK_ENCODERS 2 -#endif struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 2ce15cd10d80..2013a70603ae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -162,7 +162,8 @@ struct link_encoder_funcs { void (*disable_output)(struct link_encoder *link_enc, enum signal_type signal); void (*dp_set_lane_settings)(struct link_encoder *enc, - const struct link_training_settings *link_settings); + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); void (*dp_set_phy_pattern)(struct link_encoder *enc, const struct encoder_set_dp_phy_pattern_param *para); void (*update_mst_stream_allocation_table)( @@ -220,7 +221,6 @@ enum link_enc_cfg_mode { LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. 
*/ }; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dp2_link_mode { DP2_LINK_TRAINING_TPS1, DP2_LINK_TRAINING_TPS2, @@ -306,6 +306,5 @@ struct hpo_dp_link_encoder_funcs { const struct dc_link_settings *link_settings, uint8_t ffe_preset); }; -#endif #endif /* LINK_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 48eac622c6a0..24af9d80b937 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -39,6 +39,7 @@ struct panel_cntl_backlight_registers { unsigned int BL_PWM_CNTL2; unsigned int BL_PWM_PERIOD_CNTL; unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; + unsigned int PANEL_PWRSEQ_REF_DIV2; }; struct panel_cntl_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c88e113b94d1..678c2065e5e8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -245,7 +245,6 @@ struct stream_encoder_funcs { struct stream_encoder *enc); }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder_state { uint32_t stream_enc_enabled; uint32_t vid_stream_enabled; @@ -324,7 +323,10 @@ struct hpo_dp_stream_encoder_funcs { void (*read_state)( struct hpo_dp_stream_encoder *enc, struct hpo_dp_stream_encoder_state *state); + + void (*set_hblank_min_symbol_width)( + struct hpo_dp_stream_encoder *enc, + uint16_t width); }; -#endif #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index c29320b3855d..59a704781e34 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -100,9 +100,7 @@ enum crc_selection { enum otg_out_mux_dest { OUT_MUX_DIO = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN) OUT_MUX_HPO_DP = 2, -#endif }; enum h_timing_div_mode { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index c2008258c50a..8c2f190c4712 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,9 +41,8 @@ struct dce_hwseq_wa { bool DEGVIDCN10_254; bool DEGVIDCN21; bool disallow_self_refresh_during_multi_plane_transition; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo_and_otg_sequence; -#endif + bool wait_hubpret_read_start_during_mpo_transition; }; struct hwseq_wa_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h index 3f12b1600d2a..d561f86d503c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h @@ -27,7 +27,7 @@ #define __LINK_DPCD_H__ #include <inc/core_status.h> #include <dc_link.h> -#include <inc/link_hwss.h> +#include <dc_link_dp.h> enum dc_status core_link_read_dpcd( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index a4e43b4826e0..c6f6baa6e677 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -40,6 +40,11 @@ void link_enc_cfg_init( struct dc_state *state); /* + * Copies a link encoder assignment from another state. 
+ */ +void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx); + +/* * Algorithm for assigning available DIG link encoders to streams. * * Update link_enc_assignments table and link_enc_avail list accordingly in @@ -96,6 +101,9 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( struct dc *dc, const struct dc_stream_state *stream); +/* Return DIG link encoder. NULL if unused. */ +struct link_encoder *link_enc_cfg_get_link_enc(const struct dc_link *link); + /* Return true if encoder available to use. */ bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 69d63763a10e..3b3090e3d327 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -26,50 +26,52 @@ #ifndef __DC_LINK_HWSS_H__ #define __DC_LINK_HWSS_H__ -struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service); +/* include basic type headers only */ +#include "dc_dp_types.h" +#include "signal_types.h" +#include "grph_object_id.h" +#include "fixed31_32.h" -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings); +/* forward declare dc core types */ +struct dc_link; +struct link_resource; +struct pipe_ctx; +struct encoder_set_dp_phy_pattern_param; -void dp_receiver_power_ctrl(struct dc_link *link, bool on); -void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); -void edp_add_delay_for_T9(struct dc_link *link); -bool edp_receiver_ready_T9(struct dc_link *link); -bool edp_receiver_ready_T7(struct dc_link *link); +struct link_hwss_ext { + /* function pointers below require check for NULL at all time + * ********************************************************************* + */ + void (*set_hblank_min_symbol_width)(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); + void (*set_throttled_vcp_size)(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); + void (*enable_dp_link_output)(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); + void (*disable_dp_link_output)(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + void (*set_dp_link_test_pattern)(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params); + void (*set_dp_lane_settings)(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); +}; -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t offset); - -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings *link_settings, - uint32_t offset); - 
-void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size); - -void dp_retrain_link_dp_test(struct dc_link *link, - struct dc_link_settings *link_setting, - bool skip_video_pattern); +struct link_hwss { + struct link_hwss_ext ext; + /* function pointers below MUST be assigned to all types of link_hwss + * ********************************************************************* + */ + void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx); + void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx); +}; #endif /* __DC_LINK_HWSS_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 2470405e996b..a402df225a76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -498,6 +498,40 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +/* indirect register access + * underlying implementation determines which index/data pair to be used + * in a synchronous way + */ +#define IX_REG_SET_N_SYNC(index, n, initial_val, ...) \ + generic_indirect_reg_update_ex_sync(CTX, \ + IND_REG(index), \ + initial_val, \ + n, __VA_ARGS__) + +#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \ + IX_REG_SET_N_SYNC(index, 2, init_value, \ + FN(reg, f1), v1,\ + FN(reg, f2), v2) + +#define IX_REG_GET_N_SYNC(index, n, ...) \ + generic_indirect_reg_get_sync(CTX, \ + IND_REG(index), \ + n, __VA_ARGS__) + +#define IX_REG_GET_SYNC(index, field, val) \ + IX_REG_GET_N_SYNC(index, 1, \ + FN(data_reg_name, field), val) + +uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...); + +uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx, + uint32_t index, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, + ...); + /* register offload macros * * instead of MMIO to register directly, in some cases we want diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index dbfe6690ded8..2369f38ed06f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -54,10 +54,8 @@ struct resource_caps { int num_dsc; unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output). unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters). 
-#if defined(CONFIG_DRM_AMD_DC_DCN) int num_hpo_dp_stream_encoder; int num_hpo_dp_link_encoder; -#endif int num_mpc_3dlut; }; @@ -77,14 +75,12 @@ struct resource_create_funcs { struct stream_encoder *(*create_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)( uint8_t inst, struct dc_context *ctx); -#endif struct dce_hwseq *(*create_hwseq)( struct dc_context *ctx); @@ -205,12 +201,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe); int get_num_odm_splits(struct pipe_ctx *pipe); -#if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( - const struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct dc_link *link); -#endif +bool get_temp_dp_link_res(struct dc_link *link, + struct link_resource *link_res, + struct dc_link_settings *link_settings); void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context); @@ -221,4 +214,7 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter); +const struct link_hwss *get_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index fd739aecf104..f305d4c9a122 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -137,3 +137,11 @@ AMD_DAL_IRQ_DCN31= $(addprefix $(AMDDALPATH)/dc/irq/dcn31/,$(IRQ_DCN31)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN31) endif +############################################################################### +# DCN 315 +############################################################################### +IRQ_DCN315 = irq_service_dcn315.o + +AMD_DAL_IRQ_DCN315= $(addprefix $(AMDDALPATH)/dc/irq/dcn315/,$(IRQ_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN315) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index c4b067d01895..93c31111500b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -40,7 +40,7 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn20( +static enum dc_irq_source to_dal_irq_source_dcn20( struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index aa708b61142f..45f99351a0ab 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -138,11 +138,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; -static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { - .set = NULL, - .ack = NULL -}; - #undef BASE_INNER #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 0f15bcada4e9..717977aec6d0 100644 --- 
a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -265,14 +265,6 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. */ @@ -401,12 +393,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c index 914ce2ce1c2f..ac0c6a62d17b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c @@ -37,8 +37,8 @@ #include "nbio/nbio_7_4_offset.h" -#include "dcn/dpcs_3_0_0_offset.h" -#include "dcn/dpcs_3_0_0_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" @@ -47,7 +47,7 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn30( +static enum dc_irq_source to_dal_irq_source_dcn30( struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c new file mode 100644 index 000000000000..e722171f0d2d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c @@ -0,0 +1,438 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "include/logger_interface.h" +#include "../dce110/irq_service_dce110.h" + + +#include "dcn/dcn_3_1_5_offset.h" +#include "dcn/dcn_3_1_5_sh_mask.h" + +#include "irq_service_dcn315.h" + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +static enum dc_irq_source to_dal_irq_source_dcn315( + struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) +{ + switch (src_id) { + case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK1; + case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK2; + case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK3; + case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK4; + case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK5; + case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; + case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP1; + case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP2; + case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP3; + case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP4; + case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP5; + case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP6; + case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE1; + case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE2; + case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE3; + case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE4; + case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE5; + case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE6; + case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: + return DC_IRQ_SOURCE_DMCUB_OUTBOX; + case DCN_1_0__SRCID__DC_HPD1_INT: + /* generic src_id for all HPD and HPDRX interrupts */ + switch (ext_id) { + case DCN_1_0__CTXID__DC_HPD1_INT: + return DC_IRQ_SOURCE_HPD1; + case DCN_1_0__CTXID__DC_HPD2_INT: + return DC_IRQ_SOURCE_HPD2; + case DCN_1_0__CTXID__DC_HPD3_INT: + return DC_IRQ_SOURCE_HPD3; + case DCN_1_0__CTXID__DC_HPD4_INT: + return DC_IRQ_SOURCE_HPD4; + case DCN_1_0__CTXID__DC_HPD5_INT: + return DC_IRQ_SOURCE_HPD5; + case DCN_1_0__CTXID__DC_HPD6_INT: + return DC_IRQ_SOURCE_HPD6; + case DCN_1_0__CTXID__DC_HPD1_RX_INT: + return DC_IRQ_SOURCE_HPD1RX; + case DCN_1_0__CTXID__DC_HPD2_RX_INT: + return DC_IRQ_SOURCE_HPD2RX; + case DCN_1_0__CTXID__DC_HPD3_RX_INT: + return DC_IRQ_SOURCE_HPD3RX; + case DCN_1_0__CTXID__DC_HPD4_RX_INT: + 
return DC_IRQ_SOURCE_HPD4RX; + case DCN_1_0__CTXID__DC_HPD5_RX_INT: + return DC_IRQ_SOURCE_HPD5RX; + case DCN_1_0__CTXID__DC_HPD6_RX_INT: + return DC_IRQ_SOURCE_HPD6RX; + default: + return DC_IRQ_SOURCE_INVALID; + } + break; + + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static bool hpd_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + HPD0_DC_HPD_INT_CONTROL, + DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +static const struct irq_source_info_funcs hpd_irq_info_funcs = { + .set = NULL, + .ack = hpd_ack +}; + +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs pflip_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs outbox_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +/* compile time expand base address. */ +#define BASE(seg) \ + BASE_INNER(seg) + +#define SRI(reg_name, block, id)\ + BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_DMUB(reg_name)\ + BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\ + .enable_reg = SRI(reg1, block, reg_num),\ + .enable_mask = \ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + .enable_value = {\ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ + },\ + .ack_reg = SRI(reg2, block, reg_num),\ + .ack_mask = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ + .ack_value = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ + +#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\ + .enable_reg = SRI_DMUB(reg1),\ + .enable_mask = \ + reg1 ## __ ## mask1 ## _MASK,\ + .enable_value = {\ + reg1 ## __ ## mask1 ## _MASK,\ + ~reg1 ## __ ## mask1 ## _MASK \ + },\ + .ack_reg = SRI_DMUB(reg2),\ + .ack_mask = \ + reg2 ## __ ## mask2 ## _MASK,\ + .ack_value = \ + reg2 ## __ ## mask2 ## _MASK \ + +#define hpd_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1 + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_irq_info_funcs\ + } + +#define hpd_rx_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_rx_irq_info_funcs\ + } +#define pflip_int_entry(reg_num)\ + [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\ + IRQ_REG_ENTRY(HUBPREQ, 
reg_num,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ + .funcs = &pflip_irq_info_funcs\ + } + +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. + */ +#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ + } + +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } + +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } +#define dmub_outbox_int_entry()\ + [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ + IRQ_REG_ENTRY_DMUB(\ + DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ + DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ + .funcs = &outbox_irq_info_funcs\ + } + +#define dummy_irq_entry() \ + {\ + .funcs = &dummy_irq_info_funcs\ + } + +#define i2c_int_entry(reg_num) \ + [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry() + +#define dp_sink_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry() + +#define gpio_pad_int_entry(reg_num) \ + [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry() + +#define dc_underflow_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() + +static const struct irq_source_info_funcs dummy_irq_info_funcs = { + .set = dal_irq_service_dummy_set, + .ack = dal_irq_service_dummy_ack +}; + +static const struct irq_source_info +irq_source_info_dcn315[DAL_IRQ_SOURCES_NUMBER] = { + [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(), + hpd_int_entry(0), + hpd_int_entry(1), + hpd_int_entry(2), + hpd_int_entry(3), + hpd_int_entry(4), + hpd_rx_int_entry(0), + hpd_rx_int_entry(1), + hpd_rx_int_entry(2), + hpd_rx_int_entry(3), + hpd_rx_int_entry(4), + i2c_int_entry(1), + i2c_int_entry(2), + i2c_int_entry(3), + i2c_int_entry(4), + i2c_int_entry(5), + i2c_int_entry(6), + dp_sink_int_entry(1), + dp_sink_int_entry(2), + dp_sink_int_entry(3), + dp_sink_int_entry(4), + dp_sink_int_entry(5), + dp_sink_int_entry(6), + [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(), + pflip_int_entry(0), + pflip_int_entry(1), + pflip_int_entry(2), + pflip_int_entry(3), + [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(), + [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(), + [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), + gpio_pad_int_entry(0), + gpio_pad_int_entry(1), + gpio_pad_int_entry(2), + gpio_pad_int_entry(3), + gpio_pad_int_entry(4), + gpio_pad_int_entry(5), + gpio_pad_int_entry(6), + gpio_pad_int_entry(7), + gpio_pad_int_entry(8), + gpio_pad_int_entry(9), + gpio_pad_int_entry(10), + gpio_pad_int_entry(11), + gpio_pad_int_entry(12), + gpio_pad_int_entry(13), + gpio_pad_int_entry(14), + gpio_pad_int_entry(15), + gpio_pad_int_entry(16), + gpio_pad_int_entry(17), + gpio_pad_int_entry(18), + gpio_pad_int_entry(19), + gpio_pad_int_entry(20), + gpio_pad_int_entry(21), + gpio_pad_int_entry(22), + gpio_pad_int_entry(23), + gpio_pad_int_entry(24), + gpio_pad_int_entry(25), + gpio_pad_int_entry(26), + 
gpio_pad_int_entry(27), + gpio_pad_int_entry(28), + gpio_pad_int_entry(29), + gpio_pad_int_entry(30), + dc_underflow_int_entry(1), + dc_underflow_int_entry(2), + dc_underflow_int_entry(3), + dc_underflow_int_entry(4), + dc_underflow_int_entry(5), + dc_underflow_int_entry(6), + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), + vblank_int_entry(0), + vblank_int_entry(1), + vblank_int_entry(2), + vblank_int_entry(3), + vline0_int_entry(0), + vline0_int_entry(1), + vline0_int_entry(2), + vline0_int_entry(3), + [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), + [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), + dmub_outbox_int_entry(), +}; + +static const struct irq_service_funcs irq_service_funcs_dcn315 = { + .to_dal_irq_source = to_dal_irq_source_dcn315 +}; + +static void dcn315_irq_construct( + struct irq_service *irq_service, + struct irq_service_init_data *init_data) +{ + dal_irq_service_construct(irq_service, init_data); + + irq_service->info = irq_source_info_dcn315; + irq_service->funcs = &irq_service_funcs_dcn315; +} + +struct irq_service *dal_irq_service_dcn315_create( + struct irq_service_init_data *init_data) +{ + struct irq_service *irq_service = kzalloc(sizeof(*irq_service), + GFP_KERNEL); + + if (!irq_service) + return NULL; + + dcn315_irq_construct(irq_service, init_data); + return irq_service; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.h b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.h new file mode 100644 index 000000000000..a5a80486a8d6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.h @@ -0,0 +1,34 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_IRQ_SERVICE_DCN315_H__ +#define __DAL_IRQ_SERVICE_DCN315_H__ + +#include "../irq_service.h" + +struct irq_service *dal_irq_service_dcn315_create( + struct irq_service_init_data *init_data); + +#endif /* __DAL_IRQ_SERVICE_DCN315_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile new file mode 100644 index 000000000000..054c2a727eb2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -0,0 +1,30 @@ +# +# Copyright 2022 Advanced Micro Devices, Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the link sub-component of DAL.
+# It abstracts the control and status of back end pipes such as DIO, HPO, DPIA,
+# PHY, HPD, DDC, etc.
+
+LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o
+
+AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c new file mode 100644 index 000000000000..e7047391934b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c @@ -0,0 +1,146 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ +#include "dc_link.h" +#include "link_dp_trace.h" + +void dp_trace_init(struct dc_link *link) +{ + memset(&link->dp_trace, 0, sizeof(link->dp_trace)); + link->dp_trace.is_initialized = true; +} + +void dp_trace_reset(struct dc_link *link) +{ + memset(&link->dp_trace, 0, sizeof(link->dp_trace)); +} + +bool dc_dp_trace_is_initialized(struct dc_link *link) +{ + return link->dp_trace.is_initialized; +} + +void dp_trace_detect_lt_init(struct dc_link *link) +{ + memset(&link->dp_trace.detect_lt_trace, 0, sizeof(link->dp_trace.detect_lt_trace)); +} + +void dp_trace_commit_lt_init(struct dc_link *link) +{ + memset(&link->dp_trace.commit_lt_trace, 0, sizeof(link->dp_trace.commit_lt_trace)); +} + +void dp_trace_link_loss_increment(struct dc_link *link) +{ + link->dp_trace.link_loss_count++; +} + +void dp_trace_lt_fail_count_update(struct dc_link *link, + unsigned int fail_count, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.counts.fail = fail_count; + else + link->dp_trace.commit_lt_trace.counts.fail = fail_count; +} + +void dp_trace_lt_total_count_increment(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.counts.total++; + else + link->dp_trace.commit_lt_trace.counts.total++; +} + +void dc_dp_trace_set_is_logged_flag(struct dc_link *link, + bool in_detection, + bool is_logged) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.is_logged = is_logged; + else + link->dp_trace.commit_lt_trace.is_logged = is_logged; +} + +bool dc_dp_trace_is_logged(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + return link->dp_trace.detect_lt_trace.is_logged; + else + return link->dp_trace.commit_lt_trace.is_logged; +} + +void dp_trace_lt_result_update(struct dc_link *link, + enum link_training_result result, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.result = result; + else + link->dp_trace.commit_lt_trace.result = result; +} + +void dp_trace_set_lt_start_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.timestamps.start = dm_get_timestamp(link->dc->ctx); + else + link->dp_trace.commit_lt_trace.timestamps.start = dm_get_timestamp(link->dc->ctx); +} + +void dp_trace_set_lt_end_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx); + else + link->dp_trace.commit_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx); +} + +unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + return link->dp_trace.detect_lt_trace.timestamps.end; + else + return link->dp_trace.commit_lt_trace.timestamps.end; +} + +struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + return &link->dp_trace.detect_lt_trace.counts; + else + return &link->dp_trace.commit_lt_trace.counts; +} + +unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link) +{ + return link->dp_trace.link_loss_count; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h new file mode 100644 index 000000000000..702f97c6ead0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_DP_TRACE_H__ +#define __LINK_DP_TRACE_H__ + +void dp_trace_init(struct dc_link *link); +void dp_trace_reset(struct dc_link *link); +bool dc_dp_trace_is_initialized(struct dc_link *link); +void dp_trace_detect_lt_init(struct dc_link *link); +void dp_trace_commit_lt_init(struct dc_link *link); +void dp_trace_link_loss_increment(struct dc_link *link); +void dp_trace_lt_fail_count_update(struct dc_link *link, + unsigned int fail_count, + bool in_detection); +void dp_trace_lt_total_count_increment(struct dc_link *link, + bool in_detection); +void dc_dp_trace_set_is_logged_flag(struct dc_link *link, + bool in_detection, + bool is_logged); +bool dc_dp_trace_is_logged(struct dc_link *link, + bool in_detection); +void dp_trace_lt_result_update(struct dc_link *link, + enum link_training_result result, + bool in_detection); +void dp_trace_set_lt_start_timestamp(struct dc_link *link, + bool in_detection); +void dp_trace_set_lt_end_timestamp(struct dc_link *link, + bool in_detection); +unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, + bool in_detection); +struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link, + bool in_detection); +unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); + +#endif /* __LINK_DP_TRACE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c new file mode 100644 index 000000000000..0f845113a6aa --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c @@ -0,0 +1,137 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_dio.h" +#include "core_types.h" +#include "dc_link_dp.h" +#include "link_enc_cfg.h" + +void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + throttled_vcp_size); +} + +void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + + link_enc->funcs->connect_dig_be_to_fe(link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); +} + +void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +} + +void enable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (dc_is_dp_sst_signal(signal)) + link_enc->funcs->enable_dp_output( + link_enc, + link_settings, + clock_source); + else + link_enc->funcs->enable_dp_mst_output( + link_enc, + link_settings, + clock_source); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); +} + +void disable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->disable_output(link_enc, signal); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +} + +void set_dio_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +void set_dio_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings); +} + +static const struct link_hwss dio_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + 
.enable_dp_link_output = enable_dio_dp_link_output, + .disable_dp_link_output = disable_dio_dp_link_output, + .set_dp_link_test_pattern = set_dio_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + }, +}; + +bool can_use_dio_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->link_enc != NULL; +} + +const struct link_hwss *get_dio_link_hwss(void) +{ + return &dio_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h new file mode 100644 index 000000000000..680df20b1fa3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h @@ -0,0 +1,53 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DIO_H__ +#define __LINK_HWSS_DIO_H__ + +#include "link_hwss.h" + +const struct link_hwss *get_dio_link_hwss(void); +bool can_use_dio_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); +void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); +void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx); +void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx); +void enable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); +void disable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); +void set_dio_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params); +void set_dio_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); + +#endif /* __LINK_HWSS_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c new file mode 100644 index 000000000000..35b206225201 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_dpia.h" +#include "core_types.h" +#include "link_hwss_dio.h" + +static const struct link_hwss dpia_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_dp_link_output, + .disable_dp_link_output = disable_dio_dp_link_output, + .set_dp_link_test_pattern = set_dio_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + }, +}; + +bool can_use_dpia_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign; +} + +const struct link_hwss *get_dpia_link_hwss(void) +{ + return &dpia_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h new file mode 100644 index 000000000000..ad16ec5d9bb7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h @@ -0,0 +1,34 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DPIA_H__ +#define __LINK_HWSS_DPIA_H__ + +#include "link_hwss.h" + +const struct link_hwss *get_dpia_link_hwss(void); +bool can_use_dpia_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); + +#endif /* __LINK_HWSS_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c new file mode 100644 index 000000000000..74919491675f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c @@ -0,0 +1,254 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_hpo_dp.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "dccg.h" +#include "dc_link_dp.h" + +static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) +{ + switch (link->link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + return PHYD32CLKA; + case TRANSMITTER_UNIPHY_B: + return PHYD32CLKB; + case TRANSMITTER_UNIPHY_C: + return PHYD32CLKC; + case TRANSMITTER_UNIPHY_D: + return PHYD32CLKD; + case TRANSMITTER_UNIPHY_E: + return PHYD32CLKE; + default: + return PHYD32CLKA; + } +} + +static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = + pipe_ctx->stream_res.hpo_dp_stream_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = + pipe_ctx->link_res.hpo_dp_link_enc; + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + throttled_vcp_size); +} + +static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = + pipe_ctx->stream_res.hpo_dp_stream_enc; + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank; + uint32_t link_bw_in_kbps = + dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings); + uint16_t hblank_min_symbol_width = 0; + + if (link_bw_in_kbps > 0) { + h_blank_in_ms = dc_fixpt_div(dc_fixpt_from_int( + timing->h_total - timing->h_addressable), + dc_fixpt_from_fraction(timing->pix_clk_100hz, 10)); + time_slot_in_ms = dc_fixpt_from_fraction(32 * 4, link_bw_in_kbps); + mtp_cnt_per_h_blank = 
dc_fixpt_div(h_blank_in_ms, + dc_fixpt_mul_int(time_slot_in_ms, 64)); + hblank_min_symbol_width = dc_fixpt_floor( + dc_fixpt_mul(mtp_cnt_per_h_blank, throttled_vcp_size)); + } + + hpo_dp_stream_encoder->funcs->set_hblank_min_symbol_width(hpo_dp_stream_encoder, + hblank_min_symbol_width); +} + +static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; + int count = 1; + + while (odm_pipe != NULL) { + count++; + odm_pipe = odm_pipe->next_odm_pipe; + } + + return count; +} + +static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + int odm_segment_count = get_odm_segment_count(pipe_ctx); + enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); + + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst); + dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); + dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk, + odm_segment_count, + &pipe_ctx->stream->timing); + stream_enc->funcs->enable_stream(stream_enc); + stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); +} + +static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + + stream_enc->funcs->disable(stream_enc); + dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing); + dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); + dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst); +} + +static void enable_hpo_dp_fpga_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + const struct dc *dc = link->dc; + enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link); + int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 : + link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 : + link_settings->link_rate == LINK_RATE_UHBR20 ? 
625000 : 0; + + dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz); + dc->res_pool->dccg->funcs->set_physymclk( + dc->res_pool->dccg, + link->link_enc_hw_inst, + PHYSYMCLK_FORCE_SRC_PHYD32CLK, + true); + dc->res_pool->dccg->funcs->enable_symclk32_le( + dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + phyd32clk); + link_res->hpo_dp_link_enc->funcs->link_enable( + link_res->hpo_dp_link_enc, + link_settings->lane_count); + +} + +static void enable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) + enable_hpo_dp_fpga_link_output(link, link_res, signal, + clock_source, link_settings); + else + link_res->hpo_dp_link_enc->funcs->enable_link_phy( + link_res->hpo_dp_link_enc, + link_settings, + link->link_enc->transmitter, + link->link_enc->hpd_source); +} + + +static void disable_hpo_dp_fpga_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + const struct dc *dc = link->dc; + + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); + dc->res_pool->dccg->funcs->disable_symclk32_le( + dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst); + dc->res_pool->dccg->funcs->set_physymclk( + dc->res_pool->dccg, + link->link_enc_hw_inst, + PHYSYMCLK_FORCE_SRC_SYMCLK, + false); + dm_set_phyd32clk(dc->ctx, 0); +} + +static void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) { + disable_hpo_dp_fpga_link_output(link, link_res, signal); + } else { + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); + link_res->hpo_dp_link_enc->funcs->disable_link_phy( + link_res->hpo_dp_link_enc, signal); + } +} + +static void set_hpo_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_hpo_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); +} + +static const struct link_hwss hpo_dp_link_hwss = { + .setup_stream_encoder = setup_hpo_dp_stream_encoder, + .reset_stream_encoder = reset_hpo_dp_stream_encoder, + .ext = { + .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size, + .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, + .enable_dp_link_output = enable_hpo_dp_link_output, + .disable_dp_link_output = disable_hpo_dp_link_output, + .set_dp_link_test_pattern = set_hpo_dp_link_test_pattern, + .set_dp_lane_settings = set_hpo_dp_lane_settings, + }, +}; + +bool can_use_hpo_dp_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link_res->hpo_dp_link_enc != NULL; +} + +const struct link_hwss *get_hpo_dp_link_hwss(void) +{ + return &hpo_dp_link_hwss; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h new file mode 100644 index 
000000000000..57d447ec27b8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_DP_H__ +#define __LINK_HWSS_HPO_DP_H__ + +#include "link_hwss.h" + +bool can_use_hpo_dp_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); +const struct link_hwss *get_hpo_dp_link_hwss(void); + + +#endif /* __LINK_HWSS_HPO_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c new file mode 100644 index 000000000000..9df273ca699b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c @@ -0,0 +1,43 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_hwss_hpo_frl.h" +#include "core_types.h" +#include "virtual/virtual_link_hwss.h" + +static const struct link_hwss hpo_frl_link_hwss = { + .setup_stream_encoder = virtual_setup_stream_encoder, + .reset_stream_encoder = virtual_reset_stream_encoder, +}; + +bool can_use_hpo_frl_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link_res->hpo_frl_link_enc != NULL; +} + +const struct link_hwss *get_hpo_frl_link_hwss(void) +{ + return &hpo_frl_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h new file mode 100644 index 000000000000..ea8d9760132f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h @@ -0,0 +1,34 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_FRL_H__ +#define __LINK_HWSS_HPO_FRL_H__ + +#include "link_hwss.h" + +bool can_use_hpo_frl_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); +const struct link_hwss *get_hpo_frl_link_hwss(void); + +#endif /* __LINK_HWSS_HPO_FRL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 5df1d80c8341..17d05071b809 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -36,7 +36,7 @@ #include <asm/byteorder.h> #include <drm/drm_print.h> -#include <drm/drm_dp_helper.h> +#include <drm/dp/drm_dp_helper.h> #include "cgs_common.h" diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile index 07326d244d50..931facd4dab5 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/Makefile +++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile @@ -23,7 +23,7 @@ # Makefile for the virtual sub-component of DAL. # It provides the control and status of HW CRTC block. 
-VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o +VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o virtual_link_hwss.o AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL)) diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c index 348e9a600a72..df8bc44bc4be 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c @@ -63,7 +63,8 @@ static void virtual_link_encoder_disable_output( static void virtual_link_encoder_dp_set_lane_settings( struct link_encoder *enc, - const struct link_training_settings *link_settings) {} + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) {} static void virtual_link_encoder_dp_set_phy_pattern( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c new file mode 100644 index 000000000000..525eba2a3354 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c @@ -0,0 +1,43 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "virtual_link_hwss.h" + +void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx) +{ +} + +void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx) +{ +} +static const struct link_hwss virtual_link_hwss = { + .setup_stream_encoder = virtual_setup_stream_encoder, + .reset_stream_encoder = virtual_reset_stream_encoder, +}; + +const struct link_hwss *get_virtual_link_hwss(void) +{ + return &virtual_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h new file mode 100644 index 000000000000..e6bcb4bb0f3a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h @@ -0,0 +1,34 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __DC_VIRTUAL_LINK_HWSS_H__ +#define __DC_VIRTUAL_LINK_HWSS_H__ + +#include "core_types.h" + +void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx); +void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx); +const struct link_hwss *get_virtual_link_hwss(void); + +#endif /* __DC_VIRTUAL_LINK_HWSS_H__ */
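
Editor's note: the value programmed by set_hpo_dp_hblank_min_symbol_width() in the new link_hwss_hpo_dp.c above is derived purely from the stream timing, the link bandwidth and the throttled VCP size. The sketch below mirrors that arithmetic in a standalone C program, using plain doubles in place of the driver's dc_fixpt_* fixed-point helpers; the helper name hblank_min_symbol_width() and all numeric inputs in main() are hypothetical example values, not part of the patch.

/*
 * Minimal sketch of the arithmetic in set_hpo_dp_hblank_min_symbol_width(),
 * with doubles standing in for the driver's dc_fixpt_* helpers.
 * Build with: cc -o hblank hblank.c -lm
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t hblank_min_symbol_width(uint32_t h_total,
					uint32_t h_addressable,
					uint32_t pix_clk_100hz,
					uint32_t link_bw_in_kbps,
					double throttled_vcp_size)
{
	double h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;

	if (link_bw_in_kbps == 0)
		return 0;

	/* horizontal blanking duration: blanking pixels / pixel clock in kHz */
	h_blank_in_ms = (double)(h_total - h_addressable) /
			((double)pix_clk_100hz / 10.0);
	/* per-time-slot duration as in the patch: (32 * 4) / link_bw_in_kbps */
	time_slot_in_ms = (32.0 * 4.0) / (double)link_bw_in_kbps;
	/* MTP count per horizontal blank, 64 time slots per MTP as in the patch */
	mtp_cnt_per_h_blank = h_blank_in_ms / (time_slot_in_ms * 64.0);

	/* floor(mtp_cnt_per_h_blank * throttled_vcp_size) */
	return (uint16_t)floor(mtp_cnt_per_h_blank * throttled_vcp_size);
}

int main(void)
{
	/* hypothetical 3840x2160 timing and link bandwidth, for illustration only */
	uint32_t h_total = 4400, h_addressable = 3840;
	uint32_t pix_clk_100hz = 5940000;	/* 594 MHz expressed in 100 Hz units */
	uint32_t link_bw_in_kbps = 38961792;	/* example value only */
	double throttled_vcp_size = 0.6;	/* example VCP fraction */

	printf("hblank_min_symbol_width = %u\n",
	       hblank_min_symbol_width(h_total, h_addressable, pix_clk_100hz,
				       link_bw_in_kbps, throttled_vcp_size));
	return 0;
}

With the example numbers above the program prints a small integer (2 for these inputs), which is the kind of per-stream minimum symbol width the driver hands to the HPO DP stream encoder; in the patch itself the same computation is done in fixed31_32 arithmetic and only when link_bw_in_kbps is non-zero.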