From b8fe56375f78835db47565d91ea9d21767fe3c08 Mon Sep 17 00:00:00 2001 From: Leon Huang Date: Tue, 8 Nov 2022 16:29:13 +0800 Subject: drm/amd/display: Refactor ABM feature [Why] Refactor ABM feature and implement inbox command for DMUB. [How] Implement the ioctl to send inbox command to DMUB. Reviewed-by: Rodrigo Siqueira Signed-off-by: Leon Huang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index ecb4191b6e64..db5cf9acafe6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -55,6 +55,12 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*set_pipe_ex)(struct abm *abm, + unsigned int otg_inst, + unsigned int option, + unsigned int panel_inst); + bool (*set_abm_event)(struct abm *abm, unsigned int full_screen, unsigned int video_mode, + unsigned int hdr_mode, unsigned int panel_inst); }; #endif -- cgit From 9c25ab167df412a5474dedfd0e7743e76bc89cbe Mon Sep 17 00:00:00 2001 From: Sung Lee Date: Mon, 10 Apr 2023 14:15:14 -0400 Subject: drm/amd/display: Add p-state debugging [WHY] P-State related issues are fairly common but currently there is no way to debug these issues after the fact. [HOW] Add helpful registers to HW state queries Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Sung Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 5 +++++ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1 + drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 3 ++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 3 +++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 1 + 5 files changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 24bd93219936..0ddd310cc971 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -623,6 +623,11 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); } + + if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); + hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + } } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index beb26dc8a07f..aa80b3f2ca3f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -111,6 +111,7 @@ struct dcn_hubbub_state { uint32_t vm_error_vmid; uint32_t vm_error_pipe; uint32_t vm_error_mode; + uint32_t test_debug_data; }; struct hubbub_funcs { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index d35432c21856..7c9a2b34bd05 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ 
b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -271,7 +271,7 @@ struct dmub_srv_hw_params { */ struct dmub_diagnostic_data { uint32_t dmcub_version; - uint32_t scratch[16]; + uint32_t scratch[17]; uint32_t pc; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; @@ -282,6 +282,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_rptr; uint32_t inbox0_wptr; uint32_t inbox0_size; + uint32_t gpint_datain0; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 21dd6cbdb106..bf5994e292d9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -439,6 +439,7 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); + diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); @@ -469,6 +470,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + + diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index f15336b6e22b..d58a1e4b9f1c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -107,6 +107,7 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH15) \ DMUB_SR(DMCUB_SCRATCH16) \ DMUB_SR(DMCUB_SCRATCH17) \ + DMUB_SR(DMCUB_GPINT_DATAIN0) \ DMUB_SR(DMCUB_GPINT_DATAIN1) \ DMUB_SR(DMCUB_GPINT_DATAOUT) \ DMUB_SR(CC_DC_PIPE_DIS) \ -- cgit From 738b3469f8e12ae72555ef4724bebe8167a93e29 Mon Sep 17 00:00:00 2001 From: Sung Lee Date: Fri, 21 Apr 2023 12:03:16 -0400 Subject: drm/amd/display: Add additional pstate registers to HW state query [WHY] These registers would be useful to know when debugging pstate issues. [HOW] Add additional registers to hw state query. 
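Both additions land in existing HW-state snapshot structures (dcn_hubbub_state and dmub_diagnostic_data), so any debug dump path that already walks those structs can report them. A minimal sketch of such a consumer follows; the helper name is hypothetical and the DC_LOG_DEBUG macro is assumed to be available, while test_debug_data, scratch[16] and gpint_datain0 are the fields introduced by these patches.

	/* Illustrative only: report the new p-state debug fields from a HW state query. */
	static void report_pstate_debug(const struct dcn_hubbub_state *hubbub_state,
					const struct dmub_diagnostic_data *diag_data)
	{
		/* DCHUBBUB debug-bus value latched at index 0x6 by hubbub2_read_state() */
		DC_LOG_DEBUG("hubbub test_debug_data: 0x%08x\n",
			     hubbub_state->test_debug_data);

		/* Extra DMCUB scratch register and GPINT data-in word */
		DC_LOG_DEBUG("dmcub scratch16: 0x%08x gpint_datain0: 0x%08x\n",
			     diag_data->scratch[16], diag_data->gpint_datain0);
	}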
Reviewed-by: Aric Cyr Reviewed-by: Jun Lei Acked-by: Alex Hung Signed-off-by: Sung Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 ++ 4 files changed, 16 insertions(+) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 0b17c2993ca5..09784222cc03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -690,6 +690,8 @@ struct dcn_hubp_state { uint32_t primary_surface_addr_hi; uint32_t primary_meta_addr_lo; uint32_t primary_meta_addr_hi; + uint32_t uclk_pstate_force; + uint32_t hubp_cntl; }; struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 0ddd310cc971..6eebcb22e317 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -628,6 +628,12 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); } + + if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) + hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); + + if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) + hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index e46bbe7ddcc9..2861d974fcf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -449,6 +449,12 @@ void hubp3_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(UCLK_PSTATE_FORCE)) + s->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index aa80b3f2ca3f..aaa293613846 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -112,6 +112,8 @@ struct dcn_hubbub_state { uint32_t vm_error_pipe; uint32_t vm_error_mode; uint32_t test_debug_data; + uint32_t watermark_change_cntl; + uint32_t dram_state_cntl; }; struct hubbub_funcs { -- cgit From d205a800a66e46430ab93c0d450393233d39931a Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Wed, 12 Apr 2023 14:02:01 -0400 Subject: drm/amd/display: Add visual confirm color support for MCLK switch [Why && How] We would like to have visual confirm color support for MCLK switch. 1. Set visual confirm color to yellow: Vblank MCLK switch. 2. Set visual confirm color to cyan: FPO + Vblank MCLK switch. 3. Set visual confirm color to pink: Vactive MCLK switch. 
Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Leo (Hanghong) Ma Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 47 +++++++++++++++++--- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 50 ++++++++++++++++++++-- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 22 +++------- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 1 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 26 +---------- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 5 --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c | 4 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2 +- .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 7 +++ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 + drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9 +++- 19 files changed, 125 insertions(+), 65 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c26dfdd48dd9..2c639cd346d7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1119,6 +1119,33 @@ static void phantom_pipe_blank( hws->funcs.wait_for_blank_complete(opp); } +static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (dc->ctx->dce_version >= DCN_VERSION_1_0) { + memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) + get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else { + if (dc->ctx->dce_version < DCN_VERSION_2_0) + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); + } + if (dc->ctx->dce_version >= DCN_VERSION_2_0) { + if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) + get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) + get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) + get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -1189,6 +1216,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, 
true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); @@ -3456,6 +3486,14 @@ static void commit_planes_for_stream(struct dc *dc, } } + if (dc->debug.visual_confirm) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3539,15 +3577,14 @@ static void commit_planes_for_stream(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && + if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || + dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && pipe_ctx->stream && pipe_ctx->plane_state) { - /* Only update visual confirm for SUBVP here. + /* Only update visual confirm for SUBVP and Mclk switching here. * The bar appears on all pipes, so we need to update the bar on all displays, * so the information doesn't get stale. */ - struct mpcc_blnd_cfg blnd_cfg = { 0 }; - - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, pipe_ctx->plane_res.hubp->inst); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 2acbf692193f..8a98b8dd008e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -421,6 +421,7 @@ void get_hdr_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -428,15 +429,17 @@ void get_subvp_visual_confirm_color( bool enable_subvp = false; int i; - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx) + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { /* SubVP enable - red */ + color->color_g_y = 0; + color->color_b_cb = 0; color->color_r_cr = color_value; enable_subvp = true; @@ -448,12 +451,51 @@ void get_subvp_visual_confirm_color( if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { color->color_r_cr = 0; - if (pipe_ctx->stream->ignore_msa_timing_param == 1) + if (pipe_ctx->stream->allow_freesync == 1) { /* SubVP enable and DRR on - green */ + color->color_b_cb = 0; color->color_g_y = color_value; - else + } else { /* SubVP enable and No DRR - blue */ + color->color_g_y = 0; + color->color_b_cb = color_value; + } + } +} + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + color->color_r_cr = color_value; + 
color->color_g_y = color_value; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + } + } else { + /* In Vactive - pink */ + color->color_r_cr = color_value; color->color_b_cb = color_value; + } + /* SubVP */ + get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1ebb8d3573f4..8be2e6d6d888 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -426,6 +426,7 @@ enum visual_confirm { VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, VISUAL_CONFIRM_SUBVP = 14, + VISUAL_CONFIRM_MCLK_SWITCH = 16, }; enum dc_psr_power_opts { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a7ad1d7bc43e..905246a2ece4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2602,23 +2602,15 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) +void dcn10_update_visual_confirm_color(struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) { struct mpc *mpc = dc->res_pool->mpc; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else - color_space_to_black_color( - dc, pipe_ctx->stream->output_color_space, color); - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); + memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color)); + mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id); } } @@ -2671,7 +2663,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2693,7 +2685,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 0ef7bf7ddb75..ef6d56da417c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -202,7 +202,6 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits); void dcn10_update_visual_confirm_color( struct dc *dc, struct pipe_ctx *pipe_ctx, - 
struct tg_color *color, int mpcc_id); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index f49c1c0d6274..b3e187b1347d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2580,28 +2580,6 @@ void dcn20_reset_hw_ctx_wrap( } } -void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) -{ - struct mpc *mpc = dc->res_pool->mpc; - - // input to MPCC is always RGB, by default leave black_color at 0 - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) - get_mpctree_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, pipe_ctx, color); - - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); - } -} - void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2657,7 +2635,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) if (!pipe_ctx->plane_state->update_flags.bits.full_update && !pipe_ctx->update_flags.bits.mpcc) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2679,7 +2657,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 33a36c02b2f8..01901b08644c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -150,10 +150,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); -void dcn20_update_visual_confirm_color(struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct tg_color *color, - int mpcc_id); - #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 7c5817c426fa..4192c522e59a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -102,7 +102,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 1aeb04fbd89d..75472d53ff52 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -496,7 +496,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); return; } @@ -521,7 +521,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->res_pool->mpc, mpcc_id); /* Call MPC to insert new plane */ - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, mpc_tree_params, &blnd_cfg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c index 9c16633e473a..92dd4cddbab8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -91,7 +91,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = { .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn201_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index fe1a8e2e08ef..8b58ce1db035 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3216d10c58ba..18e94d8ae54f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index 6192851c59ed..257df8660b4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -107,7 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_dcc_en_bits = 
dcn10_get_dcc_en_bits, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 8598ea233ef3..ba9e7dee6e5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -110,7 +110,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index ed8a1b94c006..7a28c7bb25d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -112,7 +112,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn314_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8085f2acb1a9..24a890d879b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -109,7 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 8c60b88c7d1a..d8d8fcd5ef1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1324,6 +1324,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, int i, pipe_idx, active_hubp_count = 0; bool usr_retraining_support = false; bool unbounded_req_enabled = false; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; dc_assert_fp_enabled(); @@ -1405,6 +1406,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0) + 
context->res_ctx.pipe_ctx[i].has_vactive_margin = true; + else + context->res_ctx.pipe_ctx[i].has_vactive_margin = false; + /* MALL Allocation Sizes */ /* count from active, top pipes per plane only */ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && @@ -2015,6 +2021,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = true; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank; } else { /* Restore FCLK latency and re-run validation to go back to original validation * output if we find that enabling FPO does not give us any benefit (i.e. lower diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 2eb597a24425..b4c1cc6dc857 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -426,6 +426,8 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + struct tg_color visual_confirm_color; + bool has_vactive_margin; }; /* Data used for dynamic link encoder assignment. diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 88ac723d10aa..df160c6a630c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -257,7 +257,6 @@ struct hw_sequencer_funcs { void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); void (*update_phantom_vp_position)(struct dc *dc, @@ -294,6 +293,7 @@ void get_surface_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); @@ -306,4 +306,11 @@ void get_mpctree_visual_confirm_color( void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + #endif /* __DC_HW_SEQUENCER_H__ */ -- cgit From fe9fa3859b66caf4a6923598c8e343b8a32ec5d1 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 5 May 2023 11:06:26 -0400 Subject: drm/amd/display: Make unbounded req update separate from dlg/ttu [Description] - Updates to unbounded requesting should not be conditional on updates to dlg / ttu, as this could prevent unbounded requesting from being updated if dlg / ttu does not change Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 ++++++++--- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index b3e187b1347d..e74c3ce561ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1361,6 +1361,7 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.dppclk = 1; 
new_pipe->update_flags.bits.hubp_interdependent = 1; new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.unbounded_req = 1; new_pipe->update_flags.bits.gamut_remap = 1; new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; @@ -1504,6 +1505,9 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; } + + if (old_pipe->unbounded_req != new_pipe->unbounded_req) + new_pipe->update_flags.bits.unbounded_req = 1; } static void dcn20_update_dchubp_dpp( @@ -1537,10 +1541,11 @@ static void dcn20_update_dchubp_dpp( &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); - - if (hubp->funcs->set_unbounded_requesting) - hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); } + + if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); + if (pipe_ctx->update_flags.bits.hubp_interdependent) hubp->funcs->hubp_setup_interdependent( hubp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b4c1cc6dc857..d8dd143cf6ea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -374,6 +374,7 @@ union pipe_update_flags { uint32_t viewport : 1; uint32_t plane_changed : 1; uint32_t det_size : 1; + uint32_t unbounded_req : 1; } bits; uint32_t raw; }; -- cgit From fd73c8507675f6bccc039cf319f183e41e447cb7 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Wed, 17 May 2023 13:49:29 -0400 Subject: drm/amd/display: drop unused function set_abm_event() set_abm_event() is never actually used. So, drop it. 
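For context, the dropped dmub_abm_set_event_ex() queried abm_feature_support() and then returned false without ever consuming the result, the kind of dead code that W=1 builds and static checkers tend to report. A reduced, self-contained illustration of that pattern follows; the names here are placeholders and do not come from the driver.

	/* Placeholder for the feature query whose result was ignored. */
	static unsigned int query_support(unsigned int panel_inst)
	{
		return panel_inst;
	}

	/* gcc's -Wunused-but-set-variable flags 'feature_support': set but never used. */
	static bool set_event_stub(unsigned int panel_inst)
	{
		bool ret = false;
		unsigned int feature_support;

		feature_support = query_support(panel_inst);

		return ret;
	}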
Fixes: b8fe56375f78 ("drm/amd/display: Refactor ABM feature") Reported-by: kernel test robot Reported-by: Tom Rix Reviewed-by: Rodrigo Siqueira Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 12 ------------ drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 2 -- 2 files changed, 14 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index a66f83a61402..2fb9572ce25d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -131,17 +131,6 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op return ret; } -static bool dmub_abm_set_event_ex(struct abm *abm, unsigned int full_screen, unsigned int video_mode, - unsigned int hdr_mode, unsigned int panel_inst) -{ - bool ret = false; - unsigned int feature_support; - - feature_support = abm_feature_support(abm, panel_inst); - - return ret; -} - static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm, unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, @@ -167,7 +156,6 @@ static const struct abm_funcs abm_funcs = { .init_abm_config = dmub_abm_init_config_ex, .set_abm_pause = dmub_abm_set_pause_ex, .set_pipe_ex = dmub_abm_set_pipe_ex, - .set_abm_event = dmub_abm_set_event_ex, .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index db5cf9acafe6..d2190a3320f6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -59,8 +59,6 @@ struct abm_funcs { unsigned int otg_inst, unsigned int option, unsigned int panel_inst); - bool (*set_abm_event)(struct abm *abm, unsigned int full_screen, unsigned int video_mode, - unsigned int hdr_mode, unsigned int panel_inst); }; #endif -- cgit From 3e8d74cb128fb1a4d56270ffbecea6056c55739a Mon Sep 17 00:00:00 2001 From: Saaem Rizvi Date: Tue, 9 May 2023 14:41:59 -0400 Subject: drm/amd/display: Trigger DIO FIFO resync on commit streams [WHY] Currently, there is an intermittent issue where a screen can either go blank or be corrupted. [HOW] To resolve the issue we trigger the ramping logic for DIO FIFO so that it goes back up to the correct speed. 
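Concretely, the resync trigger reads the divider the DENTIST is currently using for DISPCLK and writes it back into the write divider, which re-triggers the ramping logic so the DIO FIFO comes back up to the correct speed. Stripped of the surrounding DCCG plumbing, the operation is the two register accesses sketched below (REG_GET/REG_UPDATE are the existing DC register helpers; the full version is dccg314_trigger_dio_fifo_resync() in the diff).

	/* Re-arm the DENTIST DISPCLK ramp by writing back the currently-read divider. */
	uint32_t dispclk_rdivider_value = 0;

	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
	REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);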
Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Saaem Rizvi Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 3 +++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 4 +++- .../gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 11 +++++++++ .../gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h | 5 +++- .../gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 27 ++++++++++++++++++++++ .../gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h | 2 ++ .../gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3 +++ .../drm/amd/display/dc/inc/hw_sequencer_private.h | 2 ++ 9 files changed, 56 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8d2460d06bce..1a0be40d125c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2291,6 +2291,9 @@ enum dc_status dce110_apply_ctx_to_hw( if (DC_OK != status) return status; + + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); } if (dc->fbc_compressor) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 7bdc146f7cb5..c8602bcfa393 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -208,7 +208,9 @@ #define DCCG314_REG_FIELD_LIST(type) \ type DSCCLK3_DTO_PHASE;\ type DSCCLK3_DTO_MODULO;\ - type DSCCLK3_DTO_ENABLE; + type DSCCLK3_DTO_ENABLE;\ + type DENTIST_DISPCLK_RDIVIDER;\ + type DENTIST_DISPCLK_WDIVIDER; #define DCCG32_REG_FIELD_LIST(type) \ type DPSTREAMCLK0_EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index de7bfba2c179..e0e7d32bb1a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -45,6 +45,16 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg314_trigger_dio_fifo_resync( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t dispclk_rdivider_value = 0; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); +} + static void dccg314_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -357,6 +367,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, .set_pixel_rate_div = dccg314_set_pixel_rate_div, + .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h index 90687a9e8fdd..8e07d3151f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h @@ -192,7 +192,10 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\ - DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh) + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\ 
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh) struct dccg *dccg314_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index f7a3e0d71d01..70fac2ebb757 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -390,6 +390,33 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) pix_per_cycle); } +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) +{ + uint8_t i; + struct pipe_ctx *pipe = NULL; + bool otg_disabled[MAX_PIPES] = {false}; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->top_pipe || pipe->prev_odm_pipe) + continue; + + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); + otg_disabled[i] = true; + } + } + + hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (otg_disabled[i]) + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } +} + void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on) { if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 6d0b62503caa..559d71002e8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,6 +41,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 7a28c7bb25d2..90be62c05822 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -152,6 +152,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn314_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio, }; void dcn314_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index ad6acd1b34e1..0b700b3d7d97 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -159,6 +159,9 @@ struct dccg_funcs { int otg_inst, int pixclk_khz); + void (*trigger_dio_fifo_resync)( + struct dccg *dccg); + void (*dpp_root_clock_control)( struct dccg *dccg, unsigned int dpp_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 4513544559be..4ca4192c1e12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -160,6 +160,8 @@ struct hwseq_private_funcs { unsigned int *k1_div, unsigned int *k2_div); void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); + void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, + struct dc_state *context); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif }; -- cgit From 0baae624630788862bbd654741929007971e9d5b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 18 May 2023 11:30:44 -0400 Subject: drm/amd/display: Refactor fast update to use new HWSS build sequence [Description] - Refactor HW sequencer to use a build / execute sequence - Also move gamma updates to become fast v2: squash in build fix ("drm/amd/display: Fix guarding of 'if (dc->debug.visual_confirm)'") Acked-by: Stylon Wang Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 274 ++++++++++++++++++--- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 255 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dce100/dce100_resource.c | 5 + .../drm/amd/display/dc/dce110/dce110_resource.c | 5 + .../drm/amd/display/dc/dce112/dce112_resource.c | 5 + .../drm/amd/display/dc/dce120/dce120_resource.c | 1 + .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 6 + .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + .../drm/amd/display/dc/dcn201/dcn201_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1 + .../drm/amd/display/dc/dcn315/dcn315_resource.c | 1 + .../drm/amd/display/dc/dcn316/dcn316_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 24 ++ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h | 2 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 1 + drivers/gpu/drm/amd/display/dc/inc/core_types.h | 11 + drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 126 ++++++++++ 20 files changed, 693 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0e9403f21dae..60da6d834518 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2589,15 +2589,19 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change - || update_flags->bits.lut_3d - || update_flags->bits.gamma_change - || update_flags->bits.gamut_remap_change) { + if (update_flags->bits.lut_3d) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } + if (dc->debug.enable_legacy_fast_update && + (update_flags->bits.gamma_change || + update_flags->bits.gamut_remap_change || + update_flags->bits.input_csc_change || + update_flags->bits.coeff_reduction_change)) { + type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, type); + } return overall_type; } @@ -2630,7 +2634,7 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (stream_update->out_transfer_func) + if 
(dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) @@ -2661,6 +2665,12 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + /* Output transfer function changes do not require bandwidth recalculation, + * so don't trigger a full update + */ + if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + su_flags->bits.out_tf = 1; } for (i = 0 ; i < surface_count; i++) { @@ -3412,6 +3422,166 @@ void dc_dmub_update_dirty_rect(struct dc *dc, } } +static void build_dmub_update_dirty_rect( + struct dc *dc, + int surface_count, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + union dmub_rb_cmd cmd; + struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; + unsigned int i, j; + unsigned int panel_inst = 0; + + if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) + return; + + if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + return; + + memset(&cmd, 0x0, sizeof(cmd)); + cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; + cmd.update_dirty_rect.header.sub_type = 0; + cmd.update_dirty_rect.header.payload_bytes = + sizeof(cmd.update_dirty_rect) - + sizeof(cmd.update_dirty_rect.header); + update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; + + if (!srf_updates[i].surface || !flip_addr) + continue; + /* Do not send in immediate flip mode */ + if (srf_updates[i].surface->flip_immediate) + continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; + memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, + sizeof(flip_addr->dirty_rects)); + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->stream != stream) + continue; + if (pipe_ctx->plane_state != plane_state) + continue; + update_dirty_rect->panel_inst = panel_inst; + update_dirty_rect->pipe_idx = j; + dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; + dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + (*dmub_cmd_count)++; + } + } +} + + +/** + * ************************************************************************************************ + * build_dmub_cmd_list: Build an array of DMCUB commands to be sent to DMCUB + * + * @param [in]: dc: Current DC state + * @param [in]: srf_updates: Array of surface updates + * @param [in]: surface_count: Number of surfaces that have an updated + * @param [in]: stream: Correponding stream to be updated in the current flip + * @param [in]: context: New DC state to be programmed + * + * @param [out]: dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB + * @param [out]: dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array + * + * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required + * to build an array of commands and have them sent while the OTG lock is acquired. 
+ * + * @return: void + * ************************************************************************************************ + */ +static void build_dmub_cmd_list(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + // Initialize cmd count to 0 + *dmub_cmd_count = 0; + build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); +} + +static void commit_planes_for_stream_fast(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + dc_z10_restore(dc); + + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->top_pipe && + !pipe_ctx->prev_odm_pipe && + pipe_ctx->stream && + pipe_ctx->stream == stream) { + top_pipe_to_program = pipe_ctx; + } + } + + if (dc->debug.visual_confirm) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + } + + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->plane_state) + continue; + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; + } + } + } + + build_dmub_cmd_list(dc, + srf_updates, + surface_count, + stream, + context, + context->dc_dmub_cmd, + &(context->dmub_cmd_count)); + hwss_build_fast_sequence(dc, + context->dc_dmub_cmd, + context->dmub_cmd_count, + context->block_sequence, + &(context->block_sequence_steps), + top_pipe_to_program); + hwss_execute_sequence(dc, + context->block_sequence, + context->block_sequence_steps); +} + static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -3449,21 +3619,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. 
- */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } - if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -4046,6 +4201,43 @@ static bool commit_minimal_transition_state(struct dc *dc, return true; } +/** + * ******************************************************************************* + * update_seamless_boot_flags: Helper function for updating seamless boot flags + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * @param [in]: surface_count: Number of surfaces that have an updated + * @param [in]: stream: Correponding stream to be updated in the current flip + * + * Updating seamless boot flags do not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * @return: void + * ******************************************************************************* + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. + */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4112,14 +4304,25 @@ bool dc_update_planes_and_stream(struct dc *dc, update_type = UPDATE_TYPE_FULL; } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } if (dc->current_state != context) { @@ -4244,7 +4447,17 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - commit_planes_for_stream( + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( dc, srf_updates, surface_count, @@ -4252,6 +4465,7 @@ void dc_commit_updates_for_stream(struct dc *dc, stream_update, update_type, context); + } /*update current_State*/ if (dc->current_state != context) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 8a98b8dd008e..182c42c63bc5 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -27,6 +27,8 @@ #include "core_types.h" #include "timing_generator.h" #include "hw_sequencer.h" +#include "hw_sequencer_private.h" +#include "basics/dc_common.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -463,6 +465,259 @@ void get_subvp_visual_confirm_color( } } +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx) +{ + struct dc_plane_state *plane = pipe_ctx->plane_state; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dce_hwseq *hws = dc->hwseq; + struct pipe_ctx *current_pipe = NULL; + struct pipe_ctx *current_mpc_pipe = NULL; + unsigned int i = 0; + + *num_steps = 0; // Initialize to 0 + + if (!plane || !stream) + return; + + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = true; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + + for (i = 0; i < dmub_cmd_count; i++) { + block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx; + block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = &(dc_dmub_cmd[i].dmub_cmd); + block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type; + block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + while (current_mpc_pipe) { + if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; + block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*num_steps)++; + } + if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; + block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; + block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*num_steps)++; + } + if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { + block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; + block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*num_steps)++; + } + + if (hws->funcs.set_input_transfer_func && 
current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { + block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; + block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { + block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { + block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_SETUP_DPP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { + block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*num_steps)++; + } + if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { + block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream; + block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } + + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = false; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + + while (current_mpc_pipe) { + if (!current_mpc_pipe->bottom_pipe && !pipe_ctx->next_odm_pipe && + current_mpc_pipe->stream && current_mpc_pipe->plane_state && + current_mpc_pipe->plane_state->update_flags.bits.addr_update && + !current_mpc_pipe->plane_state->skip_manual_trigger) { + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*num_steps)++; + } + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } +} + +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps) +{ + unsigned int i; + union block_sequence_params *params; + struct dce_hwseq *hws = dc->hwseq; + + for (i = 0; i < num_steps; i++) { + params = &(block_sequence[i].params); + switch (block_sequence[i].func) { + + case 
DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST: + dc->hwss.subvp_pipe_control_lock_fast(params); + break; + case OPTC_PIPE_CONTROL_LOCK: + dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc, + params->pipe_control_lock_params.pipe_ctx, + params->pipe_control_lock_params.lock); + break; + case HUBP_SET_FLIP_CONTROL_GSL: + dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, + params->set_flip_control_gsl_params.flip_immediate); + break; + case HUBP_PROGRAM_TRIPLEBUFFER: + dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, + params->program_triplebuffer_params.pipe_ctx, + params->program_triplebuffer_params.enableTripleBuffer); + break; + case HUBP_UPDATE_PLANE_ADDR: + dc->hwss.update_plane_addr(params->update_plane_addr_params.dc, + params->update_plane_addr_params.pipe_ctx); + break; + case DPP_SET_INPUT_TRANSFER_FUNC: + hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc, + params->set_input_transfer_func_params.pipe_ctx, + params->set_input_transfer_func_params.plane_state); + break; + case DPP_PROGRAM_GAMUT_REMAP: + dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx); + break; + case DPP_SETUP_DPP: + hwss_setup_dpp(params); + break; + case DPP_PROGRAM_BIAS_AND_SCALE: + hwss_program_bias_and_scale(params); + break; + case OPTC_PROGRAM_MANUAL_TRIGGER: + hwss_program_manual_trigger(params); + break; + case DPP_SET_OUTPUT_TRANSFER_FUNC: + hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc, + params->set_output_transfer_func_params.pipe_ctx, + params->set_output_transfer_func_params.stream); + break; + case MPC_UPDATE_VISUAL_CONFIRM: + dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc, + params->update_visual_confirm_params.pipe_ctx, + params->update_visual_confirm_params.mpcc_id); + break; + case DMUB_SEND_DMCUB_CMD: + hwss_send_dmcub_cmd(params); + break; + default: + ASSERT(false); + break; + } + } +} + +void hwss_send_dmcub_cmd(union block_sequence_params *params) +{ + struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; + union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; + enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; + + dm_execute_dmub_cmd(ctx, cmd, wait_type); +} + +void hwss_program_manual_trigger(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; + + if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); +} + +void hwss_setup_dpp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + if (dpp && dpp->funcs->dpp_setup) { + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + } +} + +void hwss_program_bias_and_scale(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dc_bias_and_scale bns_params = {0}; + + //TODO :for CNVC set scale and bias registers if necessary + build_prescale_params(&bns_params, plane_state); + if (dpp->funcs->dpp_program_bias_and_scale) + 
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); +} + void get_mclk_switch_visual_confirm_color( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7fd9f5a9e191..7ded574f84ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -896,6 +896,7 @@ struct dc_debug_options { bool disable_dp_plus_plus_wa; uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; + bool enable_legacy_fast_update; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 54805802cbd5..42e9b6a529f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -401,6 +401,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1071,6 +1075,7 @@ static bool dce100_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index a4a45a6ce61e..46eca5a21e1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -424,6 +424,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .per_pixel_alpha = 1, @@ -1368,6 +1372,7 @@ static bool dce110_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index e179e80667d1..808855886183 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -429,6 +429,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1239,6 +1243,7 @@ static bool dce112_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index af631085e88c..18c5a86d2d61 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -526,6 +526,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, + .enable_legacy_fast_update = true, }; static struct clock_source 
*dce120_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 5825e6f412bd..3935fd455f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -418,6 +418,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; @@ -969,6 +973,7 @@ static bool dce80_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * @@ -1369,6 +1374,7 @@ static bool dce83_construct( dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 26ddf73fd5b1..4b02f8443534 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -553,6 +553,7 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6ef7e2634991..4cc8de2627ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 5ff09bf4bc30..fdba8a9f5c30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -613,6 +613,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, + .enable_legacy_fast_update = true, }; static void dcn201_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index da6d42de0554..d693ea42d033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -653,6 +653,7 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index f2d589c505a8..fc33b5fcabe1 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, + .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 8570bdc292b4..0cc853964781 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index b6e0aa2ab27a..707cf28bbceb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -884,6 +884,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 7f5cd8c8d49b..3f11992e380b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -409,6 +409,30 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, } } +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) +{ + struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; + bool lock = params->subvp_pipe_control_lock_fast_params.lock; + struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; + bool subvp_immediate_flip = false; + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { + if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && + pipe_ctx->plane_state->flip_immediate) + subvp_immediate_flip = true; + } + + // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
+ if (subvp_immediate_flip) { + union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; + + hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; + hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; + hw_lock_cmd.bits.lock = lock; + hw_lock_cmd.bits.should_release = !lock; + dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); + } +} bool dcn32_set_mpc_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index 6dbe929cf599..2d2628f31bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -84,6 +84,8 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, struct pipe_ctx *top_pipe_to_program, bool subvp_prev_use); +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params); + void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8356b31e1d9a..c2490e16a66a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d8dd143cf6ea..034610b74a37 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -37,6 +37,7 @@ #include "dwb.h" #include "mcif_wb.h" #include "panel_cntl.h" +#include "dmub/inc/dmub_cmd.h" #define MAX_CLOCK_SOURCES 7 #define MAX_SVP_PHANTOM_STREAMS 2 @@ -499,6 +500,11 @@ struct bw_context { struct display_mode_lib dml; }; +struct dc_dmub_cmd { + union dmub_rb_cmd dmub_cmd; + enum dm_dmub_wait_type wait_type; +}; + /** * struct dc_state - The full description of a state requested by users */ @@ -547,6 +553,11 @@ struct dc_state { */ struct bw_context bw_ctx; + struct block_sequence block_sequence[50]; + unsigned int block_sequence_steps; + struct dc_dmub_cmd dc_dmub_cmd[10]; + unsigned int dmub_cmd_count; + /** * @refcount: refcount reference * diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index df160c6a630c..cc0a3a992f7b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -44,6 +44,112 @@ struct dc_virtual_addr_space_config; struct dpp; struct dce_hwseq; struct link_resource; +struct dc_dmub_cmd; + +struct subvp_pipe_control_lock_fast_params { + struct dc *dc; + bool lock; + struct pipe_ctx *pipe_ctx; +}; + +struct pipe_control_lock_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool lock; +}; + +struct set_flip_control_gsl_params { + struct pipe_ctx *pipe_ctx; + bool flip_immediate; +}; + +struct program_triplebuffer_params { + const struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool enableTripleBuffer; +}; + 
+struct update_plane_addr_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_input_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_plane_state *plane_state; +}; + +struct program_gamut_remap_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_manual_trigger_params { + struct pipe_ctx *pipe_ctx; +}; + +struct send_dmcub_cmd_params { + struct dc_context *ctx; + union dmub_rb_cmd *cmd; + enum dm_dmub_wait_type wait_type; +}; + +struct setup_dpp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_bias_and_scale_params { + struct pipe_ctx *pipe_ctx; +}; + +struct set_output_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + const struct dc_stream_state *stream; +}; + +struct update_visual_confirm_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + int mpcc_id; +}; + +union block_sequence_params { + struct update_plane_addr_params update_plane_addr_params; + struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; + struct pipe_control_lock_params pipe_control_lock_params; + struct set_flip_control_gsl_params set_flip_control_gsl_params; + struct program_triplebuffer_params program_triplebuffer_params; + struct set_input_transfer_func_params set_input_transfer_func_params; + struct program_gamut_remap_params program_gamut_remap_params; + struct program_manual_trigger_params program_manual_trigger_params; + struct send_dmcub_cmd_params send_dmcub_cmd_params; + struct setup_dpp_params setup_dpp_params; + struct program_bias_and_scale_params program_bias_and_scale_params; + struct set_output_transfer_func_params set_output_transfer_func_params; + struct update_visual_confirm_params update_visual_confirm_params; +}; + +enum block_sequence_func { + DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0, + OPTC_PIPE_CONTROL_LOCK, + HUBP_SET_FLIP_CONTROL_GSL, + HUBP_PROGRAM_TRIPLEBUFFER, + HUBP_UPDATE_PLANE_ADDR, + DPP_SET_INPUT_TRANSFER_FUNC, + DPP_PROGRAM_GAMUT_REMAP, + OPTC_PROGRAM_MANUAL_TRIGGER, + DMUB_SEND_DMCUB_CMD, + DPP_SETUP_DPP, + DPP_PROGRAM_BIAS_AND_SCALE, + DPP_SET_OUTPUT_TRANSFER_FUNC, + MPC_UPDATE_VISUAL_CONFIRM, +}; + +struct block_sequence { + union block_sequence_params params; + enum block_sequence_func func; +}; struct hw_sequencer_funcs { void (*hardware_release)(struct dc *dc); @@ -252,6 +358,7 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); + void (*subvp_pipe_control_lock_fast)(union block_sequence_params *params); void (*z10_restore)(const struct dc *dc); void (*z10_save_init)(struct dc *dc); @@ -313,4 +420,23 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps); + +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx); + +void hwss_send_dmcub_cmd(union block_sequence_params *params); + +void hwss_program_manual_trigger(union block_sequence_params *params); + +void hwss_setup_dpp(union block_sequence_params *params); + +void hwss_program_bias_and_scale(union block_sequence_params *params); + #endif /* __DC_HW_SEQUENCER_H__ */ -- cgit From 3b718dcaf163d17fe907ea098c8449e0cd6bc271 Mon Sep 17 00:00:00 2001 From: Austin Zheng Date: Wed, 24 May 2023 11:52:12 -0400 Subject: drm/amd/display: Filter out AC mode frequencies on DC mode systems 
Why: Limit maximum clock speeds to DC mode limits for DC mode systems How: Store DC mode limits when individual clocks are initialized and cap the values when building the clock table Acked-by: Stylon Wang Signed-off-by: Austin Zheng Reviewed-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 13 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 140 ++++++++++++++------ .../gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 144 +++++++++++++++------ drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1 + 5 files changed, 216 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/inc') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 20bff6a346ba..96fa68f166e0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -182,23 +182,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_entries_per_clk->num_dcfclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); /* SOCCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK, &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, &num_entries_per_clk->num_socclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); /* DTBCLK */ - if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) + if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) { dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_entries_per_clk->num_dtbclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = + dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); + } /* DISPCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, &num_entries_per_clk->num_dispclk_levels); num_levels = num_entries_per_clk->num_dispclk_levels; + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); + //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x + if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; if (num_entries_per_clk->num_dcfclk_levels && num_entries_per_clk->num_dtbclk_levels && @@ -817,6 +826,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_UCLK, &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); /* memclk must have at least one level */ num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? 
num_entries_per_clk->num_memclk_levels : 1; @@ -824,6 +834,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { num_levels = num_entries_per_clk->num_memclk_levels; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7ded574f84ff..360dd83b1a7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -897,6 +897,7 @@ struct dc_debug_options { uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; bool enable_legacy_fast_update; + bool disable_dc_mode_overwrite; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index ad6ee48580f8..fa3678342abb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2325,14 +2325,48 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -2343,51 +2377,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if 
(bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) + max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + 
max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = max_dcfclk_mhz * dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; @@ -2401,7 +2460,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; @@ -2429,7 +2488,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - entry.fabricclk_mhz = max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; insert_entry_into_table_sorted(table, num_entries, &entry); @@ -2441,9 +2500,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); } @@ -2756,7 +2815,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; } } else { - build_synthetic_soc_states(bw_params, dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); } /* Re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 1aaff6f2d453..f0683fd9d3f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -252,14 +252,48 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be 
overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -270,53 +304,78 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) + 
max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = max_dcfclk_mhz * dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / dcn3_21_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; - entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; - entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; + entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; + entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; // Insert all the DCFCLK STAs for (i = 0; i < num_dcfclk_stas; i++) { @@ -328,7 +387,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; @@ -356,7 +415,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - entry.fabricclk_mhz = 
max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); @@ -368,9 +427,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); } @@ -689,7 +748,8 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; } } else { - build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); } /* Re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index bef843cc32a1..6faf40fa5c69 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -233,6 +233,7 @@ struct clk_bw_params { struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; + struct clk_limit_table_entry dc_mode_limit; }; /* Public interfaces */ -- cgit
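As a rough illustration of the DC-mode capping introduced in the patch above: a minimal standalone sketch, not part of the driver. It mirrors the idea of override_max_clk_values()/build_synthetic_soc_states() -- when the DC-mode overwrite is not disabled, every DC-mode limit that firmware reported as non-zero replaces the corresponding AC-mode maximum before the synthetic clock table is built. The struct clk_limits type, cap_to_dc_mode() helper, and the sample frequencies are invented for this example only.

/*
 * Illustrative sketch, assuming a simplified three-entry clock limit struct.
 * Build: cc -std=c99 dc_mode_cap_example.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct clk_limits {
	uint32_t dcfclk_mhz;
	uint32_t dispclk_mhz;
	uint32_t memclk_mhz;
};

/* Copy each non-zero DC-mode limit over the current (AC-mode) maximum. */
static void cap_to_dc_mode(const struct clk_limits *dc_mode_limit,
			   struct clk_limits *max_clk)
{
	if (dc_mode_limit->dcfclk_mhz != 0)
		max_clk->dcfclk_mhz = dc_mode_limit->dcfclk_mhz;
	if (dc_mode_limit->dispclk_mhz != 0)
		max_clk->dispclk_mhz = dc_mode_limit->dispclk_mhz;
	if (dc_mode_limit->memclk_mhz != 0)
		max_clk->memclk_mhz = dc_mode_limit->memclk_mhz;
}

int main(void)
{
	/* Maxima found by scanning the full (AC-mode) DPM table; sample values. */
	struct clk_limits max_clk = {
		.dcfclk_mhz = 1564, .dispclk_mhz = 2150, .memclk_mhz = 1250,
	};
	/* DC-mode limits reported by firmware; 0 means "not initialized", so the AC value is kept. */
	const struct clk_limits dc_mode_limit = {
		.dcfclk_mhz = 1200, .dispclk_mhz = 1950, .memclk_mhz = 0,
	};
	bool disable_dc_mode_overwrite = false;

	if (!disable_dc_mode_overwrite)
		cap_to_dc_mode(&dc_mode_limit, &max_clk);

	/* Prints: dcfclk=1200 dispclk=1950 memclk=1250 */
	printf("dcfclk=%u dispclk=%u memclk=%u\n",
	       (unsigned int)max_clk.dcfclk_mhz,
	       (unsigned int)max_clk.dispclk_mhz,
	       (unsigned int)max_clk.memclk_mhz);
	return 0;
}

The capped maxima then play the role of max_clk_data in build_synthetic_soc_states(): states requiring higher clocks than the capped values are dropped, so a DC-mode system never advertises AC-only frequencies.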