author    | Daniel Vetter <daniel.vetter@ffwll.ch> | 2023-08-04 11:10:18 +0200
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2023-08-04 11:10:18 +0200
commit    | 3d00c59d147724e536b415e389445ece6fcda42f (patch)
tree      | b7abf11faad68372dfd889eb644a825f4cdb19a2 /drivers/gpu/drm/amd/display/dc
parent    | 52920704df878050123dfeb469aa6ab8022547c1 (diff)
parent    | 7ea1db28119e237d634c6f74ba52056939c009ad (diff)
Merge tag 'amd-drm-next-6.6-2023-07-28' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.6-2023-07-28:
amdgpu:
- Lots of checkpatch cleanups
- GFX 9.4.3 updates
- Add USB PD and IFWI flashing documentation
- GPUVM updates
- RAS fixes
- DRR fixes
- FAMS fixes
- Virtual display fixes
- Soft IH fixes
- SMU13 fixes
- Rework PSP firmware loading for other IPs
- Kernel doc fixes
- DCN 3.0.1 fixes
- LTTPR fixes
- DP MST fixes
- DCN 3.1.6 fixes
- SubVP fixes
- Display bandwidth calculation fixes
- VCN4 secure submission fixes
- Allow building DC on RISC-V
- Add visible FB info to bo_print_info
- HBR3 fixes
- Add PSP 14.0 support
- GFX9 MCBP fix
- GMC10 vmhub index fix
- GMC11 vmhub index fix
- Create a new doorbell manager
- SR-IOV fixes
amdkfd:
- Cleanup CRIU dma-buf handling
- Use KIQ to unmap HIQ
- GFX 9.4.3 debugger updates
- GFX 9.4.2 debugger fixes
- Enable cooperative groups for gfx11
- SVM fixes
radeon:
- Lots of checkpatch cleanups
Merge conflicts:
- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
The switch to the drm_exec helpers in 8a206685d36f ("drm/amdgpu: use
drm_exec for GEM and CSA handling v2") clashed with the cosmetic
cleanups from 30953c4d000b ("drm/amdgpu: Fix style issues in
amdgpu_gem.c"). I kept the former since the cleaned-up code is gone
(a minimal drm_exec locking sketch follows the commit message below).
- drivers/gpu/drm/amd/amdgpu/atom.c
adf64e214280 ("drm/amd: Avoid reading the VBIOS part number
twice") removed code that 992b8fe106ab ("drm/radeon: Replace
all non-returning strlcpy with strscpy") polished.
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230728214228.8102-1-alexander.deucher@amd.com
[sima: some merge conflict wrangling as noted]
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
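For context on the amdgpu_gem.c conflict noted above: the drm_exec helpers from 8a206685d36f wrap the ww-mutex "drop everything and retry" dance used when locking multiple GEM object reservations. The fragment below is only a minimal, hypothetical sketch of that locking pattern, not code from this merge; the helper name lock_two_gem_objects() is invented for illustration, and drm_exec_init()'s argument list differs on newer kernels (a later change adds an object-count hint).

/*
 * Minimal sketch of the drm_exec locking pattern referenced in the
 * merge-conflict note above; illustrative only, not part of this merge.
 */
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int lock_two_gem_objects(struct drm_gem_object *a,
				struct drm_gem_object *b)
{
	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock both objects; on contention drop all locks and retry. */
		ret = drm_exec_lock_obj(&exec, a);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;

		ret = drm_exec_lock_obj(&exec, b);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
	}

	/* Both reservations are held here; do the actual GEM/CSA work. */
out:
	drm_exec_fini(&exec);
	return ret;
}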
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
75 files changed, 1516 insertions, 509 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 352e9afb85c6..e295a839ab47 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -24,7 +24,7 @@ */ #include "dm_services.h" -#include "conversion.h" +#include "basics/conversion.h" #define DIVIDER 10000 diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 84aeccf36b4b..6d2924114a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -50,12 +50,11 @@ bool dal_vector_construct( return true; } -static bool dal_vector_presized_costruct( - struct vector *vector, - struct dc_context *ctx, - uint32_t count, - void *initial_value, - uint32_t struct_size) +static bool dal_vector_presized_costruct(struct vector *vector, + struct dc_context *ctx, + uint32_t count, + void *initial_value, + uint32_t struct_size) { uint32_t i; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 27af9d3c2b73..4f005ae1516c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info( return NULL; } -static enum bp_result update_slot_layout_info( - struct dc_bios *dcb, - unsigned int i, - struct slot_layout_info *slot_layout_info, - unsigned int record_offset) +static enum bp_result update_slot_layout_info(struct dc_bios *dcb, + unsigned int i, + struct slot_layout_info *slot_layout_info, + unsigned int record_offset) { unsigned int j; struct bios_parser *bp; @@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info( } -static enum bp_result get_bracket_layout_record( - struct dc_bios *dcb, - unsigned int bracket_layout_id, - struct slot_layout_info *slot_layout_info) +static enum bp_result get_bracket_layout_record(struct dc_bios *dcb, + unsigned int bracket_layout_id, + struct slot_layout_info *slot_layout_info) { unsigned int i; unsigned int record_offset; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index cce47d3f1a13..540d19efad8f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object( } /* from graphics_object_id, find display path which includes the object_id */ -static struct atom_display_object_path_v3 *get_bios_object_from_path_v3( - struct bios_parser *bp, - struct graphics_object_id id) +static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp, + struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; @@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info( return BP_RESULT_OK; } -static struct atom_hpd_int_record *get_hpd_record_for_path_v3( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record( return NULL; } -static struct atom_connector_caps_record *get_connector_caps_record( - 
struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_OK; } -static struct atom_connector_speed_record *get_connector_speed_cap_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 7ccd96959256..3db4ef564b99 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa( stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; + + /* Checking stream / link detection ensuring that PHY is active*/ + if (dc_is_dp_signal(stream->signal) && !stream->dpms_off) + display_count++; + } for (i = 0; i < dc->link_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 2f7c8996b19d..3ba2e13d691d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -87,6 +87,14 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define regCLK1_CLK2_BYPASS_CNTL 0x029c +#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 +#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV__SHIFT 0x10 +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L +#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV_MASK 0x000F0000L + #define REG(reg_name) \ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) @@ -436,6 +444,11 @@ static DpmClocks314_t dummy_clocks; static struct dcn314_watermarks dummy_wms = { 0 }; +static struct dcn314_ss_info_table ss_info_table = { + .ss_divider = 1000, + .ss_percentage = {0, 0, 375, 375, 375} +}; + static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table) { int i, num_valid_sets; @@ -715,6 +728,20 @@ static struct clk_mgr_funcs dcn314_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + uint32_t clock_source; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } +} + void dcn314_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn314 *clk_mgr, @@ -781,9 +808,11 @@ void dcn314_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; - dce_clock_read_ss_info(&clk_mgr->base); + + 
dcn314_read_ss_info_from_lut(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ - //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.dprefclk_khz = + dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn314_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h index 171f84340eb2..e0670dafe260 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h @@ -28,6 +28,8 @@ #define __DCN314_CLK_MGR_H__ #include "clk_mgr_internal.h" +#define NUM_CLOCK_SOURCES 5 + struct dcn314_watermarks; struct dcn314_smu_watermark_set { @@ -40,6 +42,11 @@ struct clk_mgr_dcn314 { struct dcn314_smu_watermark_set smu_wm_set; }; +struct dcn314_ss_info_table { + uint32_t ss_divider; + uint32_t ss_percentage[NUM_CLOCK_SOURCES]; +}; + bool dcn314_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index d7de756301cf..0349631991b8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -55,14 +55,6 @@ struct IP_BASE struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; -static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } }, - { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } }, - { { 0x00017000, 0x02402000, 0, 0, 0, 0 } }, - { { 0x00017200, 0x02402400, 0, 0, 0, 0 } }, - { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } }, - { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } }, - { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } }; - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -73,9 +65,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L -#define REG(reg_name) \ - (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - #define TO_CLK_MGR_DCN316(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn316, base) @@ -577,36 +566,6 @@ static struct clk_mgr_funcs dcn316_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; -static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) -{ - /* get FbMult value */ - struct fixed31_32 pll_req; - unsigned int fbmult_frac_val = 0; - unsigned int fbmult_int_val = 0; - - /* - * Register value of fbmult is in 8.16 format, we are converting to 31.32 - * to leverage the fix point operations available in driver - */ - - REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ - REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ - - pll_req = dc_fixpt_from_int(fbmult_int_val); - - /* - * since fractional part is only 16 bit in register definition but is 32 bit - * in our fix point definiton, need to shift left by 16 to obtain correct value - */ - pll_req.value |= fbmult_frac_val << 16; - - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); - - /* integer part is now VCO frequency in kHz */ - return dc_fixpt_floor(pll_req); -} - void 
dcn316_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn316 *clk_mgr, @@ -660,7 +619,8 @@ void dcn316_clk_mgr_construct( clk_mgr->base.smu_present = true; // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* in case we don't get a value from the register, use default */ if (clk_mgr->base.base.dentist_vco_freq_khz == 0) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index cb992aca760d..0701d03b88a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz, prev_dppclk_khz; + int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d133e4186a52..7cac14f493f6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1047,8 +1047,10 @@ static void disable_all_writeback_pipes_for_stream( stream->writeback_info[i].wb_enabled = false; } -static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, - struct dc_stream_state *stream, bool lock) +static void apply_ctx_interdependent_lock(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream, + bool lock) { int i; @@ -3582,9 +3584,9 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->block_sequence_steps); /* Clear update flags so next flip doesn't have redundant programming * (if there's no stream update, the update flags are not cleared). + * Surface updates are cleared unconditionally at the beginning of each flip, + * so no need to clear here. 
*/ - if (top_pipe_to_program->plane_state) - top_pipe_to_program->plane_state->update_flags.raw = 0; if (top_pipe_to_program->stream) top_pipe_to_program->stream->update_flags.raw = 0; } @@ -4088,9 +4090,9 @@ static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context = dc_create_state(dc); - enum pipe_split_policy tmp_mpc_policy; - bool temp_dynamic_odm_policy; - bool temp_subvp_policy; + enum pipe_split_policy tmp_mpc_policy = 0; + bool temp_dynamic_odm_policy = 0; + bool temp_subvp_policy = 0; enum dc_status ret = DC_ERROR_UNEXPECTED; unsigned int i, j; unsigned int pipe_in_use = 0; @@ -4284,7 +4286,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c return false; } -static bool full_update_required(struct dc_surface_update *srf_updates, +static bool full_update_required(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) @@ -4292,6 +4295,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates, int i; struct dc_stream_status *stream_status; + const struct dc_state *context = dc->current_state; for (i = 0; i < surface_count; i++) { if (srf_updates && @@ -4302,7 +4306,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates, srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || - srf_updates[i].blend_tf)) + srf_updates[i].blend_tf || + srf_updates[i].surface->force_full_update || + (srf_updates[i].flip_addr && + srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || + !is_surface_in_context(context, srf_updates[i].surface))) return true; } @@ -4340,18 +4348,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates, if (stream_status == NULL || stream_status->plane_count != surface_count) return true; } + if (dc->idle_optimizations_allowed) + return true; return false; } -static bool fast_update_only(struct dc_fast_update *fast_update, +static bool fast_update_only(struct dc *dc, + struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) - && !full_update_required(srf_updates, surface_count, stream_update, stream); + && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } bool dc_update_planes_and_stream(struct dc *dc, @@ -4369,8 +4380,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * cause underflow. Apply stream configuration with minimal pipe * split first to avoid unsupported transitions for active pipes. 
*/ - bool force_minimal_pipe_splitting; - bool is_plane_addition; + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( @@ -4423,7 +4434,7 @@ bool dc_update_planes_and_stream(struct dc *dc, } update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -4569,7 +4580,7 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -5273,3 +5284,56 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } + +/***************************************************************************** + * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause + * ABM + * @dc: dc structure + * @stream: stream where vsync int state changed + * @pData: abm hw states + * + ****************************************************************************/ +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData) +{ + int i; + int edp_num; + struct pipe_ctx *pipe = NULL; + struct dc_link *link = stream->sink->link; + struct dc_link *edp_links[MAX_NUM_EDP]; + + + /*find primary pipe associated with stream*/ + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) + break; + } + + if (i == MAX_PIPES) { + ASSERT(0); + return false; + } + + dc_get_edp_links(dc, edp_links, &edp_num); + + /* Determine panel inst */ + for (i = 0; i < edp_num; i++) + if (edp_links[i] == link) + break; + + if (i == edp_num) + return false; + + if (pipe->stream_res.abm && + pipe->stream_res.abm->funcs->save_restore) + return pipe->stream_res.abm->funcs->save_restore( + pipe->stream_res.abm, + i, + pData); + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index cb2bf9a466f5..f99ec1b0efaf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type( ret = true; return ret; } + static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space) { enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 18e098568cb4..0d19d4cd1916 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const 
struct dc_link *link) return link->dc->link_srv->dp_get_verified_link_cap(link); } +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link) +{ + if (dc_is_dp_signal(link->connector_signal)) { + if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE && + link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE) + return DC_LINK_ENCODING_HDMI_TMDS; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_8b_10b_ENCODING) + return DC_LINK_ENCODING_DP_8b_10b; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_128b_132b_ENCODING) + return DC_LINK_ENCODING_DP_128b_132b; + } else if (dc_is_hdmi_signal(link->connector_signal)) { + } + + return DC_LINK_ENCODING_UNSPECIFIED; +} + bool dc_link_is_dp_sink_present(struct dc_link *link) { return link->dc->link_srv->dp_is_sink_present(link); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2f3d9a698486..d0f4b86cadf1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -69,9 +69,16 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" +#define VISUAL_CONFIRM_BASE_DEFAULT 3 +#define VISUAL_CONFIRM_BASE_MIN 1 +#define VISUAL_CONFIRM_BASE_MAX 10 +#define VISUAL_CONFIRM_DPP_OFFSET 3 #define DC_LOGGER_INIT(logger) +#define HEAD_NOT_IN_ODM -2 +#define UNABLE_TO_SPLIT -1 + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -740,7 +747,12 @@ int get_num_mpc_splits(struct pipe_ctx *pipe) int get_num_odm_splits(struct pipe_ctx *pipe) { int odm_split_count = 0; - struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + struct pipe_ctx *next_pipe = NULL; + + while (pipe->top_pipe) + pipe = pipe->top_pipe; + + next_pipe = pipe->next_odm_pipe; while (next_pipe) { odm_split_count++; next_pipe = next_pipe->next_odm_pipe; @@ -753,32 +765,35 @@ int get_num_odm_splits(struct pipe_ctx *pipe) return odm_split_count; } -static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx) +static int get_odm_split_index(struct pipe_ctx *pipe_ctx) { - *split_count = get_num_odm_splits(pipe_ctx); - *split_idx = 0; - if (*split_count == 0) { - /*Check for mpc split*/ - struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; - - *split_count = get_num_mpc_splits(pipe_ctx); - while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { - (*split_idx)++; - split_pipe = split_pipe->top_pipe; - } + struct pipe_ctx *split_pipe = NULL; + int index = 0; - /* MPO window on right side of ODM split */ - if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) - (*split_idx)++; - } else { - /*Get odm split index*/ - struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; + while (pipe_ctx->top_pipe) + pipe_ctx = pipe_ctx->top_pipe; - while (split_pipe) { - (*split_idx)++; - split_pipe = split_pipe->prev_odm_pipe; - } + split_pipe = pipe_ctx->prev_odm_pipe; + + while (split_pipe) { + index++; + split_pipe = split_pipe->prev_odm_pipe; } + + return index; +} + +static int get_mpc_split_index(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + int index = 0; + + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + index++; + split_pipe = split_pipe->top_pipe; + } + + return index; } /* @@ -800,82 +815,357 @@ static void 
calculate_viewport_size(struct pipe_ctx *pipe_ctx) } } -static void calculate_recout(struct pipe_ctx *pipe_ctx) +static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { - const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - struct rect surf_clip = plane_state->clip_rect; - bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; - int split_count, split_idx; + struct rect rec; + int r0_x_end = r0->x + r0->width; + int r1_x_end = r1->x + r1->width; + int r0_y_end = r0->y + r0->height; + int r1_y_end = r1->y + r1->height; + + rec.x = r0->x > r1->x ? r0->x : r1->x; + rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x; + rec.y = r0->y > r1->y ? r0->y : r1->y; + rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y; + + /* in case that there is no intersection */ + if (rec.width < 0 || rec.height < 0) + memset(&rec, 0, sizeof(rec)); + + return rec; +} - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) - split_idx = 0; +static struct rect shift_rec(const struct rect *rec_in, int x, int y) +{ + struct rect rec_out = *rec_in; + + rec_out.x += x; + rec_out.y += y; + + return rec_out; +} + +static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + int odm_slice_count = get_num_odm_splits(pipe_ctx) + 1; + int odm_slice_idx = get_odm_split_index(pipe_ctx); + bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count; + int h_active = stream->timing.h_addressable + + stream->timing.h_border_left + + stream->timing.h_border_right; + int odm_slice_width = h_active / odm_slice_count; + struct rect odm_rec; + + odm_rec.x = odm_slice_width * odm_slice_idx; + odm_rec.width = is_last_odm_slice ? + /* last slice width is the reminder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; + odm_rec.y = 0; + odm_rec.height = stream->timing.v_addressable + + stream->timing.v_border_bottom + + stream->timing.v_border_top; + + return odm_rec; +} +static struct rect calculate_plane_rec_in_timing_active( + struct pipe_ctx *pipe_ctx, + const struct rect *rec_in) +{ /* - * Only the leftmost ODM pipe should be offset by a nonzero distance + * The following diagram shows an example where we map a 1920x1200 + * desktop to a 2560x1440 timing with a plane rect in the middle + * of the screen. To map a plane rect from Stream Source to Timing + * Active space, we first multiply stream scaling ratios (i.e 2304/1920 + * horizontal and 1440/1200 vertical) to the plane's x and y, then + * we add stream destination offsets (i.e 128 horizontal, 0 vertical). + * This will give us a plane rect's position in Timing Active. However + * we have to remove the fractional. The rule is that we find left/right + * and top/bottom positions and round the value to the adjacent integer. 
+ * + * Stream Source Space + * ------------ + * __________________________________________________ + * |Stream Source (1920 x 1200) ^ | + * | y | + * | <------- w --------|> | + * | __________________V | + * |<-- x -->|Plane//////////////| ^ | + * | |(pre scale)////////| | | + * | |///////////////////| | | + * | |///////////////////| h | + * | |///////////////////| | | + * | |///////////////////| | | + * | |///////////////////| V | + * | | + * | | + * |__________________________________________________| + * + * + * Timing Active Space + * --------------------------------- + * + * Timing Active (2560 x 1440) + * __________________________________________________ + * |*****| Stteam Destination (2304 x 1440) |*****| + * |*****| |*****| + * |<128>| |*****| + * |*****| __________________ |*****| + * |*****| |Plane/////////////| |*****| + * |*****| |(post scale)//////| |*****| + * |*****| |//////////////////| |*****| + * |*****| |//////////////////| |*****| + * |*****| |//////////////////| |*****| + * |*****| |//////////////////| |*****| + * |*****| |*****| + * |*****| |*****| + * |*****| |*****| + * |*****|______________________________________|*****| + * + * So the resulting formulas are shown below: + * + * recout_x = 128 + round(plane_x * 2304 / 1920) + * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x + * recout_y = 0 + round(plane_y * 1440 / 1280) + * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y + * + * NOTE: fixed point division is not error free. To reduce errors + * introduced by fixed point division, we divide only after + * multiplication is complete. */ - if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) { - /* MPO window on right side of ODM split */ - data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) * - stream->dst.width / stream->src.width; - } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) { - data->recout.x = stream->dst.x; - if (stream->src.x < surf_clip.x) - data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width - / stream->src.width; - } else - data->recout.x = 0; - - if (stream->src.x > surf_clip.x) - surf_clip.width -= stream->src.x - surf_clip.x; - data->recout.width = surf_clip.width * stream->dst.width / stream->src.width; - if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width) - data->recout.width = stream->dst.x + stream->dst.width - data->recout.x; - - data->recout.y = stream->dst.y; - if (stream->src.y < surf_clip.y) - data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height - / stream->src.height; - else if (stream->src.y > surf_clip.y) - surf_clip.height -= stream->src.y - surf_clip.y; - - data->recout.height = surf_clip.height * stream->dst.height / stream->src.height; - if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height) - data->recout.height = stream->dst.y + stream->dst.height - data->recout.y; - - /* Handle h & v split */ - if (split_tb) { - ASSERT(data->recout.height % 2 == 0); - data->recout.height /= 2; - } else if (split_count) { - if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) { - /* extra pixels in the division remainder need to go to pipes after - * the extra pixel index minus one(epimo) defined here as: - */ - int epimo = split_count - data->recout.width % (split_count + 1); + const struct dc_stream_state *stream = pipe_ctx->stream; + struct rect rec_out = {0}; + struct fixed31_32 temp; - data->recout.x += (data->recout.width / 
(split_count + 1)) * split_idx; - if (split_idx > epimo) - data->recout.x += split_idx - epimo - 1; - ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0); - data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0); - } else { - /* odm */ - if (split_idx == split_count) { - /* rightmost pipe is the remainder recout */ - data->recout.width -= data->h_active * split_count - data->recout.x; - - /* ODM combine cases with MPO we can get negative widths */ - if (data->recout.width < 0) - data->recout.width = 0; - - data->recout.x = 0; - } else - data->recout.width = data->h_active - data->recout.x; - } + temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width, + stream->src.width); + rec_out.x = stream->dst.x + dc_fixpt_round(temp); + + temp = dc_fixpt_from_fraction( + (rec_in->x + rec_in->width) * stream->dst.width, + stream->src.width); + rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x; + + temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height, + stream->src.height); + rec_out.y = stream->dst.y + dc_fixpt_round(temp); + + temp = dc_fixpt_from_fraction( + (rec_in->y + rec_in->height) * stream->dst.height, + stream->src.height); + rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y; + + return rec_out; +} + +static struct rect calculate_mpc_slice_in_timing_active( + struct pipe_ctx *pipe_ctx, + struct rect *plane_clip_rec) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + int mpc_slice_count = get_num_mpc_splits(pipe_ctx) + 1; + int mpc_slice_idx = get_mpc_split_index(pipe_ctx); + int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; + struct rect mpc_rec; + + mpc_rec.width = plane_clip_rec->width / mpc_slice_count; + mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + ASSERT(mpc_slice_count == 1 || + stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || + mpc_rec.width % 2 == 0); + + /* extra pixels in the division remainder need to go to pipes after + * the extra pixel index minus one(epimo) defined here as: + */ + if (mpc_slice_idx > epimo) { + mpc_rec.x += mpc_slice_idx - epimo - 1; + mpc_rec.width += 1; } + + if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { + ASSERT(mpc_rec.height % 2 == 0); + mpc_rec.height /= 2; + } + return mpc_rec; +} + +static void adjust_recout_for_visual_confirm(struct rect *recout, + struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + int dpp_offset, base_offset; + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) + return; + + dpp_offset = pipe_ctx->plane_res.dpp->inst * VISUAL_CONFIRM_DPP_OFFSET; + + if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) && + dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX) + base_offset = dc->debug.visual_confirm_rect_height; + else + base_offset = VISUAL_CONFIRM_BASE_DEFAULT; + + recout->height -= base_offset; + recout->height -= dpp_offset; +} + +/* + * The function maps a plane clip from Stream Source Space to ODM Slice Space + * and calculates the rec of the overlapping area of MPC slice of the plane + * clip, ODM slice associated with the pipe context and stream destination rec. + */ +static void calculate_recout(struct pipe_ctx *pipe_ctx) +{ + /* + * A plane clip represents the desired plane size and position in Stream + * Source Space. Stream Source is the destination where all planes are + * blended (i.e. 
positioned, scaled and overlaid). It is a canvas where + * all planes associated with the current stream are drawn together. + * After Stream Source is completed, we will further scale and + * reposition the entire canvas of the stream source to Stream + * Destination in Timing Active Space. This could be due to display + * overscan adjustment where we will need to rescale and reposition all + * the planes so they can fit into a TV with overscan or downscale + * upscale features such as GPU scaling or VSR. + * + * This two step blending is a virtual procedure in software. In + * hardware there is no such thing as Stream Source. all planes are + * blended once in Timing Active Space. Software virtualizes a Stream + * Source space to decouple the math complicity so scaling param + * calculation focuses on one step at a time. + * + * In the following two diagrams, user applied 10% overscan adjustment + * so the Stream Source needs to be scaled down a little before mapping + * to Timing Active Space. As a result the Plane Clip is also scaled + * down by the same ratio, Plane Clip position (i.e. x and y) with + * respect to Stream Source is also scaled down. To map it in Timing + * Active Space additional x and y offsets from Stream Destination are + * added to Plane Clip as well. + * + * Stream Source Space + * ------------ + * __________________________________________________ + * |Stream Source (3840 x 2160) ^ | + * | y | + * | | | + * | __________________V | + * |<-- x -->|Plane Clip/////////| | + * | |(pre scale)////////| | + * | |///////////////////| | + * | |///////////////////| | + * | |///////////////////| | + * | |///////////////////| | + * | |///////////////////| | + * | | + * | | + * |__________________________________________________| + * + * + * Timing Active Space (3840 x 2160) + * --------------------------------- + * + * Timing Active + * __________________________________________________ + * | y_____________________________________________ | + * |x |Stream Destination (3456 x 1944) | | + * | | | | + * | | __________________ | | + * | | |Plane Clip////////| | | + * | | |(post scale)//////| | | + * | | |//////////////////| | | + * | | |//////////////////| | | + * | | |//////////////////| | | + * | | |//////////////////| | | + * | | | | + * | | | | + * | |____________________________________________| | + * |__________________________________________________| + * + * + * In Timing Active Space a plane clip could be further sliced into + * pieces called MPC slices. Each Pipe Context is responsible for + * processing only one MPC slice so the plane processing workload can be + * distributed to multiple DPP Pipes. MPC slices could be blended + * together to a single ODM slice. Each ODM slice is responsible for + * processing a portion of Timing Active divided horizontally so the + * output pixel processing workload can be distributed to multiple OPP + * pipes. All ODM slices are mapped together in ODM block so all MPC + * slices belong to different ODM slices could be pieced together to + * form a single image in Timing Active. MPC slices must belong to + * single ODM slice. If an MPC slice goes across ODM slice boundary, it + * needs to be divided into two MPC slices one for each ODM slice. + * + * In the following diagram the output pixel processing workload is + * divided horizontally into two ODM slices one for each OPP blend tree. + * OPP0 blend tree is responsible for processing left half of Timing + * Active, while OPP2 blend tree is responsible for processing right + * half. 
+ * + * The plane has two MPC slices. However since the right MPC slice goes + * across ODM boundary, two DPP pipes are needed one for each OPP blend + * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree). + * + * Assuming that we have a Pipe Context associated with OPP0 and DPP1 + * working on processing the plane in the diagram. We want to know the + * width and height of the shaded rectangle and its relative position + * with respect to the ODM slice0. This is called the recout of the pipe + * context. + * + * Planes can be at arbitrary size and position and there could be an + * arbitrary number of MPC and ODM slices. The algorithm needs to take + * all scenarios into account. + * + * Timing Active Space (3840 x 2160) + * --------------------------------- + * + * Timing Active + * __________________________________________________ + * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | + * | y | | + * | | <- w -> | + * | _____V________|____ | + * | |DPP0 ^ |DPP1 |DPP2| | + * |<------ x |-----|->|/////| | | + * | | | |/////| | | + * | | h |/////| | | + * | | | |/////| | | + * | |_____V__|/////|____| | + * | | | + * | | | + * | | | + * |_________________________|________________________| + * + * + */ + struct rect plane_clip; + struct rect mpc_slice_of_plane_clip; + struct rect odm_slice; + struct rect overlapping_area; + + plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx, + &pipe_ctx->plane_state->clip_rect); + /* guard plane clip from drawing beyond stream dst here */ + plane_clip = intersect_rec(&plane_clip, + &pipe_ctx->stream->dst); + mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active( + pipe_ctx, &plane_clip); + odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); + overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice); + /* shift the overlapping area so it is with respect to current ODM + * slice's position + */ + pipe_ctx->plane_res.scl_data.recout = shift_rec( + &overlapping_area, + -odm_slice.x, -odm_slice.y); + + adjust_recout_for_visual_confirm(&pipe_ctx->plane_res.scl_data.recout, + pipe_ctx); } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -997,34 +1287,25 @@ static void calculate_init_and_vp( static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; + struct rect recout_dst_in_active_timing; + struct rect recout_clip_in_active_timing; + struct rect recout_clip_in_recout_dst; + struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - /* - * recout full is what the recout would have been if we didnt clip - * the source plane at all. We only care about left(ro_lb) and top(ro_tb) - * offsets of recout within recout full because those are the directions - * we scan from and therefore the only ones that affect inits. 
- */ - recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) - * stream->dst.width / stream->src.width; - recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) - * stream->dst.height / stream->src.height; - if (pipe_ctx->prev_odm_pipe && split_idx) - ro_lb = data->h_active * split_idx - recout_full_x; - else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe) - ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x; - else - ro_lb = data->recout.x - recout_full_x; - ro_tb = data->recout.y - recout_full_y; - ASSERT(ro_lb >= 0 && ro_tb >= 0); - + recout_clip_in_active_timing = shift_rec( + &data->recout, odm_slice.x, odm_slice.y); + recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( + pipe_ctx, &plane_state->dst_rect); + recout_clip_in_recout_dst = shift_rec(&recout_clip_in_active_timing, + -recout_dst_in_active_timing.x, + -recout_dst_in_active_timing.y); + ASSERT(recout_clip_in_recout_dst.x >= 0 && + recout_clip_in_recout_dst.y >= 0); /* * Work in recout rotation since that requires less transformations */ @@ -1042,7 +1323,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width, data->taps.h_taps, @@ -1052,7 +1333,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.width); calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width / vpc_div, data->taps.h_taps_c, @@ -1062,7 +1343,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport_c.width); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height, data->taps.v_taps, @@ -1072,7 +1353,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.height); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height / vpc_div, data->taps.v_taps_c, @@ -1097,6 +1378,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1121,30 +1403,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dst.y += timing->v_border_top; /* Calculate H and V active size */ - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + - timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + - timing->v_border_top + timing->v_border_bottom; - if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) { - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; - - DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1, - pipe_ctx->prev_odm_pipe ? 
pipe_ctx->prev_odm_pipe->pipe_idx : -1); - } /* ODM + windows MPO, where window is on either right or left ODM half */ - else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) { - - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1; - - DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->top_pipe->pipe_idx, - pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1, - pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1); - } + pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; + pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + /* depends on h_active */ calculate_recout(pipe_ctx); /* depends on pixel format */ @@ -1449,7 +1710,24 @@ static int acquire_first_split_pipe( return i; } else if (split_pipe->prev_odm_pipe && split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { + + // Fix case where ODM slice has child planes + // Re-attach child planes + struct pipe_ctx *temp_head_pipe = resource_get_head_pipe_for_stream(res_ctx, split_pipe->stream); + + if (split_pipe->bottom_pipe && temp_head_pipe) { + + struct pipe_ctx *temp_tail_pipe = resource_get_tail_pipe(res_ctx, temp_head_pipe); + + if (temp_tail_pipe) { + + split_pipe->bottom_pipe->top_pipe = temp_tail_pipe; + temp_tail_pipe->bottom_pipe = split_pipe->bottom_pipe; + } + } + split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; + if (split_pipe->next_odm_pipe) split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; @@ -1457,6 +1735,11 @@ static int acquire_first_split_pipe( resource_build_scaling_params(split_pipe->prev_odm_pipe); memset(split_pipe, 0, sizeof(*split_pipe)); + + // We cannot split if head pipe is not odm + if (temp_head_pipe && !temp_head_pipe->next_odm_pipe && !temp_head_pipe->prev_odm_pipe) + return HEAD_NOT_IN_ODM; + split_pipe->stream_res.tg = pool->timing_generators[i]; split_pipe->plane_res.hubp = pool->hubps[i]; split_pipe->plane_res.ipp = pool->ipps[i]; @@ -1469,7 +1752,7 @@ static int acquire_first_split_pipe( return i; } } - return -1; + return UNABLE_TO_SPLIT; } bool dc_add_plane_to_context( @@ -1521,6 +1804,10 @@ bool dc_add_plane_to_context( int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; + else if (pipe_idx == HEAD_NOT_IN_ODM) + break; + else + ASSERT(false); } if (!free_pipe) { @@ -1677,12 +1964,14 @@ bool dc_add_plane_to_context( (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= free_pipe->stream->src.x + free_pipe->stream->src.width/2))) { if (!free_pipe->next_odm_pipe && - tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { + tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe && + tail_pipe->next_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) { free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; } if (!free_pipe->prev_odm_pipe && - tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { + tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe && + tail_pipe->prev_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) { free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; 
tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6e11d2b701f8..ea3d4b328e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -306,6 +306,32 @@ bool dc_optimize_timing_for_fsft( } #endif +static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) +{ + uint32_t refresh_rate; + struct dc *dc = stream->ctx->dc; + + refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + + stream->timing.v_total * stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, stream->timing.h_total); + + /* If there's any stream that fits the SubVP high refresh criteria, + * we must return true. This is because cursor updates are asynchronous + * with full updates, so we could transition into a SubVP config and + * remain in HW cursor mode if there's no cursor update which will + * then cause corruption. + */ + if ((refresh_rate >= 120 && refresh_rate <= 165 && + stream->timing.v_addressable >= 1440 && + stream->timing.v_addressable <= 2160) && + (dc->current_state->stream_count > 1 || + (dc->current_state->stream_count == 1 && !stream->allow_freesync))) + return true; + + return false; +} + /* * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address */ @@ -334,12 +360,13 @@ bool dc_stream_set_cursor_attributes( /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4. * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case: - * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz - * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz - * - * [< 120hz is a requirement for SubVP configs] + * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs) + * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz + * 3. 
If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { + if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) + return false; if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 63948170fd6d..eadb53853131 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -40,12 +40,14 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" +struct abm_save_restore; + /* forward declaration */ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.241" +#define DC_VER "3.2.244" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -506,7 +508,7 @@ enum dcn_zstate_support_state { DCN_ZSTATE_SUPPORT_DISALLOW, }; -/** +/* * struct dc_clocks - DC pipe clocks * * For any clocks that may differ per pipe only the max is stored in this @@ -728,7 +730,7 @@ struct resource_pool; struct dce_hwseq; struct link_service; -/** +/* * struct dc_debug_options - DC debug struct * * This struct provides a simple mechanism for developers to change some @@ -756,7 +758,7 @@ struct dc_debug_options { bool use_max_lb; enum dcc_option disable_dcc; - /** + /* * @pipe_split_policy: Define which pipe split policy is used by the * display core. */ @@ -861,6 +863,7 @@ struct dc_debug_options { bool psr_skip_crtc_disable; union dpia_debug_options dpia_debug; bool disable_fixed_vs_aux_timeout_wa; + uint32_t fixed_vs_aux_delay_config_wa; bool force_disable_subvp; bool force_subvp_mclk_switch; bool allow_sw_cursor_fallback; @@ -1334,7 +1337,7 @@ struct dc_validation_set { struct dc_stream_state *stream; /** - * @plane_state: Surface state + * @plane_states: Surface state */ struct dc_plane_state *plane_states[MAX_SURFACES]; @@ -1409,10 +1412,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +void dc_set_disable_128b_132b_stream_overhead(bool disable); + /* The function returns minimum bandwidth required to drive a given timing * return - minimum required timing bandwidth in kbps. */ -uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing); +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding); /* Link Interfaces */ /* @@ -1514,6 +1521,7 @@ struct dc_link { enum edp_revision edp_revision; union dpcd_sink_ext_caps dpcd_sink_ext_caps; + struct backlight_settings backlight_settings; struct psr_settings psr_settings; /* Drive settings read from integrated info table */ @@ -1849,6 +1857,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format( */ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link); +/* Get the highest encoding format that the link supports; highest meaning the + * encoding format which supports the maximum bandwidth. + * + * @link - a link with DP RX connection + * return - highest encoding format link supports. + */ +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link); + /* Check if a RX (ex. 
DP sink, MST hub, passive or active dongle) is connected * to a link with dp connector signal type. * @link - a link with dp connector signal type @@ -2230,6 +2246,11 @@ void dc_z10_save_init(struct dc *dc); bool dc_is_dmub_outbox_supported(struct dc *dc); bool dc_enable_dmub_notifications(struct dc *dc); +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData); + void dc_enable_dmub_outbox(struct dc *dc); bool dc_process_dmub_aux_transfer_async(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index c753c6f30dd7..24433409d7de 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -381,6 +381,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; + if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) + return; + memset(&cmd, 0, sizeof(cmd)); /* Prepare fw command */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 9491b76d61f5..fe3078b8789e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( @@ -81,6 +82,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0ce7728a5a4b..14d7804b70b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -189,7 +189,6 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; - unsigned int delay_disable_aux_intercept_ms; }; struct dc_edid_caps { @@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_0_mps; /* In MPs */ uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; - bool is_dp; + bool is_dp; /* Decoded format */ }; struct dc_golden_table { @@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_AGP }; +enum dc_link_encoding_format { + DC_LINK_ENCODING_UNSPECIFIED = 0, + DC_LINK_ENCODING_DP_8b_10b, + DC_LINK_ENCODING_DP_128b_132b, + DC_LINK_ENCODING_HDMI_TMDS, + DC_LINK_ENCODING_HDMI_FRL +}; + enum dc_psr_version { DC_PSR_VERSION_1 = 0, DC_PSR_VERSION_SU_1 = 1, @@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; +struct backlight_settings { + uint32_t backlight_millinits; +}; + /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 63009db8b5a7..b87bfecb7755 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu) } static bool dce_dmcu_load_iram(struct dmcu *dmcu, - unsigned 
int start_offset, - const char *src, - unsigned int bytes) + unsigned int start_offset, + const char *src, + unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 6d1b01c267b7..4f552c3e7663 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( return dce_i2c_hw; } -static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) +static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) { enum i2c_channel_operation_result result; uint32_t i = 0; @@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw( return period_timeout * num_of_clock_stretches; } -static bool dce_i2c_hw_engine_submit_payload( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_payload *payload, - bool middle_of_transaction, - uint32_t speed) +static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, + struct i2c_payload *payload, + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index f1aeb6d1967c..e188447c8156 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine( return true; } + bool dce_i2c_engine_acquire_sw( struct dce_i2c_sw *dce_i2c_sw, struct ddc *ddc_handle) @@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw( return result; } - - - -static void dce_i2c_sw_engine_submit_channel_request( - struct dce_i2c_sw *engine, - struct i2c_request_transaction_data *req) +static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine, + struct i2c_request_transaction_data *req) { struct ddc *ddc = engine->ddc; uint16_t clock_delay_div_4 = engine->clock_delay >> 2; @@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request( I2C_CHANNEL_OPERATION_FAILED; } -static bool dce_i2c_sw_engine_submit_payload( - struct dce_i2c_sw *engine, - struct i2c_payload *payload, - bool middle_of_transaction) +static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, + struct i2c_payload *payload, + bool middle_of_transaction) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 2fb9572ce25d..d3e6544022b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -27,6 +27,7 @@ #include "dmub_abm_lcd.h" #include "dc.h" #include "core_types.h" +#include "dmub_cmd.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) @@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane return ret; } +/***************************************************************************** + * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's + * Varibright states for LCD only. 
OLED is TBD + * @abm: used to check get dc context + * @panel_inst: panel instance index + * @pData: contains command to pause/un-pause abm and abm parameters + * + * + ***************************************************************************/ +static bool dmub_abm_save_restore_ex( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + bool ret = false; + unsigned int feature_support; + struct dc_context *dc = abm->ctx; + + feature_support = abm_feature_support(abm, panel_inst); + + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_save_restore(dc, panel_inst, pData); + + return ret; +} + static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { bool ret = false; @@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = { .get_target_backlight = dmub_abm_get_target_backlight_ex, .init_abm_config = dmub_abm_init_config_ex, .set_abm_pause = dmub_abm_set_pause_ex, + .save_restore = dmub_abm_save_restore_ex, .set_pipe_ex = dmub_abm_set_pipe_ex, .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 39da73eba86e..592a8f7a1c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un return true; } + +/***************************************************************************** + * dmub_abm_save_restore() - dmub interface for abm save+pause and restore+ + * un-pause + * @dc: dc context + * @panel_inst: panel instance index + * @pData: contains command to pause/un-pause abm and exchange abm parameters + * + * When called Pause will get abm data and store in pData, and un-pause will + * set/apply abm data stored in pData. 
+ * + *****************************************************************************/ +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + union dmub_rb_cmd cmd; + uint8_t panel_mask = 0x01 << panel_inst; + unsigned int bytes = sizeof(struct abm_save_restore); + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes); + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_save_restore.header.type = DMUB_CMD__ABM; + cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE; + + cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_save_restore.abm_init_config_data.bytes = bytes; + cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + // Copy iramtable data into local structure + memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 00b4e268768e..853564d7f471 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -28,6 +28,8 @@ #include "abm.h" +struct abm_save_restore; + void dmub_abm_init(struct abm *abm, uint32_t backlight); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); @@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm, unsigned int inst); bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst); +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData); bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); bool dmub_abm_set_backlight_level(struct abm *abm, unsigned int backlight_pwm_u16_16, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 6c9ca43d1040..20d4d08a6a2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + power_down_all_hw_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3935fd455f0f..061221394ce0 100644 --- 
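[Illustrative sketch, not part of the patch] The ABM save/restore path added above is a synchronous shared-memory round trip: dmub_abm_save_restore() copies the caller's abm_save_restore buffer into the DMUB scratch framebuffer, issues a DMUB_CMD__ABM_SAVE_RESTORE command pointing at it, waits for completion, then copies the (possibly updated) buffer back. A hypothetical DM-side caller could pair a save with a later restore through the new dc_abm_save_restore() entry point; the pause/un-pause command travels inside struct abm_save_restore itself, whose layout is not shown in this hunk, so that detail is left abstract below.

/* Minimal usage sketch; per the hunk above only LCD panels (ABM_LCD_SUPPORT)
 * take the DMUB path today, OLED is still TBD.
 */
static void example_abm_roundtrip(struct dc *dc, struct dc_stream_state *stream)
{
	struct abm_save_restore vb_state = {0};

	/* save + pause: DMUB fills vb_state with the current Vari-Bright state */
	if (!dc_abm_save_restore(dc, stream, &vb_state))
		return;

	/* ... reconfiguration that would otherwise disturb ABM runs here ... */

	/* restore + un-pause: DMUB re-applies the state captured above */
	dc_abm_save_restore(dc, stream, &vb_state);
}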
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -58,13 +58,13 @@ #include "dce/dce_i2c.h" /* TODO remove this include */ -#include "dce80_resource.h" - #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif +#include "dce80/dce80_resource.h" + #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index b33955928bd0..7e140c35a0ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,9 +39,6 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -591,18 +588,6 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { - int visual_confirm_on = 0; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; - - if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - visual_confirm_on = 1; - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; - } - REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -613,8 +598,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); + RECOUT_HEIGHT, recout->height); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a50309039d08..9834b75f1837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect( if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; hubp->funcs->set_blank(hubp, true); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ee08b545aaea..377f1ba1a81b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - if (!dcn10_is_dig_enabled(enc)) { + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) { /* 
OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ /*in DP_Alt_No_Connect case, we turn off the dig already, after excuation the PHY w/a sequence, not allow touch PHY any more*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4492bc2392b6..e32d3246e82a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data( enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; - - int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; if (stream->link->test_pattern_enabled) return; @@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data( for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) odm_cnt++; - - width = width / odm_cnt; + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); if (blank) { dc->hwss.set_abm_immediate_disable(pipe_ctx); @@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data( test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } - dc->hwss.set_disp_pattern_generator(dc, - pipe_ctx, - test_pattern, - test_pattern_color_space, - stream->timing.display_color_depth, - &black_color, - width, - height, - 0); + odm_pipe = pipe_ctx; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + while (odm_pipe->next_odm_pipe) { dc->hwss.set_disp_pattern_generator(dc, - odm_pipe, - dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? 
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + pipe_ctx, + test_pattern, test_pattern_color_space, stream->timing.display_color_depth, &black_color, - width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + dc->hwss.set_disp_pattern_generator(dc, + odm_pipe, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + &black_color, + last_odm_slice_width, + v_active, + offset); + if (!blank && dc->debug.enable_single_display_2to1_odm_policy) { /* when exiting dynamic ODM need to reinit DPG state for unused pipes */ struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe; @@ -2123,6 +2126,15 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc_dmub_srv_p_state_delegate(dc, + true, context); + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->clks.fw_based_mclk_switching = true; + } else { + dc->clk_mgr->clks.fw_based_mclk_switching = false; + } + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c index 33fc9aa8621b..d07c04458d31 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c @@ -43,7 +43,7 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h index e44a37491c1e..b7efa777ec73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h @@ -32,6 +32,5 @@ struct dccg *dccg21_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); #endif /* __DCN21_DCCG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d693ea42d033..82dfcf773b1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -854,8 +854,8 @@ bool dcn21_fast_validate_bw(struct dc *dc, /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index bf8864bc8a99..4cd4ae07d73d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -949,13 +949,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { + bool 
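[Illustrative sketch, not part of the patch] The reworked dcn20_blank_pixel_data() above no longer gives every ODM pipe the same width: each pipe gets h_active / odm_cnt pixels, the last pipe absorbs the remainder, and the DPG offset advances by one slice width per pipe. The arithmetic, with example numbers:

/* Slice math used above; values chosen only for illustration. */
static void example_odm_slice_math(void)
{
	int h_active = 4096, odm_cnt = 3;
	int odm_slice_width = h_active / odm_cnt;                               /* 1365 */
	int last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);  /* 1366 */

	/* DPG programming: pipe 0 at offset 0, pipe 1 at offset 1365, and the
	 * last pipe at offset 2730 with the wider 1366-pixel slice, so
	 * 1365 + 1365 + 1366 == 4096 covers the full active width exactly.
	 */
	(void)odm_slice_width;
	(void)last_odm_slice_width;
}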
p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + /* + * enabled -> enabled: do not disable + * enabled -> disabled: disable + * disabled -> enabled: don't care + * disabled -> disabled: don't care + */ + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. */ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index dfb8f62765f2..5bf4d0aa6230 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc, optc1->opp_count = 1; } -static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); } -static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index fb06dc9a4893..d3a056c12b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); void optc3_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing); +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index abe4c12a10b5..f5bfcd2a0dbc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1705,8 +1705,8 @@ 
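[Illustrative note, not part of the patch] dcn30_prepare_bandwidth() above clamps p_state_change_support to false whenever either the incoming or the currently active config uses FW-based (FPO) MCLK switching, so driver and DMUB cannot disagree mid-transition, and restores the saved value afterwards so the later optimize step still sees the real capability. Read together with the dcn20_optimize_bandwidth() change earlier in this diff, the delegate handling works out to:

/* old FPO state -> new FPO state : dc_dmub_srv_p_state_delegate() handling  */
/* enabled  -> enabled  : left delegated (prepare does not disable it)       */
/* enabled  -> disabled : prepare disables the delegation (delegate = false) */
/* disabled -> enabled  : nothing to undo; optimize_bandwidth enables it     */
/* disabled -> disabled : nothing to do                                      */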
noinline bool dcn30_internal_validate_bw( /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 7aa628c21973..9002cb10a6ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -11,7 +11,8 @@ # Makefile for dcn30. DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o + dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \ + dcn301_optc.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c new file mode 100644 index 000000000000..b3cfcb887905 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c @@ -0,0 +1,185 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn301_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + + +/** + * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. 
+ */ +void optc301_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); + + REG_UPDATE_5(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK_EN, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); + + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } +} + + +void optc301_setup_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_8(OTG_TRIGA_CNTL, 0, + OTG_TRIGA_SOURCE_SELECT, 21, + OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, + OTG_TRIGA_POLARITY_SELECT, 0, + OTG_TRIGA_FREQUENCY_SELECT, 0, + OTG_TRIGA_DELAY, 0, + OTG_TRIGA_CLEAR, 1); +} + +static struct timing_generator_funcs dcn30_tg_funcs = { + .validate_timing = optc1_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization. Not need for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization. 
Not need for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank_color = optc3_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .triplebuffer_lock = optc3_triplebuffer_lock, + .triplebuffer_unlock = optc2_triplebuffer_unlock, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .lock = optc3_lock, + .unlock = optc1_unlock, + .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, + .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc301_set_drr, + .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, + .set_vtotal_min_max = optc3_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .tg_init = optc3_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .setup_global_swap_lock = NULL, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, + .set_dwb_source = NULL, + .set_odm_bypass = optc3_set_odm_bypass, + .set_odm_combine = optc3_set_odm_combine, + .get_optc_source = optc2_get_optc_source, + .set_out_mux = optc3_set_out_mux, + .set_drr_trigger_window = optc3_set_drr_trigger_window, + .set_vtotal_change_limit = optc3_set_vtotal_change_limit, + .set_gsl = optc2_set_gsl, + .set_gsl_source_select = optc2_set_gsl_source_select, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc301_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, + .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, +}; + +void dcn301_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn30_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 4; + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h new file mode 100644 index 000000000000..b49585682a15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN301_H__ +#define __DC_OPTC_DCN301_H__ + +#include "dcn20/dcn20_optc.h" +#include "dcn30/dcn30_optc.h" + +void dcn301_timing_generator_init(struct optc *optc1); +void optc301_setup_manual_trigger(struct timing_generator *optc); +void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); + +#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 3485fbb1093e..f856a4773c27 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -42,7 +42,7 @@ #include "dcn30/dcn30_hubp.h" #include "irq/dcn30/irq_service_dcn30.h" #include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_optc.h" +#include "dcn301/dcn301_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" @@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create( tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; - dcn30_timing_generator_init(tgn10); + dcn301_timing_generator_init(tgn10); return &tgn10->base; } @@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct( dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 45956ef6f3f9..131b8b82afc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 65c1d754e2d6..8664f0c4c9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk( struct dcn_dccg *dccg_dcn, enum phyd32clk_clock_source src) { - if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (src == PHYD32CLKC) src = PHYD32CLKF; if (src == PHYD32CLKD) @@ -284,19 +285,11 @@ void dccg31_enable_symclk32_le( /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, 
- SYMCLK32_LE0_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); @@ -319,19 +312,38 @@ void dccg31_disable_symclk32_le( REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + return; + + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -660,10 +672,8 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { - dccg31_disable_symclk32_le(dccg, 0); - dccg31_disable_symclk32_le(dccg, 1); - } + dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false); + dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { dccg31_disable_dpstreamclk(dccg, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 0902ce5eb8a1..e3caaacf7493 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index bd62502380d8..4596f3bac1b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output( struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; - if (!dcn10_is_dig_enabled(enc)) + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) return; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 0278bae50a9d..45143459eedd 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank( VID_STREAM_STATUS, 0, 10, 5000); - /* Disable SDP tranmission */ + /* Disable SDP transmission */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 0746ed31d1d1..ad3f019a784f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg314_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 6a9024aa3285..9b8e0f6f32b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -908,15 +908,15 @@ static const struct dc_debug_options debug_defaults_drv = { .root_clock_optimization = { .bits = { .dpp = true, - .dsc = false, - .hdmistream = false, - .hdmichar = false, - .dpstream = false, - .symclk32_se = false, - .symclk32_le = false, - .symclk_fe = false, - .physymclk = false, - .dpiasymclk = false, + .dsc = true, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, } }, diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index df3a438abda8..2e3fa0fb8bd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1659,7 +1659,7 @@ static int dcn315_populate_dml_pipes_from_context( { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index 11e28e056cf7..61ceff6bc0b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync( uint32_t dispclk_rdivider_value = 0; REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); - REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); + + /* Not valid for the WDIVIDER to be set to 0 */ + if (dispclk_rdivider_value != 0) + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } static void dccg32_get_pixel_rate_div( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index d52d5feeb311..a87afb796f47 100644 
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -47,11 +47,9 @@ #include "clk_mgr.h" #include "dsc.h" #include "dcn20/dcn20_optc.h" -#include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" #include "link.h" -#include "dmub/inc/dmub_subvp_state.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 1cc09799f92d..0d1f18f8348e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; bool vsr_odm_support = false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 5be242a1b82c..db9c55a09d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -641,6 +641,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) uint8_t non_subvp_pipes = 0; bool drr_pipe_found = false; bool drr_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -649,8 +650,14 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); @@ -662,7 +669,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable) + if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + ((uint32_t)refresh_rate < 120)) result = true; return result; @@ -693,6 +701,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int bool drr_pipe_found = false; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -701,8 +710,14 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, 
pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); @@ -715,7 +730,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) + ((uint32_t)refresh_rate < 120) && + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; return result; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index f294f2f8c75b..57cf0358cc43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevels; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double HostVMInefficiencyFactor; double VRatioClamped; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 43016c462251..eba51144fee7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index d9e049e7ff0a..07adb614366e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -31,6 +31,7 @@ #include "dml/dcn20/dcn20_fpu.h" #include "dml/dcn31/dcn31_fpu.h" #include "dml/display_mode_vba.h" +#include "dml/dml_inline_defs.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { .VBlankNomDefaultUS = 668, @@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. 
If exact number of vertical lines is not found then + * it will round up to next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ + unsigned int num_lines = 0; + unsigned int lines_time_in_ns = 1000.0 * + (((float)timing->h_total * 1000.0) / + ((float)timing->pix_clk_100hz / 10.0)); + + num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + + return num_lines; +} + int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) @@ -289,15 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; + unsigned int num_lines = 0; if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; - pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing); + + if (pipe->stream->adjust.v_total_min != 0) + pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + else + pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; - pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width); pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 9010c47476e9..32251af76935 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a95034801712..0f882b879b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1040,7 +1040,7 @@ static bool subvp_subvp_admissable(struct dc *dc, uint32_t i; uint8_t subvp_count = 0; uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0; - uint32_t refresh_rate = 0; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1050,19 +1050,21 @@ static bool subvp_subvp_admissable(struct dc *dc, if (pipe->plane_state && !pipe->top_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (refresh_rate < min_refresh) - min_refresh = refresh_rate; - if (refresh_rate 
> max_refresh) - max_refresh = refresh_rate; + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if ((uint32_t)refresh_rate < min_refresh) + min_refresh = (uint32_t)refresh_rate; + if ((uint32_t)refresh_rate > max_refresh) + max_refresh = (uint32_t)refresh_rate; subvp_count++; } } if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) || - (min_refresh >= 120 && max_refresh >= 120))) + (min_refresh >= 120 && max_refresh <= 165))) result = true; return result; @@ -1715,8 +1717,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && !dc->config.enable_windowed_mpo_odm && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index a50e7f4dce42..ecea008f19d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule( double TimeForFetchingMetaPTE = 0; double TimeForFetchingRowInVBlank = 0; double LinesToRequestPrefetchPixelData = 0; + double LinesForPrefetchBandwidth = 0; unsigned int HostVMDynamicLevelsTrips; double trip_to_mem; double Tvm_trips; @@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule( TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank = Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; + /* Clamp to oto for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_oto; } else { *DestinationLinesForPrefetch = dst_y_prefetch_equ; TimeForFetchingMetaPTE = Tvm_equ; TimeForFetchingRowInVBlank = Tr0_equ; *PrefetchBandwidth = prefetch_bw_equ; + /* Clamp to equ for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_equ; } *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0; @@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule( *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0; - LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - + LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank; #ifdef __DML_VBA_DEBUG__ @@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; if (GPUVMEnable == true && HostVMEnable == true) HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 58dd62cce4bb..3966845c7694 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed; static 
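[Illustrative note, not part of the patch] The SubVP admissibility checks above now compute the refresh rate with 64-bit math so that pix_clk_100hz * 100 cannot overflow, and the "+ v_total * h_total - 1" term before the two div_u64 steps makes the division round up. Worked example with assumed 4K60 numbers:

/* 3840x2160@60: pix_clk_100hz = 5,940,000, h_total = 4400, v_total = 2250       */
/* pixels per frame = 4400 * 2250 = 9,900,000                                     */
/* refresh = (5,940,000 * 100 + 9,900,000 - 1) / 9,900,000 = ceil(60.0) = 60 Hz   */
/* (done as div_u64 by v_total, then by h_total, which floors to the same result) */
/* 60 < 120, so such a stream still passes the new high-refresh gating.           */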
bool dsc_policy_disable_dsc_stream_overhead; +static bool disable_128b_132b_stream_overhead; + #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) #endif @@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead; #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #endif +/* Need to account for padding due to pixel-to-symbol packing + * for uncompressed 128b/132b streams. + */ +static uint32_t apply_128b_132b_stream_overhead( + const struct dc_crtc_timing *timing, const uint32_t kbps) +{ + uint32_t total_kbps = kbps; + + if (disable_128b_132b_stream_overhead) + return kbps; + + if (!timing->flags.DSC) { + struct fixed31_32 bpp; + struct fixed31_32 overhead_factor; + + bpp = dc_fixpt_from_int(kbps); + bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); + + /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) + * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive + */ + overhead_factor = dc_fixpt_from_int(timing->h_addressable); + overhead_factor = dc_fixpt_mul(overhead_factor, bpp); + overhead_factor = dc_fixpt_div_int(overhead_factor, 128); + overhead_factor = dc_fixpt_div( + dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), + overhead_factor); + + total_kbps = dc_fixpt_ceil( + dc_fixpt_mul_int(overhead_factor, total_kbps)); + } + + return total_kbps; +} + uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding) { uint32_t bits_per_channel = 0; uint32_t kbps; @@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing( kbps = kbps * 2 / 3; } + if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + kbps = apply_128b_132b_stream_overhead(timing, kbps); + return kbps; } @@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); static uint32_t compute_bpp_x16_from_target_bandwidth( @@ -133,6 +175,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; @@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range( if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, &config); + &options, link_encoding, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, - config.num_slices_h, &dsc_common_caps, timing, range); + config.num_slices_h, &dsc_common_caps, timing, link_encoding, range); return is_dsc_possible; } @@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; @@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range( /* populate output structure */ if 
(range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding); /* max dsc target bpp */ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, @@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16( const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const int num_slices_h, + const enum dc_link_encoding_format link_encoding, int *target_bpp_x16) { struct dc_dsc_bw_range range; @@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16( *target_bpp_x16 = 0; if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range)) { + num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { if (policy->enable_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ @@ -796,6 +842,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -995,6 +1042,7 @@ static bool setup_dsc_config( target_bandwidth_kbps, timing, num_slices_h, + link_encoding, &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } @@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { bool is_dsc_possible = false; @@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config( is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, dsc_cfg); + timing, options, link_encoding, dsc_cfg); return is_dsc_possible; } @@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) dsc_policy_disable_dsc_stream_overhead = disable; } +void dc_set_disable_128b_132b_stream_overhead(bool disable) +{ + disable_128b_132b_stream_overhead = disable; +} + void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options) { options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index d2190a3320f6..33db15d69f23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -27,6 +27,8 @@ #include "dm_services_types.h" +struct abm_save_restore; + struct abm { struct dc_context *ctx; const struct abm_funcs *funcs; @@ -55,6 +57,10 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*save_restore)( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData); bool (*set_pipe_ex)(struct abm *abm, unsigned int otg_inst, unsigned int option, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 7254182b7c72..af6b9509d09d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -172,8 +172,6 @@ struct aux_engine_funcs { struct aux_engine *engine, uint8_t *returned_bytes); 
bool (*is_engine_available)(struct aux_engine *engine); - enum i2caux_engine_type (*get_engine_type)( - const struct aux_engine *engine); bool (*acquire)( struct aux_engine *engine, struct ddc *ddc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 8dc804bbe98b..93592281de32 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -123,6 +123,11 @@ struct dccg_funcs { struct dccg *dccg, int hpo_le_inst); + void (*set_symclk32_le_root_clock_gating)( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void (*set_physymclk)( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index c923b2af8510..37bc98faa7a0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -38,10 +38,9 @@ #define DCN_BASE__INST0_SEG2 0x000034C0 -static enum dc_irq_source to_dal_irq_source_dcn314( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index db9f1baa27e5..bce0428ad612 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link, stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; - int width = pipe_ctx->stream->timing.h_addressable + + struct pipe_ctx *odm_pipe; + int odm_cnt = 1; + int h_active = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; - int height = pipe_ctx->stream->timing.v_addressable + + int v_active = pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; memset(¶ms, 0, sizeof(params)); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_cnt++; + + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); + switch (test_pattern) { case DP_TEST_PATTERN_COLOR_SQUARES: controller_test_pattern = @@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link, { /* disable bit depth reduction */ pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; + } else if (link->dc->hwss.set_disp_pattern_generator) { enum controller_dp_color_space controller_color_space; - int opp_cnt = 1; - int offset = 0; - int dpg_width = width; + struct output_pixel_processor *odm_opp; switch 
(test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -502,36 +508,33 @@ static void set_crtc_test_pattern(struct dc_link *link, break; } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - dpg_width = width / opp_cnt; - offset = dpg_width; - - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, - odm_pipe, + pipe_ctx, controller_test_pattern, controller_color_space, color_depth, NULL, - dpg_width, - height, + odm_slice_width, + v_active, offset); - offset += offset; + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); } } break; @@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link, /* restore bitdepth reduction */ resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - int dpg_width; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - dpg_width = width / opp_cnt; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + struct output_pixel_processor *odm_opp; + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, + odm_pipe, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + last_odm_slice_width, + v_active, + offset); } } break; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index 
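
The test-pattern rework above stops assuming equal-width ODM slices: it computes odm_slice_width = h_active / odm_cnt, programs every slice but the last with that width, and hands the remainder to the final slice while accumulating the horizontal offset. A small sketch of just that split, printing what each slice would be programmed with:

#include <stdio.h>

/* Mirror of the slice-width bookkeeping in set_crtc_test_pattern(): the last
 * ODM slice absorbs whatever h_active / odm_cnt leaves over. */
static void split_odm_slices(int h_active, int odm_cnt)
{
	int slice_w = h_active / odm_cnt;
	int last_w = h_active - slice_w * (odm_cnt - 1);
	int offset = 0;
	int i;

	for (i = 0; i < odm_cnt - 1; i++) {
		printf("slice %d: offset %d, width %d\n", i, offset, slice_w);
		offset += slice_w;
	}
	printf("slice %d: offset %d, width %d\n", odm_cnt - 1, offset, last_w);
}

int main(void)
{
	split_odm_slices(5122, 3);	/* hypothetical 5122 active columns over 3 slices */
	return 0;
}
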
586fe25c1702..dc1cb5478e08 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -108,6 +108,11 @@ static void enable_hpo_dp_link_output(struct dc_link *link, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + true); link_res->hpo_dp_link_enc->funcs->enable_link_phy( link_res->hpo_dp_link_enc, link_settings, @@ -122,6 +127,11 @@ static void disable_hpo_dp_link_output(struct dc_link *link, link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + false); } static void set_hpo_dp_link_test_pattern(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 8041b8369e45..c9b6676eaf53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); - set_default_brightness_aux(link); - //TODO: use cached + set_cached_brightness_aux(link); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 1a7b93e41e35..7997936613fc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1079,8 +1079,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { uint64_t kbps; + enum dc_link_encoding_format link_encoding; - kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); + if (dp_is_128b_132b_signal(pipe_ctx)) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + else + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding); return get_pbn_from_bw_in_kbps(kbps); } @@ -1538,7 +1544,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); struct fixed31_32 timing_bw = dc_fixpt_from_int( - dc_bandwidth_in_kbps_from_timing(&stream->timing)); + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(link))); struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_div(timing_bw, timeslot_bw_effective); @@ -1971,6 +1978,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) bool is_vga_mode = (stream->timing.h_addressable == 640) && (stream->timing.v_addressable == 480); struct dc *dc = pipe_ctx->stream->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); if (stream->phy_pix_clk == 0) stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; @@ -2010,6 +2018,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) display_color_depth = 
COLOR_DEPTH_888; + /* We need to enable stream encoder for TMDS first to apply 1/4 TMDS + * character clock in case that beyond 340MHz. + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link_hwss->setup_stream_encoder(pipe_ctx); + dc->hwss.enable_tmds_link_output( link, &pipe_ctx->link_res, @@ -2129,7 +2143,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); // TODO: use cached if known + set_cached_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) msleep(bl_oled_enable_delay); edp_backlight_enable_aux(link, true); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index e8b2fc4002a5..b45fda96eaf6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing( /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; #endif - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > + dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; } else { // DP to HDMI TMDS converter if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) @@ -285,7 +286,7 @@ static bool dp_validate_mode_timing( link_setting = &link->verified_link_cap; */ - req_bw = dc_bandwidth_in_kbps_from_timing(timing); + req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { @@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un for (uint8_t i = 0; i < num_streams; ++i) { link[i] = stream[i].link; - bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); + bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(link[i])); } ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 0fa1228bc178..0f19c07011b5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -427,7 +427,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc, if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && - ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) { + ddc->ctx->dce_version == DCN_VERSION_3_1) { /* Fixed VS workaround for AUX timeout */ const uint32_t fixed_vs_address = 0xF004F; const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 3a5e80b57711..b38ac3ea06b0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream, 
struct dc_link_settings *link_setting) { struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link)); memset(link_setting, 0, sizeof(*link_setting)); @@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream, tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing, + dc_link_get_highest_encoding_format(link)); edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw); max_link_rate = tmp_link_setting.link_rate; } @@ -2165,7 +2166,9 @@ static bool dp_verify_link_cap( link, &irq_data)) (*fail_count)++; - + } else if (status == LINK_TRAINING_LINK_LOSS) { + success = true; + (*fail_count)++; } else { (*fail_count)++; } @@ -2188,6 +2191,7 @@ bool dp_verify_link_cap_with_retries( int i = 0; bool success = false; int fail_count = 0; + struct dc_link_settings last_verified_link_cap = fail_safe_link_settings; dp_trace_detect_lt_init(link); @@ -2204,10 +2208,14 @@ bool dp_verify_link_cap_with_retries( if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { link->verified_link_cap = fail_safe_link_settings; break; - } else if (dp_verify_link_cap(link, known_limit_link_setting, - &fail_count) && fail_count == 0) { - success = true; - break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { + last_verified_link_cap = link->verified_link_cap; + if (fail_count == 0) { + success = true; + break; + } + } else { + link->verified_link_cap = last_verified_link_cap; } fsleep(10 * 1000); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index e011df4bdaf2..90339c2dfd84 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries( } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ uint32_t req_bw; uint32_t link_bw; + enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; decide_fallback_link_setting(link, &max_link_settings, &cur_link_settings, status); + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to * minimum link bandwidth. 
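
The dp_verify_link_cap_with_retries() change above keeps a last_verified_link_cap around: a successful verification is remembered, and any later attempt that fails outright restores that value instead of leaving a failed result in link->verified_link_cap. A condensed sketch of that retry policy, with stand-in types and a placeholder verifier (the real loop also sleeps between attempts):

#include <stdbool.h>
#include <stdio.h>

struct link_cap { int rate; int lanes; };

static const struct link_cap fail_safe_cap = { 0, 0 };

/* Placeholder for dp_verify_link_cap(): pretend the first attempt sees one
 * retryable failure and later attempts verify the same capability. */
static bool verify_once(int attempt, struct link_cap *verified, int *fail_count)
{
	*verified = (struct link_cap){ 10, 4 };
	if (attempt == 0)
		(*fail_count)++;
	return true;
}

static bool verify_with_retries(struct link_cap *verified, int attempts)
{
	struct link_cap last_good = fail_safe_cap;
	int fail_count = 0;	/* accumulates across attempts, as in the hunk */
	int i;

	for (i = 0; i < attempts; i++) {
		if (verify_once(i, verified, &fail_count)) {
			last_good = *verified;          /* remember the good result */
			if (fail_count == 0)
				return true;            /* clean pass, done */
		} else {
			*verified = last_good;          /* never keep a failed attempt */
		}
	}
	return false;
}

int main(void)
{
	struct link_cap cap;

	printf("verified cleanly: %d\n", verify_with_retries(&cap, 2));
	return 0;
}
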
*/ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); is_link_bw_low = (req_bw > link_bw); is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 15faaf645b14..ca0543e62917 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -236,6 +236,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; @@ -244,10 +249,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -260,10 +261,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ @@ -339,6 +343,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_1[0], + sizeof(vendor_lttpr_write_data_4lane_1)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_2[0], + sizeof(vendor_lttpr_write_data_4lane_2)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_3[0], + sizeof(vendor_lttpr_write_data_4lane_3)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_4[0], + sizeof(vendor_lttpr_write_data_4lane_4)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_5[0], + sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ @@ -596,9 +628,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t pre_disable_intercept_delay_ms = 0; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; @@ -607,10 +644,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -623,10 +656,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ @@ -702,6 +738,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_1[0], + sizeof(vendor_lttpr_write_data_4lane_1)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_2[0], + sizeof(vendor_lttpr_write_data_4lane_2)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_3[0], + sizeof(vendor_lttpr_write_data_4lane_3)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_4[0], + sizeof(vendor_lttpr_write_data_4lane_4)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_5[0], + sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 2039a345f23a..8b360c09e0e8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -46,43 +46,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) { union dpcd_edp_config edp_config_set; bool panel_mode_edp = false; + enum dc_status result; memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - if (panel_mode != DP_PANEL_MODE_DEFAULT) { + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; - switch (panel_mode) { - case DP_PANEL_MODE_EDP: - case DP_PANEL_MODE_SPECIAL: - panel_mode_edp = true; - break; + default: + break; + } - default: - break; - } + /*set edp panel mode in receiver*/ + result = core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); - /*set edp panel mode in receiver*/ - core_link_read_dpcd( + if (result == DC_OK && + edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( link, DP_EDP_CONFIGURATION_SET, &edp_config_set.raw, sizeof(edp_config_set.raw)); - if (edp_config_set.bits.PANEL_MODE_EDP - != panel_mode_edp) { - enum dc_status result; - - edp_config_set.bits.PANEL_MODE_EDP = - panel_mode_edp; - result = core_link_write_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - ASSERT(result == DC_OK); - } + ASSERT(result == DC_OK); } + link->panel_mode = panel_mode; DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " "eDP panel mode enabled: %d \n", @@ -164,6 +163,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + link->backlight_settings.backlight_millinits = backlight_millinits; if (!link->dpcd_caps.panel_luminance_control) { if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, @@ -251,10 +251,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *) backlight_millinits, - sizeof(uint32_t))) - return false; + if (!link->dpcd_caps.panel_luminance_control) { + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)backlight_millinits, + sizeof(uint32_t))) + return false; + } else { + //setting to 0 as a precaution, since target_luminance_value is 3 bytes + memset(backlight_millinits, 0, sizeof(uint32_t)); + + if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)backlight_millinits, + sizeof(struct target_luminance_value))) + return false; + } return true; } @@ -276,6 +286,16 @@ bool set_default_brightness_aux(struct dc_link *link) return false; } +bool set_cached_brightness_aux(struct dc_link *link) +{ + if (link->backlight_settings.backlight_millinits) + return edp_set_backlight_level_nits(link, true, + link->backlight_settings.backlight_millinits, 0); + else + return set_default_brightness_aux(link); + return false; +} + bool 
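
set_cached_brightness_aux() above is a thin wrapper: reuse the millinits value recorded by edp_set_backlight_level_nits() when one exists, otherwise fall back to the panel default. Restated with stand-in types and placeholder setters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct backlight_stub {
	uint32_t backlight_millinits;	/* 0 means nothing has been cached yet */
};

static bool set_nits(uint32_t millinits)  { printf("restore %u millinits\n", millinits); return true; }
static bool set_default_nits(void)        { printf("use panel default\n");               return true; }

static bool restore_brightness(const struct backlight_stub *bl)
{
	if (bl->backlight_millinits)
		return set_nits(bl->backlight_millinits);

	return set_default_nits();
}

int main(void)
{
	struct backlight_stub bl = { 250000 };	/* hypothetical cached 250-nit level */

	return restore_brightness(&bl) ? 0 : 1;
}
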
edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { @@ -309,7 +329,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); - req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); if (!crtc_timing->flags.DSC) edp_decide_link_settings(link, &link_setting, req_bw); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 28f552080558..fa89bdb3a336 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool set_cached_brightness_aux(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, |