Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
212 files changed, 7556 insertions, 3534 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index a160512a2f04..6e3dddc73246 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -25,19 +25,10 @@ DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 -endif - - -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LIBS += dsc -endif - -ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn10 dml -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DC_LIBS += dcn21 endif @@ -59,7 +50,7 @@ include $(AMD_DC) DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DISPLAY_CORE += dc_vm_helper.o endif @@ -70,5 +61,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) - - +DC_DMUB += dc_dmub_srv.o +AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) +AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index a50a76471107..7ad0cad0f4ef 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -25,7 +25,7 @@ # subcomponents. BASICS = conversion.o fixpt31_32.o \ - log_helpers.o vector.o + log_helpers.o vector.o dc_common.o AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c new file mode 100644 index 000000000000..b2fc4f8e6482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dc_common.h" +#include "basics/conversion.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space) +{ + switch (output_color_space) { + case COLOR_SPACE_SRGB: + case COLOR_SPACE_SRGB_LIMITED: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_ADOBERGB: + return true; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR601_LIMITED: + case COLOR_SPACE_YCBCR709_LIMITED: + case COLOR_SPACE_2020_YCBCR: + return false; + default: + /* Add a case to switch */ + BREAK_TO_DEBUGGER(); + return false; + } +} + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + return false; +} + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state) +{ + if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN + && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID + && plane_state->input_csc_color_matrix.enable_adjustment + && plane_state->coeff_reduction_factor.value != 0) { + bias_and_scale->scale_blue = fixed_point_to_int_frac( + dc_fixpt_mul(plane_state->coeff_reduction_factor, + dc_fixpt_from_fraction(256, 255)), + 2, + 13); + bias_and_scale->scale_red = bias_and_scale->scale_blue; + bias_and_scale->scale_green = bias_and_scale->scale_blue; + } else { + bias_and_scale->scale_blue = 0x2000; + bias_and_scale->scale_red = 0x2000; + bias_and_scale->scale_green = 0x2000; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h new file mode 100644 index 000000000000..7c0cbf47e8ce --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h @@ -0,0 +1,42 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DC_COMMON_H__ +#define __DAL_DC_COMMON_H__ + +#include "core_types.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space); + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 823843cd2613..008d4d11339d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create( return NULL; } -static void destruct(struct bios_parser *bp) +static void bios_parser_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser_destruct(bp); kfree(bp); *dcb = NULL; @@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. */ return 0; @@ -2739,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -2748,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 5c3fcaa47410..2f1c9584ac32 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record( #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) -static void destruct(struct bios_parser *bp) +static void bios_parser2_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser2_destruct(bp); kfree(bp); *dcb = NULL; @@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, struct atom_display_object_path_v2 *object; struct atom_common_record_header *header; struct atom_i2c_record *record; + struct atom_i2c_record dummy_record = {0}; struct bios_parser *bp = BP_FROM_DCB(dcb); if (!info) return BP_RESULT_BADINPUT; + if (id.type == OBJECT_TYPE_GENERIC) { + dummy_record.i2c_id = id.id; + + if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK) + return BP_RESULT_OK; + else + return BP_RESULT_NORECORD; + } + object = get_bios_object(bp, id); if (!object) @@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info( struct atom_gpio_pin_lut_v2_1 *header; 
uint32_t count = 0; unsigned int table_index = 0; + bool find_valid = false; if (!info) return BP_RESULT_BADINPUT; @@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); - table_index = record->i2c_id & I2C_HW_LANE_MUX; - - if (count < table_index) { - bool find_valid = false; - - for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { - /* still valid */ - find_valid = true; - break; - } + for (table_index = 0; table_index < count; table_index++) { + if (((record->i2c_id & I2C_HW_CAP) == ( + header->gpio_pin[table_index].gpio_id & + I2C_HW_CAP)) && + ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_LANE_MUX))) { + /* still valid */ + find_valid = true; + break; } - /* If we don't find the entry that we are looking for then - * we will return BP_Result_BadBiosTable. - */ - if (find_valid == false) - return BP_RESULT_BADBIOSTABLE; } + /* If we don't find the entry that we are looking for then + * we will return BP_Result_BadBiosTable. + */ + if (find_valid == false) + return BP_RESULT_BADBIOSTABLE; + /* get the GPIO_I2C_INFO */ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false; info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX; @@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info( case 1: return get_ss_info_v4_1(bp, signal, index, ss_info); case 2: + case 3: return get_ss_info_v4_2(bp, signal, index, ss_info); default: break; @@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. 
*/ return 0; @@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1( bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; } + info->oem_i2c_present = false; + return BP_RESULT_OK; } @@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2( bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; } + if (firmware_info->board_i2c_feature_id == 0x2) { + info->oem_i2c_present = true; + info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; + } else { + info->oem_i2c_present = false; + } + return BP_RESULT_OK; } @@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11( info->ma_channel_number = info_v11->umachannelnumber; info->lvds_ss_percentage = le16_to_cpu(info_v11->lvds_ss_percentage); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 info->dp_ss_control = le16_to_cpu(info_v11->reserved1); -#endif info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v11->lvds_ss_rate_10hz); info->hdmi_ss_percentage = @@ -1826,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -1835,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; @@ -1915,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, }; -static bool bios_parser_construct( +static bool bios_parser2_construct( struct bios_parser *bp, struct bp_init_data *init, enum dce_version dce_version) @@ -2008,7 +2020,7 @@ struct dc_bios *firmware_parser_create( if (!bp) return NULL; - if (bios_parser_construct(bp, init, dce_version)) + if (bios_parser2_construct(bp, init, dce_version)) return &bp->base; kfree(bp); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bb2e8105e6ab..629a07a2719b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -37,6 +37,8 @@ #include "bios_parser_types_internal2.h" #include "amdgpu.h" +#include "dc_dmub_srv.h" +#include "dc.h" #define DC_LOGGER \ bp->base.ctx->logger @@ -87,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl); +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl); + static void init_dig_encoder_control(struct bios_parser *bp) { uint32_t version = @@ -98,11 +104,26 @@ static void init_dig_encoder_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); - bp->cmd_tbl.dig_encoder_control = NULL; + bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; break; } } +static void encoder_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_encoder_stream_setup_parameters_v1_5 *dig) +{ + struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 }; + + encoder_control.header.type = DMUB_CMD__VBIOS; + encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL; + encoder_control.encoder_control.dig.stream_param = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result encoder_control_digx_v1_5( struct bios_parser 
*bp, struct bp_encoder_control *cntl) @@ -155,12 +176,30 @@ static enum bp_result encoder_control_digx_v1_5( break; } + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + encoder_control_dmcub(bp->base.ctx->dmub_srv, &params); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params)) result = BP_RESULT_OK; return result; } +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return encoder_control_digx_v1_5(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /***************************************************************************** ****************************************************************************** ** @@ -173,6 +212,10 @@ static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl); +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl); + static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; @@ -186,11 +229,27 @@ static void init_transmitter_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have transmitter_control for v%d\n", crev); - bp->cmd_tbl.transmitter_control = NULL; + bp->cmd_tbl.transmitter_control = transmitter_control_fallback; break; } } +static void transmitter_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_transmitter_control_parameters_v1_6 *dig) +{ + struct dmub_rb_cmd_dig1_transmitter_control transmitter_control; + + transmitter_control.header.type = DMUB_CMD__VBIOS; + transmitter_control.header.sub_type = + DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; + transmitter_control.transmitter_control.dig = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl) @@ -222,6 +281,11 @@ static enum bp_result transmitter_control_v1_6( __func__, ps.param.symclk_10khz); } + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param); + return BP_RESULT_OK; + } /*color_depth not used any more, driver has deep color factor in the Phyclk*/ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) @@ -229,6 +293,18 @@ static enum bp_result transmitter_control_v1_6( return result; } +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return transmitter_control_v1_6(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -241,6 +317,10 @@ static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params); + static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { @@ -250,12 +330,25 @@ static void init_set_pixel_clock(struct bios_parser *bp) default: dm_output_to_console("Don't have set_pixel_clock for
v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); - bp->cmd_tbl.set_pixel_clock = NULL; + bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } } +static void set_pixel_clock_dmcub( + struct dc_dmub_srv *dmcub, + struct set_pixel_clock_parameter_v1_7 *clk) +{ + struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 }; + pixel_clock.header.type = DMUB_CMD__VBIOS; + pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK; + pixel_clock.pixel_clock.clk = *clk; + + dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, @@ -331,12 +424,30 @@ static enum bp_result set_pixel_clock_v7( if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK; } return result; } +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return set_pixel_clock_v7(bp, bp_params); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -569,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1( enum controller_id crtc_id, enum bp_pipe_control_action action); +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action); + static void init_enable_disp_power_gating( struct bios_parser *bp) { @@ -580,11 +696,30 @@ static void init_enable_disp_power_gating( default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); - bp->cmd_tbl.enable_disp_power_gating = NULL; + bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; break; } } +static void enable_disp_power_gating_dmcub( + struct dc_dmub_srv *dmcub, + struct enable_disp_power_gating_parameters_v2_1 *pwr) +{ + struct dmub_rb_cmd_enable_disp_power_gating power_gating; + + power_gating.header.type = DMUB_CMD__VBIOS; + power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; + power_gating.power_gating.pwr = *pwr; + + /* ATOM_ENABLE is old API in DMUB */ + if (power_gating.power_gating.pwr.enable == ATOM_ENABLE) + power_gating.power_gating.pwr.enable = ATOM_INIT; + + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, @@ -604,12 +739,32 @@ static enum bp_result enable_disp_power_gating_v2_1( ps.param.enable = bp->cmd_helper->disp_power_gating_action_to_atom(action); + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, + &ps.param); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param)) result = BP_RESULT_OK; return result; } +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id 
crtc_id, + enum bp_pipe_control_action action) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return enable_disp_power_gating_v2_1(bp, crtc_id, action); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ******************************************************************************* ** diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index db153ddf0fee..7388c987c595 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -55,23 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCE_VERSION_11_22: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif case DCE_VERSION_12_0: case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 26c6d735cdc7..4674aca8f206 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -1,5 +1,6 @@ # # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@ # It calculates Bandwidth and Watermarks values for HW programming # +ifdef CONFIG_X86 calcs_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +calcs_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. 
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4 else calcs_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) @@ -47,7 +56,7 @@ CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautologi BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index a1d49256fab7..5d081c42e81b 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -154,14 +154,14 @@ static void calculate_bandwidth( - if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; } - else { - d0_underlay_enable = 1; - } - if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; } - else { - d1_underlay_enable = 1; - } + if (data->d0_underlay_mode == bw_def_none) + d0_underlay_enable = false; + else + d0_underlay_enable = true; + if (data->d1_underlay_mode == bw_def_none) + d1_underlay_enable = false; + else + d1_underlay_enable = true; data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; switch (data->underlay_surface_type) { case bw_def_420: @@ -286,8 +286,8 @@ static void calculate_bandwidth( data->cursor_width_pixels[2] = bw_int_to_fixed(0); data->cursor_width_pixels[3] = bw_int_to_fixed(0); /* graphics surface parameters from spreadsheet*/ - fbc_enabled = 0; - lpt_enabled = 0; + fbc_enabled = false; + lpt_enabled = false; for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { if (i < data->number_of_displays + 4) { if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { @@ -338,9 +338,9 @@ static void calculate_bandwidth( data->access_one_channel_only[i] = 0; } if (data->fbc_en[i] == 1) { - fbc_enabled = 1; + fbc_enabled = true; if (data->lpt_en[i] == 1) { - lpt_enabled = 1; + lpt_enabled = true; } } data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 9b2cb57bf2ba..1a37550731de 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1,5 +1,6 @@ /* * Copyright 2017 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -53,13 +54,9 @@ * remain as-is as it provides us with a guarantee from HW that it is correct. */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* Defaults from spreadsheet rev#247. 
* RV2 delta: dram_clock_change_latency, max_num_dpp */ -#else -/* Defaults from spreadsheet rev#247 */ -#endif const struct dcn_soc_bounding_box dcn10_soc_defaults = { /* latencies */ .sr_exit_time = 17, /*us*/ @@ -626,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc) { bool updated = false; - kernel_fpu_begin(); + DC_FP_START(); if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns && dc->debug.sr_exit_time_ns) { updated = true; @@ -662,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc) dc->dcn_soc->dram_clock_change_latency = dc->debug.dram_clock_change_latency_ns / 1000.0; } - kernel_fpu_end(); + DC_FP_END(); return updated; } @@ -708,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev) { - /* for dali, the highest voltage level we want is 0 */ - if (ASICREV_IS_DALI(hw_internal_rev)) + /* for dali & pollock, the highest voltage level we want is 0 */ + if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev)) return 0; /* we are ok with all levels */ @@ -742,7 +739,7 @@ bool dcn_validate_bandwidth( dcn_bw_sync_calcs_and_dml(dc); memset(v, 0, sizeof(*v)); - kernel_fpu_begin(); + DC_FP_START(); v->sr_exit_time = dc->dcn_soc->sr_exit_time; v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; @@ -1275,7 +1272,7 @@ bool dcn_validate_bandwidth( bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; - kernel_fpu_end(); + DC_FP_END(); PERFORMANCE_TRACE_END(); BW_VAL_TRACE_FINISH(); @@ -1438,37 +1435,49 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dc_context *ctx = dc->ctx; struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); - kernel_fpu_begin(); + DC_FP_START(); if (res) res = verify_clock_values(&fclks); if (res) { - ASSERT(fclks.num_levels >= 3); - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; } else BREAK_TO_DEBUGGER(); - kernel_fpu_end(); + DC_FP_END(); res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); - kernel_fpu_begin(); + DC_FP_START(); if (res) res = verify_clock_values(&dcfclks); @@ -1481,7 +1490,7 @@ void dcn_bw_update_from_pplib(struct dc *dc) } else BREAK_TO_DEBUGGER(); - kernel_fpu_end(); + DC_FP_END(); } void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) @@ -1496,11 +1505,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) if (!pp || !pp->set_wm_ranges) return; - kernel_fpu_begin(); + DC_FP_START(); min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; socclk_khz = dc->dcn_soc->socclk * 1000; - kernel_fpu_end(); + DC_FP_END(); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and @@ -1551,7 +1560,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) void dcn_bw_sync_calcs_and_dml(struct dc *dc) { - kernel_fpu_begin(); + DC_FP_START(); DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" "sr_enter_plus_exit_time: %f ns\n" "urgent_latency: %f ns\n" @@ -1740,5 +1749,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc) dc->dml.ip.bug_forcing_LC_req_same_size_fixed = dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency; - kernel_fpu_end(); + DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index b864869cc7e3..3cd283195091 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120) -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN ############################################################################### # DCN10 ############################################################################### @@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_0 ############################################################################### # DCN20 ############################################################################### @@ -83,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 
############################################################################### # DCN21 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 8828dd9c3783..a78e5c74c79c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -37,9 +37,7 @@ #include "dcn10/rv1_clk_mgr.h" #include "dcn10/rv2_clk_mgr.h" #include "dcn20/dcn20_clk_mgr.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/rn_clk_mgr.h" -#endif int clk_mgr_helper_get_active_display_cnt( @@ -134,14 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dce120_clk_mgr_construct(ctx, clk_mgr); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + if (ASICREV_IS_DALI(asic_id.hw_internal_rev) || + ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) { + /* TEMP: this check has to come before ASICREV_IS_RENOIR */ + /* which also incorrectly returns true for Dali/Pollock*/ + rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); + break; + } if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; } -#endif /* DCN2_1 */ if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) { rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); break; @@ -152,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p break; } break; -#endif /* Family RV */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; -#endif /* Family NV */ +#endif /* Family RV and NV*/ default: ASSERT(0); /* Unknown Asic */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index a6c46e903ff9..d031bd3d3072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr_base->ctx->dc_bios; - struct dc *core_dc = clk_mgr_base->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr_base->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, @@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) { struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ 
memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 1897e91c8ccb..97b7f32294fd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param( @@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di /* Actual dispclk set is returned in the parameter register */ actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 25d7b7c6681c..495f01e9f2ca 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -27,6 +27,7 @@ #include "clk_mgr_internal.h" #include "dce100/dce_clk_mgr.h" +#include "dcn20_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider) } void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, - struct dc_state *context) + struct dc_state *context, bool safe_to_lower) { int i; clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + int dpp_inst, dppclk_khz, prev_dppclk_khz; /* Loop index will match dpp->inst if resource exists, * and we want to avoid dependency on dpp object @@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz); + prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) { + clk_mgr->dccg->funcs->update_dpp_dto( + clk_mgr->dccg, dpp_inst, dppclk_khz); + } } } @@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, dc->debug.force_clock_mode & 0x1) { //this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but 
need set them for S3. force_reset = true; + + dcn2_read_clocks_from_hw_dentist(clk_mgr_base); + //force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level. } display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { // if clock is being lowered, increase DTO before lowering refclk - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr); } else { // if clock is being raised, increase refclk before lowering DTO @@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, dcn20_update_clocks_update_dentist(clk_mgr); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } @@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) } } + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t dispclk_wdivider; + uint32_t dppclk_wdivider; + int disp_divider; + int dpp_divider; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider); + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider); + + disp_divider = dentist_get_divider_from_did(dispclk_wdivider); + dpp_divider = dentist_get_divider_from_did(dispclk_wdivider); + + if (disp_divider && dpp_divider) { + /* Calculate the current DFS clock, in kHz.*/ + clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / disp_divider; + + clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider; + } + +} + void dcn2_get_clock(struct clk_mgr *clk_mgr, struct dc_state *context, enum dc_clock_type clock_type, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index c9fd824f3c23..0b9c045b0c8e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower); void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, - struct dc_state *context); + struct dc_state *context, bool safe_to_lower); void dcn2_init_clocks(struct clk_mgr *clk_mgr); @@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, struct dc_clock_config *clock_cfg); void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); + + #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 35c55e54eac0..7ae4c06232dd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa( struct dc_state *context) { int i, display_count; - bool hdmi_present = 
false; + bool tmds_present = false; display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - hdmi_present = true; + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || + stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) + tmds_present = true; } for (i = 0; i < dc->link_count; i++) { @@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa( } /* WA for hang on HDMI after display off back back on*/ - if (display_count == 0 && hdmi_present) + if (display_count == 0 && tmds_present) display_count = 1; return display_count; @@ -164,16 +166,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, } if (dpp_clock_lowered) { - // if clock is being lowered, increase DTO before lowering refclk - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + // increase per DPP DTO before lowering global dppclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); } else { - // if clock is being raised, increase refclk before lowering DTO + // increase global DPPCLK before lowering per DPP DTO if (update_dppclk || update_dispclk) rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } if (update_dispclk && @@ -409,7 +411,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra continue; ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; + ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type; /* We will not select WM based on dcfclk, so leave it as unconstrained */ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; @@ -578,33 +580,33 @@ struct wm_table lpddr4_wm_table = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 5.32, + .sr_enter_plus_exit_time_us = 6.38, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.82, + .sr_enter_plus_exit_time_us = 11.196, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.89, + .sr_enter_plus_exit_time_us = 11.24, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.748, + .sr_enter_plus_exit_time_us = 11.102, .valid = true, }, } @@ -691,7 +693,6 @@ void rn_clk_mgr_construct( { struct dc_debug_options *debug = &ctx->dc->debug; struct dpm_clocks clock_table = { 0 }; - struct 
clk_state_registers_and_bypass s = { 0 }; clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; @@ -711,7 +712,6 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; clk_mgr->base.dentist_vco_freq_khz = 3600000; - clk_mgr->base.dprefclk_khz = 600000; } else { struct clk_log_info log_info = {0}; @@ -722,24 +722,16 @@ void rn_clk_mgr_construct( if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3600000; - rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - /* Convert dprefclk units from MHz to KHz */ - /* Value already divided by 10, some resolution lost */ - clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) { - ASSERT(clk_mgr->base.dprefclk_khz == 600000); - clk_mgr->base.dprefclk_khz = 600000; - } - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { rn_bw_params.wm_table = lpddr4_wm_table; } else { rn_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index cb7c0e8b7e1b..6878aedf1d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -82,8 +82,8 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -91,7 +91,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis VBIOSSMC_MSG_SetDispclkFreq, requested_dispclk_khz / 1000); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 32f31bf91915..04441dbcba76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -58,21 +58,21 @@ #include "hubp.h" #include "dc_link_dp.h" +#include "dc_dmub_srv.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "vm_helper.h" -#endif #include "dce/dce_i2c.h" +#define CTX \ + dc->ctx + #define DC_LOGGER \ dc->ctx->logger -const static char DC_BUILD_ID[] = "production-build"; +static const char DC_BUILD_ID[] = "production-build"; /** * DOC: Overview @@ -287,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) { - pipe->stream->adjust = *adjust; dc->hwss.set_drr(&pipe, 1, adjust->v_total_min, @@ -511,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc 
*dc, struct dc_stream_state *stream) return ret; } -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **streams, int num_streams, - const struct dc_static_screen_events *events) + const struct dc_static_screen_params *params) { int i = 0; int j = 0; @@ -533,10 +532,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, } } - dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); + dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); } -static void destruct(struct dc *dc) +static void dc_destruct(struct dc *dc) { if (dc->current_state) { dc_release_state(dc->current_state); @@ -569,7 +568,7 @@ static void destruct(struct dc *dc) kfree(dc->bw_dceip); dc->bw_dceip = NULL; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN kfree(dc->dcn_soc); dc->dcn_soc = NULL; @@ -577,28 +576,58 @@ static void destruct(struct dc *dc) dc->dcn_ip = NULL; #endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 kfree(dc->vm_helper); dc->vm_helper = NULL; -#endif } -static bool construct(struct dc *dc, +static bool dc_construct_ctx(struct dc *dc, + const struct dc_init_data *init_params) +{ + struct dc_context *dc_ctx; + enum dce_version dc_version = DCE_VERSION_UNKNOWN; + + dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); + if (!dc_ctx) + return false; + + dc_ctx->cgs_device = init_params->cgs_device; + dc_ctx->driver_context = init_params->driver; + dc_ctx->dc = dc; + dc_ctx->asic_id = init_params->asic_id; + dc_ctx->dc_sink_id_count = 0; + dc_ctx->dc_stream_id_count = 0; + dc_ctx->dce_environment = init_params->dce_environment; + + /* Create logger */ + + dc_version = resource_parse_asic_id(init_params->asic_id); + dc_ctx->dce_version = dc_version; + + dc_ctx->perf_trace = dc_perf_trace_create(); + if (!dc_ctx->perf_trace) { + ASSERT_CRITICAL(false); + return false; + } + + dc->ctx = dc_ctx; + + return true; +} + +static bool dc_construct(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; struct bw_calcs_dceip *dc_dceip; struct bw_calcs_vbios *dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; #endif - enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc->config = init_params->flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 // Allocate memory for the vm_helper dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); if (!dc->vm_helper) { @@ -606,7 +635,6 @@ static bool construct(struct dc *dc, goto fail; } -#endif memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); @@ -624,7 +652,7 @@ static bool construct(struct dc *dc, } dc->bw_vbios = dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); if (!dcn_soc) { dm_error("%s: failed to create dcn_soc\n", __func__); @@ -640,31 +668,15 @@ static bool construct(struct dc *dc, } dc->dcn_ip = dcn_ip; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 dc->soc_bounding_box = init_params->soc_bounding_box; #endif -#endif - dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); - if (!dc_ctx) { + if (!dc_construct_ctx(dc, init_params)) { dm_error("%s: failed to create ctx\n", __func__); goto fail; } - dc_ctx->cgs_device = init_params->cgs_device; - dc_ctx->driver_context = init_params->driver; - dc_ctx->dc = dc; - dc_ctx->asic_id = init_params->asic_id; - dc_ctx->dc_sink_id_count = 0; - 
dc_ctx->dc_stream_id_count = 0; - dc->ctx = dc_ctx; - - /* Create logger */ - - dc_ctx->dce_environment = init_params->dce_environment; - - dc_version = resource_parse_asic_id(init_params->asic_id); - dc_ctx->dce_version = dc_version; + dc_ctx = dc->ctx; /* Resource should construct all asic specific resources. * This should be the only place where we need to parse the asic id @@ -679,7 +691,7 @@ static bool construct(struct dc *dc, bp_init_data.bios = init_params->asic_id.atombios_base_address; dc_ctx->dc_bios = dal_bios_parser_create( - &bp_init_data, dc_version); + &bp_init_data, dc_ctx->dce_version); if (!dc_ctx->dc_bios) { ASSERT_CRITICAL(false); @@ -687,17 +699,13 @@ static bool construct(struct dc *dc, } dc_ctx->created_bios = true; - } - - dc_ctx->perf_trace = dc_perf_trace_create(); - if (!dc_ctx->perf_trace) { - ASSERT_CRITICAL(false); - goto fail; } + + /* Create GPIO service */ dc_ctx->gpio_service = dal_gpio_service_create( - dc_version, + dc_ctx->dce_version, dc_ctx->dce_environment, dc_ctx); @@ -706,7 +714,7 @@ static bool construct(struct dc *dc, goto fail; } - dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version); + dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); if (!dc->res_pool) goto fail; @@ -714,10 +722,8 @@ static bool construct(struct dc *dc, if (!dc->clk_mgr) goto fail; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 if (dc->res_pool->funcs->update_bw_bounding_box) dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); -#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -739,12 +745,9 @@ static bool construct(struct dc *dc, return true; fail: - - destruct(dc); return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool disable_all_writeback_pipes_for_stream( const struct dc *dc, struct dc_stream_state *stream, @@ -757,7 +760,6 @@ static bool disable_all_writeback_pipes_for_stream( return true; } -#endif static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { @@ -783,16 +785,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); -#endif if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, dangling_context); -#endif } current_ctx = dc->current_state; @@ -800,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_release_state(current_ctx); } +static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) +{ + int i; + int count = 0; + struct pipe_ctx *pipe; + PERF_TRACE(); + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Timeout 100 ms */ + while (count < 100000) { + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (!pipe->plane_state->status.is_flip_pending) + break; + udelay(1); + count++; + } + ASSERT(!pipe->plane_state->status.is_flip_pending); + } + PERF_TRACE(); +} + /******************************************************************************* * Public functions 
******************************************************************************/ @@ -812,26 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (NULL == dc) goto alloc_fail; - if (false == construct(dc, init_params)) - goto construct_fail; + if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { + if (false == dc_construct_ctx(dc, init_params)) { + dc_destruct(dc); + goto construct_fail; + } + } else { + if (false == dc_construct(dc, init_params)) { + dc_destruct(dc); + goto construct_fail; + } - full_pipe_count = dc->res_pool->pipe_count; - if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) - full_pipe_count--; - dc->caps.max_streams = min( - full_pipe_count, - dc->res_pool->stream_enc_count); + full_pipe_count = dc->res_pool->pipe_count; + if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) + full_pipe_count--; + dc->caps.max_streams = min( + full_pipe_count, + dc->res_pool->stream_enc_count); - dc->caps.max_links = dc->link_count; - dc->caps.max_audios = dc->res_pool->audio_count; - dc->caps.linear_pitch_alignment = 64; + dc->optimize_seamless_boot_streams = 0; + dc->caps.max_links = dc->link_count; + dc->caps.max_audios = dc->res_pool->audio_count; + dc->caps.linear_pitch_alignment = 64; + + dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + + if (dc->res_pool->dmcu != NULL) + dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; + } /* Populate versioning information */ dc->versions.dc_ver = DC_VER; - if (dc->res_pool->dmcu != NULL) - dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; - dc->build_id = DC_BUILD_ID; DC_LOG_DC("Display Core initialized\n"); @@ -849,7 +886,8 @@ alloc_fail: void dc_hardware_init(struct dc *dc) { - dc->hwss.init_hw(dc); + if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) + dc->hwss.init_hw(dc); } void dc_init_callbacks(struct dc *dc, @@ -869,7 +907,7 @@ void dc_deinit_callbacks(struct dc *dc) void dc_destroy(struct dc **dc) { - destruct(*dc); + dc_destruct(*dc); kfree(*dc); *dc = NULL; } @@ -1163,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) - dc->optimize_seamless_boot = true; + dc->optimize_seamless_boot_streams++; } - if (!dc->optimize_seamless_boot) + if (dc->optimize_seamless_boot_streams == 0) dc->hwss.prepare_bandwidth(dc, context); /* re-program planes for existing stream, in case we need to @@ -1182,10 +1220,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_status[i].plane_count, context); /* use new pipe config in new context */ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - if (dc->hwss.program_front_end_for_ctx) - dc->hwss.program_front_end_for_ctx(dc, context); -#endif /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1204,10 +1238,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); -#endif for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; @@ -1245,6 +1277,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); + if (dc->optimize_seamless_boot_streams == 0) { + /* Must wait for no flips to be pending before 
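/*
 * Editor's sketch of the bookkeeping change visible above: the single
 * optimize_seamless_boot flag becomes a count of seamless-boot streams, and
 * bandwidth is only re-optimized once every such stream has taken its first
 * flip. The struct and helpers are invented for illustration only.
 */
#include <stdbool.h>

struct seamless_boot_sketch {
	int optimize_seamless_boot_streams;
	bool optimized_required;
};

static void count_streams_sketch(struct seamless_boot_sketch *dc,
				 const bool *apply_opt, int stream_count)
{
	int i;

	dc->optimize_seamless_boot_streams = 0;
	for (i = 0; i < stream_count; i++)
		if (apply_opt[i])       /* apply_seamless_boot_optimization */
			dc->optimize_seamless_boot_streams++;
}

/* Called once per seamless-boot stream when it takes its first flip. */
static void first_flip_sketch(struct seamless_boot_sketch *dc)
{
	if (dc->optimize_seamless_boot_streams > 0)
		dc->optimize_seamless_boot_streams--;

	/* Only after the last stream flips may clocks/watermarks be lowered. */
	if (dc->optimize_seamless_boot_streams == 0)
		dc->optimized_required = true;
}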
doing optimize bw */ + wait_for_no_pipes_pending(dc, context); + /* pplib is notified if disp_num changed */ + dc->hwss.optimize_bandwidth(dc, context); + } + for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; @@ -1279,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +bool dc_is_hw_initialized(struct dc *dc) +{ + struct dc_bios *dcb = dc->ctx->dc_bios; + return dcb->funcs->is_accelerated_mode(dcb); +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; struct dc_state *context = dc->current_state; - if (!dc->optimized_required || dc->optimize_seamless_boot) + if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0) return true; post_surface_trace(dc); @@ -1313,7 +1358,7 @@ struct dc_state *dc_create_state(struct dc *dc) * initialize and obtain IP and SOC the base DML instance from DC is * initially copied into every context */ -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); #endif @@ -1486,11 +1531,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa elevate_update_type(&update_type, UPDATE_TYPE_MED); } - if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) { - update_flags->bits.sdr_white_level = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); - } - if (u->plane_info->dcc.enable != u->surface->dcc.enable || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { @@ -1508,7 +1548,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch - || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); @@ -1547,7 +1586,10 @@ static enum surface_update_type get_scaling_info_update_type( if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->dst_rect.width != u->surface->dst_rect.width - || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) { + || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->scaling_quality.integer_scaling != + u->surface->scaling_quality.integer_scaling + ) { update_flags->bits.scaling_change = 1; if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width @@ -1563,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type( update_flags->bits.scaling_change = 1; if (u->scaling_info->src_rect.width > u->surface->src_rect.width - && u->scaling_info->src_rect.height > u->surface->src_rect.height) + || u->scaling_info->src_rect.height > u->surface->src_rect.height) /* Making src rect bigger requires a bandwidth change */ update_flags->bits.clock_change = 1; } @@ -1577,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type( update_flags->bits.position_change = 1; if (update_flags->bits.clock_change - || update_flags->bits.bandwidth_change) + || update_flags->bits.bandwidth_change + || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.scaling_change - || update_flags->bits.position_change) + if 
(update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -1635,6 +1677,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->bits.gamma_change = 1; } + if (u->hdr_mult.value) + if (u->hdr_mult.value != u->surface->hdr_mult.value) { + update_flags->bits.hdr_mult = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_MED); + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); @@ -1668,7 +1716,8 @@ static enum surface_update_type check_update_surfaces_for_stream( union stream_update_flags *su_flags = &stream_update->stream->update_flags; if ((stream_update->src.height != 0 && stream_update->src.width != 0) || - (stream_update->dst.height != 0 && stream_update->dst.width != 0)) + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || + stream_update->integer_scaling_update) su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) @@ -1683,15 +1732,16 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->gamut_remap) su_flags->bits.gamut_remap = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) su_flags->bits.wb_update = 1; -#endif if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + if (stream_update->dsc_config) + overall_type = UPDATE_TYPE_FULL; } for (i = 0 ; i < surface_count; i++) { @@ -1817,8 +1867,6 @@ static void copy_surface_update_to_plane( srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; - surface->sdr_white_level = - srf_update->plane_info->sdr_white_level; surface->layer_index = srf_update->plane_info->layer_index; } @@ -1851,7 +1899,6 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (srf_update->func_shaper && (surface->in_shaper_func != srf_update->func_shaper)) @@ -1864,13 +1911,16 @@ static void copy_surface_update_to_plane( memcpy(surface->lut3d_func, srf_update->lut3d_func, sizeof(*surface->lut3d_func)); + if (srf_update->hdr_mult.value) + surface->hdr_mult = + srf_update->hdr_mult; + if (srf_update->blend_tf && (surface->blend_tf != srf_update->blend_tf)) memcpy(surface->blend_tf, srf_update->blend_tf, sizeof(*surface->blend_tf)); -#endif if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -1883,8 +1933,10 @@ static void copy_surface_update_to_plane( static void copy_stream_update_to_stream(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, - const struct dc_stream_update *update) + struct dc_stream_update *update) { + struct dc_context *dc_ctx = dc->ctx; + if (update == NULL || stream == NULL) return; @@ -1945,7 +1997,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dither_option) stream->dither_option = *update->dither_option; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* update current stream with writeback info */ if (update->wb_update) { int i; @@ -1956,23 +2007,32 @@ static void copy_stream_update_to_stream(struct dc *dc, stream->writeback_info[i] = update->wb_update->writeback_info[i]; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (update->dsc_config) { struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; uint32_t old_dsc_enabled = stream->timing.flags.DSC; uint32_t enable_dsc = 
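/*
 * Editor's sketch of the update-type classification logic touched above:
 * each check can only raise ("elevate") the type, and with this change a
 * scaling change now forces a FULL update rather than MED. Enum and function
 * names are invented; the ordering mirrors the driver's FAST < MED < FULL.
 */
enum update_type_sketch {
	UPDATE_FAST_SKETCH = 0,
	UPDATE_MED_SKETCH  = 1,
	UPDATE_FULL_SKETCH = 2,
};

static void elevate_sketch(enum update_type_sketch *cur,
			   enum update_type_sketch candidate)
{
	if (candidate > *cur)
		*cur = candidate;       /* never lowers an already-higher type */
}

static enum update_type_sketch classify_sketch(int position_changed,
					       int scaling_changed,
					       int clock_or_bw_changed)
{
	enum update_type_sketch type = UPDATE_FAST_SKETCH;

	if (position_changed)
		elevate_sketch(&type, UPDATE_MED_SKETCH);
	if (scaling_changed || clock_or_bw_changed)
		elevate_sketch(&type, UPDATE_FULL_SKETCH);

	return type;
}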
(update->dsc_config->num_slices_h != 0 && update->dsc_config->num_slices_v != 0); - stream->timing.dsc_cfg = *update->dsc_config; - stream->timing.flags.DSC = enable_dsc; - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, - true)) { - stream->timing.dsc_cfg = old_dsc_cfg; - stream->timing.flags.DSC = old_dsc_enabled; + /* Use temporarry context for validating new DSC config */ + struct dc_state *dsc_validate_context = dc_create_state(dc); + + if (dsc_validate_context) { + dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); + + stream->timing.dsc_cfg = *update->dsc_config; + stream->timing.flags.DSC = enable_dsc; + if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { + stream->timing.dsc_cfg = old_dsc_cfg; + stream->timing.flags.DSC = old_dsc_enabled; + update->dsc_config = NULL; + } + + dc_release_state(dsc_validate_context); + } else { + DC_ERROR("Failed to allocate new validate context for DSC change\n"); + update->dsc_config = NULL; } } -#endif } static void commit_planes_do_stream_update(struct dc *dc, @@ -1992,11 +2052,11 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->periodic_interrupt0 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); if (stream_update->periodic_interrupt1 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || @@ -2006,6 +2066,12 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.update_info_frame(pipe_ctx); } + if (stream_update->hdr_static_metadata && + stream->use_dynamic_meta && + dc->hwss.set_dmdata_attributes && + pipe_ctx->stream->dmdata_address.quad_part != 0) + dc->hwss.set_dmdata_attributes(pipe_ctx); + if (stream_update->gamut_remap) dc_stream_set_gamut_remap(dc, stream); @@ -2013,31 +2079,25 @@ static void commit_planes_do_stream_update(struct dc *dc, dc_stream_program_csc_matrix(dc, stream); if (stream_update->dither_option) { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, &stream->bit_depth_params, &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } -#endif } -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) { dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true); dp_update_dsc_config(pipe_ctx); dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false); } -#endif /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -2053,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.optimize_bandwidth(dc, dc->current_state); } else { - if (!dc->optimize_seamless_boot) + if (dc->optimize_seamless_boot_streams == 0) dc->hwss.prepare_bandwidth(dc, dc->current_state); core_link_enable_stream(dc->current_state, pipe_ctx); @@ -2094,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; - if 
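/*
 * Editor's sketch of the safer DSC-update flow introduced above: instead of
 * mutating the live state and rolling back on failure, validation now runs
 * against a throwaway copy, and the new config is only committed (and the
 * update kept) when the bandwidth check passes. The types and the trivial
 * validator below are simplified stand-ins, not the real DML check.
 */
#include <stdbool.h>

struct dsc_timing_sketch {
	int dsc_bits_per_pixel;
	bool dsc_enabled;
};

static bool validate_bandwidth_sketch(const struct dsc_timing_sketch *t)
{
	return !t->dsc_enabled || t->dsc_bits_per_pixel > 0;
}

static bool apply_dsc_sketch(struct dsc_timing_sketch *timing,
			     const struct dsc_timing_sketch *new_cfg)
{
	/* Build and validate a trial copy so failure leaves *timing untouched. */
	struct dsc_timing_sketch trial = *new_cfg;

	if (!validate_bandwidth_sketch(&trial))
		return false;           /* mirrors clearing update->dsc_config */

	*timing = trial;                /* commit only after validation passes */
	return true;
}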
(dc->optimize_seamless_boot && surface_count > 0) { + if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower * bandwidth. Important to note that it is expected UEFI will @@ -2103,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc, */ if (stream->apply_seamless_boot_optimization) { stream->apply_seamless_boot_optimization = false; - dc->optimize_seamless_boot = false; - dc->optimized_required = true; + dc->optimize_seamless_boot_streams--; + + if (dc->optimize_seamless_boot_streams == 0) + dc->optimized_required = true; } } - if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) { + if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) { dc->hwss.prepare_bandwidth(dc, context); context_clock_trace(dc, context); } @@ -2124,15 +2186,12 @@ static void commit_planes_for_stream(struct dc *dc, */ if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); -#endif return; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (!IS_DIAG_DC(dc->ctx->dce_environment)) { for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2154,7 +2213,6 @@ static void commit_planes_for_stream(struct dc *dc, } } } -#endif // Update Type FULL, Surface updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2175,7 +2233,6 @@ static void commit_planes_for_stream(struct dc *dc, if (update_type == UPDATE_TYPE_FAST) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); if (dc->hwss.program_triplebuffer != NULL && @@ -2184,7 +2241,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } -#endif stream_status = stream_get_status(context, pipe_ctx->stream); @@ -2193,10 +2249,24 @@ static void commit_planes_for_stream(struct dc *dc, dc, pipe_ctx->stream, stream_status->plane_count, context); } } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); +#ifdef CONFIG_DRM_AMD_DC_DCN + if (dc->debug.validate_dml_output) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; + if (cur_pipe.stream == NULL) + continue; + + cur_pipe.plane_res.hubp->funcs->validate_dml_output( + cur_pipe.plane_res.hubp, dc->ctx, + &context->res_ctx.pipe_ctx[i].rq_regs, + &context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs); + } + } #endif + } // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { @@ -2206,7 +2276,6 @@ static void commit_planes_for_stream(struct dc *dc, */ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.set_flip_control_gsl) for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2225,7 +2294,6 @@ static void commit_planes_for_stream(struct dc *dc, plane_state->flip_immediate); } } -#endif /* Perform requested Updates */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = 
srf_updates[i].surface; @@ -2238,7 +2306,6 @@ static void commit_planes_for_stream(struct dc *dc, if (pipe_ctx->plane_state != plane_state) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && !dc->debug.disable_tri_buf) { @@ -2246,7 +2313,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, plane_state->triplebuffer_flips); } -#endif if (srf_updates[i].flip_addr) dc->hwss.update_plane_addr(dc, pipe_ctx); } @@ -2396,25 +2462,21 @@ void dc_set_power_state( enum dc_acpi_cm_power_state power_state) { struct kref refcount; - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), - GFP_KERNEL); - - ASSERT(dml); - if (!dml) - return; + struct display_mode_lib *dml; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); + if (dc->ctx->dmub_srv) + dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); + dc->hwss.init_hw(dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 if (dc->hwss.init_sys_ctx != NULL && dc->vm_pa_config.valid) { dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } -#endif break; default: @@ -2423,6 +2485,12 @@ void dc_set_power_state( * clean state, and dc hw programming optimizations will not * cause any trouble. */ + dml = kzalloc(sizeof(struct display_mode_lib), + GFP_KERNEL); + + ASSERT(dml); + if (!dml) + return; /* Preserve refcount */ refcount = dc->current_state->refcount; @@ -2436,10 +2504,10 @@ void dc_set_power_state( dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; + kfree(dml); + break; } - - kfree(dml); } void dc_resume(struct dc *dc) @@ -2494,6 +2562,17 @@ bool dc_submit_i2c( cmd); } +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd) +{ + struct ddc_service *ddc = dc->res_pool->oem_device; + return dce_i2c_submit_command( + dc->res_pool, + ddc->ddc_pin, + cmd); +} + static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) { if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index b9227d5de3a3..502ed3c7959d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -33,7 +33,6 @@ #include "core_status.h" #include "core_types.h" -#include "hw_sequencer.h" #include "resource.h" @@ -310,14 +309,13 @@ void context_timing_trace( struct resource_context *res_ctx) { int i; - struct dc *core_dc = dc; int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; struct crtc_position position; - unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; DC_LOGGER_INIT(dc->ctx->logger); - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; /* get_position() returns CRTC vertical/horizontal counter * hence not applicable for underlay pipe @@ -329,7 +327,7 @@ void context_timing_trace( h_pos[i] = position.horizontal_count; v_pos[i] = position.vertical_count; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) @@ -347,7 +345,7 @@ void context_clock_trace( struct dc *dc, struct dc_state *context) { -#if 
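/*
 * Editor's sketch of the dc_set_power_state() change above: the
 * display_mode_lib snapshot is now allocated only on the power-down path
 * that needs it, used to carry bw_ctx.dml (and the refcount) across the
 * destructive state reset, and freed as soon as it has been copied back.
 * All names below are invented stand-ins for the real state objects.
 */
#include <stdlib.h>
#include <string.h>

struct power_state_sketch {
	int refcount;
	double dml[8];                  /* stand-in for bw_ctx.dml */
};

static void reset_state_sketch(struct power_state_sketch *s)
{
	memset(s, 0, sizeof(*s));       /* stand-in for destruct + construct */
}

static int power_down_sketch(struct power_state_sketch *state)
{
	/* Lazily snapshot only what must survive the reset. */
	double *dml = malloc(sizeof(state->dml));
	int refcount = state->refcount;

	if (!dml)
		return -1;

	memcpy(dml, state->dml, sizeof(state->dml));
	reset_state_sketch(state);

	/* Restore the preserved pieces, then drop the snapshot right away. */
	state->refcount = refcount;
	memcpy(state->dml, dml, sizeof(state->dml));
	free(dml);
	return 0;
}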
defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 62d8289abb4e..a09119c10d7c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -45,6 +45,7 @@ #include "dpcd_defs.h" #include "dmcu.h" #include "hw/clk_mgr.h" +#include "../dce/dmub_psr.h" #define DC_LOGGER_INIT(logger) @@ -74,7 +75,7 @@ enum { /******************************************************************************* * Private functions ******************************************************************************/ -static void destruct(struct dc_link *link) +static void dc_link_destruct(struct dc_link *link) { int i; @@ -817,8 +818,8 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_EDP: { - read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; @@ -850,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link, if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle plug in without display or downstream unplug*/ + /* Active dongle downstream unplug*/ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink != NULL) { + if (prev_sink != NULL) /* Downstream unplug */ dc_sink_release(prev_sink); - } else { - /* Empty dongle plug in */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } return true; } @@ -968,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link, same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && - reason != DETECT_REASON_HPDRX) { + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings @@ -1244,7 +1238,7 @@ static enum transmitter translate_encoder_to_transmitter( } } -static bool construct( +static bool dc_link_construct( struct dc_link *link, const struct link_init_data *init_params) { @@ -1446,7 +1440,7 @@ struct dc_link *link_create(const struct link_init_data *init_params) if (NULL == link) goto alloc_fail; - if (false == construct(link, init_params)) + if (false == dc_link_construct(link, init_params)) goto construct_fail; return link; @@ -1460,7 +1454,7 @@ alloc_fail: void link_destroy(struct dc_link **link) { - destruct(*link); + dc_link_destruct(*link); kfree(*link); *link = NULL; } @@ -1495,10 +1489,7 @@ static enum dc_status enable_link_dp( bool skip_video_pattern; struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; - enum dp_panel_mode panel_mode; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable; -#endif int i; bool apply_seamless_boot_optimization = false; @@ -1514,15 +1505,6 @@ static enum dc_status enable_link_dp( decide_link_settings(stream, &link_settings); if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { - /* If link settings are different than current and link already enabled - * then need to disable before programming 
to new rate. - */ - if (link->link_status.link_active && - (link->cur_link_settings.lane_count != link_settings.lane_count || - link->cur_link_settings.link_rate != link_settings.link_rate)) { - dp_disable_link_phy(link, pipe_ctx->stream->signal); - } - /*in case it is not on*/ link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); @@ -1533,50 +1515,29 @@ static enum dc_status enable_link_dp( if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); - dp_enable_link_phy( - link, - pipe_ctx->stream->signal, - pipe_ctx->clock_source->id, - &link_settings); - - if (stream->sink_patches.dppowerup_delay > 0) { - int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - - msleep(delay_dp_power_up_in_ms); - } - - panel_mode = dp_get_panel_mode(link); - dp_set_panel_mode(link, panel_mode); - skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) skip_video_pattern = false; - if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, &link_settings); - - link->cur_link_settings = link_settings; - status = DC_OK; - } else if (perform_link_training_with_retries( - link, + if (perform_link_training_with_retries( &link_settings, skip_video_pattern, - LINK_TRAINING_ATTEMPTS)) { + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; } else status = DC_FAIL_DP_LINK_TRAINING; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_enable(link, fec_enable); -#endif return status; } @@ -2063,6 +2024,45 @@ static void write_i2c_redriver_setting( ASSERT(i2c_success); } +static void disable_link(struct dc_link *link, enum signal_type signal) +{ + /* + * TODO: implement call for dp_set_hw_test_pattern + * it is needed for compliance testing + */ + + /* Here we need to specify that encoder output settings + * need to be calculated as for the set mode, + * it will lead to querying dynamic link capabilities + * which should be done before enable output + */ + + if (dc_is_dp_signal(signal)) { + /* SST DP, eDP */ + if (dc_is_dp_sst_signal(signal)) + dp_disable_link_phy(link, signal); + else + dp_disable_link_phy_mst(link, signal); + + if (dc_is_dp_sst_signal(signal) || + link->mst_stream_alloc_table.stream_count == 0) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, false); + } + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } +} + static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -2147,6 +2147,19 @@ static enum dc_status enable_link( struct pipe_ctx *pipe_ctx) { enum dc_status status = DC_ERROR_UNEXPECTED; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + /* There's some scenarios where driver is unloaded with display + * still enabled. When driver is reloaded, it may cause a display + * to not light up if there is a mismatch between old and new + * link settings. 
Need to call disable first before enabling at + * new link settings. + */ + if (link->link_status.link_active) { + disable_link(link, pipe_ctx->stream->signal); + } + switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: status = enable_link_dp(state, pipe_ctx); @@ -2181,46 +2194,6 @@ static enum dc_status enable_link( return status; } -static void disable_link(struct dc_link *link, enum signal_type signal) -{ - /* - * TODO: implement call for dp_set_hw_test_pattern - * it is needed for compliance testing - */ - - /* here we need to specify that encoder output settings - * need to be calculated as for the set mode, - * it will lead to querying dynamic link capabilities - * which should be done before enable output */ - - if (dc_is_dp_signal(signal)) { - /* SST DP, eDP */ - if (dc_is_dp_sst_signal(signal)) - dp_disable_link_phy(link, signal); - else - dp_disable_link_phy_mst(link, signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - - if (dc_is_dp_sst_signal(signal) || - link->mst_stream_alloc_table.stream_count == 0) { - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, false); - } -#endif - } else { - if (signal != SIGNAL_TYPE_VIRTUAL) - link->link_enc->funcs->disable_output(link->link_enc, signal); - } - - if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count <= 0) - link->link_status.link_active = false; - } else { - link->link_status.link_active = false; - } -} - static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing) { @@ -2357,9 +2330,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; + struct dmcu *dmcu = dc->res_pool->dmcu; unsigned int controller_id = 0; bool use_smooth_brightness = true; int i; @@ -2377,22 +2350,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { for (i = 0; i < MAX_PIPES; i++) { - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { - if (core_dc->current_state->res_ctx. + if (dc->current_state->res_ctx.pipe_ctx[i].stream) { + if (dc->current_state->res_ctx. 
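/*
 * Editor's sketch of the guard added to enable_link() above: if the PHY is
 * already active (for instance after a driver reload with the panel still
 * lit), it is torn down before programming the new settings so that a
 * rate/lane-count mismatch cannot leave the display dark. Types and helpers
 * are invented for illustration.
 */
#include <stdbool.h>

struct link_sketch {
	bool link_active;
	int rate_khz;                   /* stand-in for cur_link_settings */
};

static void disable_link_sketch(struct link_sketch *l)
{
	l->link_active = false;
}

static void program_link_sketch(struct link_sketch *l, int rate_khz)
{
	l->rate_khz = rate_khz;
	l->link_active = true;
}

static void enable_link_sketch(struct link_sketch *l, int new_rate_khz)
{
	/* Disable first whenever the old settings might still be live. */
	if (l->link_active)
		disable_link_sketch(l);
	program_link_sketch(l, new_rate_khz);
}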
pipe_ctx[i].stream->link == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ controller_id = - core_dc->current_state-> + dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; /* Disable brightness ramping when the display is blanked * as it can hang the DMCU */ - if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) frame_ramp = 0; } } @@ -2410,8 +2383,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, bool dc_link_set_abm_disable(const struct dc_link *link) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) return false; @@ -2423,12 +2396,13 @@ bool dc_link_set_abm_disable(const struct dc_link *link) bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; - - + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; - if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + if ((psr != NULL) && link->psr_feature_enabled) + psr->funcs->set_psr_enable(psr, allow_active); + else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); link->psr_allow_active = allow_active; @@ -2438,10 +2412,13 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; - if (dmcu != NULL && link->psr_feature_enabled) + if (psr != NULL && link->psr_feature_enabled) + psr->funcs->get_psr_state(psr_state); + else if (dmcu != NULL && link->psr_feature_enabled) dmcu->funcs->get_psr_state(dmcu, psr_state); return true; @@ -2486,8 +2463,9 @@ bool dc_link_setup_psr(struct dc_link *link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context) { - struct dc *core_dc; + struct dc *dc; struct dmcu *dmcu; + struct dmub_psr *psr; int i; /* updateSinkPsrDpcdConfig*/ union dpcd_psr_configuration psr_configuration; @@ -2497,10 +2475,11 @@ bool dc_link_setup_psr(struct dc_link *link, if (!link) return false; - core_dc = link->ctx->dc; - dmcu = core_dc->res_pool->dmcu; + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; + psr = dc->res_pool->psr; - if (!dmcu) + if (!dmcu && !psr) return false; @@ -2537,13 +2516,13 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->engineId = link->link_enc->preferred_engine; for (i = 0; i < MAX_PIPES; i++) { - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream + if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { /* dmcu -1 for all controller id values, * therefore +1 here */ psr_context->controllerId = - core_dc->current_state->res_ctx. + dc->current_state->res_ctx. 
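/*
 * Editor's sketch of the PSR dispatch pattern introduced above: the DMUB
 * firmware implementation (res_pool->psr) is preferred when present, with
 * the legacy DMCU path kept as a fallback; both are gated on
 * psr_feature_enabled. The interface struct below is an invented stand-in
 * for the real dmub_psr/dmcu function tables.
 */
#include <stdbool.h>

struct psr_if_sketch {
	void (*set_enable)(bool allow_active);
};

static void set_psr_allow_active_sketch(const struct psr_if_sketch *dmub_psr,
					const struct psr_if_sketch *dmcu,
					bool psr_feature_enabled,
					bool allow_active)
{
	if (!psr_feature_enabled)
		return;

	if (dmub_psr && dmub_psr->set_enable)
		dmub_psr->set_enable(allow_active);     /* DMUB-based PSR */
	else if (dmcu && dmcu->set_enable)
		dmcu->set_enable(allow_active);         /* legacy DMCU path */
}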
pipe_ctx[i].stream_res.tg->inst + 1; break; } @@ -2556,7 +2535,7 @@ bool dc_link_setup_psr(struct dc_link *link, transmitter_to_phy_id(link->link_enc->transmitter); psr_context->crtcTimingVerticalTotal = stream->timing.v_total; - psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> + psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> timing.pix_clk_100hz * 100), stream->timing.v_total), stream->timing.h_total); @@ -2586,7 +2565,7 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->psr_level.u32all = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /*skip power down the single pipe since it blocks the cstate*/ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; @@ -2609,7 +2588,10 @@ bool dc_link_setup_psr(struct dc_link *link, */ psr_context->frame_delay = 0; - link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + if (psr) + link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context); + else + link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); /* psr_enabled == 0 indicates setup_psr did not succeed, but this * should not happen since firmware should be running at this point @@ -2644,28 +2626,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) return dc_fixpt_div_int(mbytes_per_sec, 54); } -static int get_color_depth(enum dc_color_depth color_depth) -{ - switch (color_depth) { - case COLOR_DEPTH_666: return 6; - case COLOR_DEPTH_888: return 8; - case COLOR_DEPTH_101010: return 10; - case COLOR_DEPTH_121212: return 12; - case COLOR_DEPTH_141414: return 14; - case COLOR_DEPTH_161616: return 16; - default: return 0; - } -} - static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { - uint32_t bpc; uint64_t kbps; struct fixed31_32 peak_kbps; uint32_t numerator; uint32_t denominator; - bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); /* @@ -2899,6 +2866,52 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) return DC_OK; } + +enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe_ctx; + + // Clear all of MST payload then reallocate + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + + /* driver enable split pipe for external monitors + * we have to check pipe_ctx is split pipe or not + * If it's split pipe, driver using top pipe to + * reaallocate. + */ + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + deallocate_mst_payload(pipe_ctx); + } + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* enable/disable PHY will clear connection between BE and FE + * need to restore it. 
+ */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + dc_link_allocate_mst_payload(pipe_ctx); + } + } + + return DC_OK; +} + #if defined(CONFIG_DRM_AMD_DC_HDCP) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { @@ -2922,12 +2935,12 @@ void core_link_enable_stream( struct dc_state *state, struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; @@ -2946,6 +2959,7 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing, stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) @@ -2969,14 +2983,14 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; pipe_ctx->stream->apply_edp_fast_boot_optimization = false; resource_build_info_frame(pipe_ctx); - core_dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { @@ -3019,7 +3033,7 @@ void core_link_enable_stream( } } - core_dc->hwss.enable_audio_stream(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) @@ -3027,28 +3041,24 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif - core_dc->hwss.enable_stream(pipe_ctx); + dc->hwss.enable_stream(pipe_ctx); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Set DPS PPS SDP (AKA "info frames") */ if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_pps_sdp(pipe_ctx, true); } -#endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); - core_dc->hwss.unblank_stream(pipe_ctx, + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -3056,24 +3066,21 @@ void core_link_enable_stream( #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); #endif - } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) + } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif } void core_link_disable_stream(struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct 
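/*
 * Editor's sketch of the new dc_link_reallocate_mst_payload() flow above:
 * two passes over the pipe list, the first releasing every MST payload on
 * the link and the second rebuilding them, both skipping split-pipe entries
 * (pipes with a top_pipe) so each stream is handled exactly once. All types
 * and helpers below are invented stand-ins.
 */
#include <stdbool.h>

struct mst_pipe_sketch {
	bool is_split_secondary;        /* stand-in for pipe_ctx->top_pipe */
	bool mst_stream_on_link;        /* MST stream using the link, not off */
};

static void dealloc_payload_sketch(struct mst_pipe_sketch *p) { (void)p; }
static void alloc_payload_sketch(struct mst_pipe_sketch *p)   { (void)p; }

static void reallocate_mst_payloads_sketch(struct mst_pipe_sketch *pipes, int n)
{
	int i;

	/* Pass 1: clear all payloads so the allocation table starts empty. */
	for (i = 0; i < n; i++) {
		if (pipes[i].is_split_secondary || !pipes[i].mst_stream_on_link)
			continue;
		dealloc_payload_sketch(&pipes[i]);
	}

	/* Pass 2: re-allocate from scratch (the real code also reconnects
	 * the DIG back end to the front end first). */
	for (i = 0; i < n; i++) {
		if (pipes[i].is_split_secondary || !pipes[i].mst_stream_on_link)
			continue;
		alloc_payload_sketch(&pipes[i]);
	}
}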
dc_link *link = stream->sink->link; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; @@ -3081,7 +3088,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) update_psp_stream_config(pipe_ctx, true); #endif - core_dc->hwss.blank_stream(pipe_ctx); + dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); @@ -3110,25 +3117,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) write_i2c_redriver_setting(pipe_ctx, false); } } - core_dc->hwss.disable_stream(pipe_ctx); + dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } -#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) return; - core_dc->hwss.set_avmute(pipe_ctx, enable); + dc->hwss.set_avmute(pipe_ctx, enable); } /** @@ -3186,13 +3191,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( uint32_t bits_per_channel = 0; uint32_t kbps; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (timing->flags.DSC) { kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); return kbps; } -#endif switch (timing->display_color_depth) { case COLOR_DEPTH_666: @@ -3345,6 +3348,7 @@ void dc_link_disable_hpd(const struct dc_link *link) void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3353,6 +3357,7 @@ void dc_link_set_test_pattern(struct dc_link *link, dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, p_link_settings, p_custom_pattern, cust_pattern_size); @@ -3368,7 +3373,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { /* Account for FEC overhead. 
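/*
 * Editor's sketch of the DSC branch of dc_bandwidth_in_kbps_from_timing()
 * shown above: with the pixel clock in 100 Hz units and (this is an
 * assumption) dsc_cfg.bits_per_pixel in 1/16-bpp units, dividing the product
 * by 160 yields kbps, rounded up. The example values in main() are
 * hypothetical, not taken from the patch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dsc_kbps_sketch(uint32_t pix_clk_100hz, uint32_t bpp_x16)
{
	uint64_t kbps = (uint64_t)pix_clk_100hz * bpp_x16;

	return (uint32_t)(kbps / 160 + (kbps % 160 ? 1 : 0));
}

int main(void)
{
	/* 600 MHz pixel clock compressed to 12 bpp (12 * 16 = 192):
	 * 600 MHz * 12 bpp = 7.2 Gbps, i.e. 7,200,000 kbps. */
	printf("%u kbps\n", dsc_kbps_sketch(6000000, 192));
	return 0;
}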
* We have to do it based on caps, @@ -3393,7 +3397,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000, link_bw_kbps, 32); } -#endif return link_bw_kbps; @@ -3407,3 +3410,10 @@ const struct dc_link_settings *dc_link_get_link_cap( return &link->preferred_link_setting; return &link->verified_link_cap; } + +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link) +{ + dp_overwrite_extended_receiver_cap(link); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 81789191d4ec..a49c10d5df26 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add( } -static void construct( +static void ddc_service_construct( struct ddc_service *ddc_service, struct ddc_service_init_data *init_data) { @@ -206,7 +206,10 @@ static void construct( ddc_service->ddc_pin = NULL; } else { hw_info.ddc_channel = i2c_info.i2c_line; - hw_info.hw_supported = i2c_info.i2c_hw_assist; + if (ddc_service->link != NULL) + hw_info.hw_supported = i2c_info.i2c_hw_assist; + else + hw_info.hw_supported = false; ddc_service->ddc_pin = dal_gpio_create_ddc( gpio_service, @@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create( if (!ddc_service) return NULL; - construct(ddc_service, init_data); + ddc_service_construct(ddc_service, init_data); return ddc_service; } -static void destruct(struct ddc_service *ddc) +static void ddc_service_destruct(struct ddc_service *ddc) { if (ddc->ddc_pin) dal_gpio_destroy_ddc(&ddc->ddc_pin); @@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc) BREAK_TO_DEBUGGER(); return; } - destruct(*ddc); + ddc_service_destruct(*ddc); kfree(*ddc); *ddc = NULL; } @@ -587,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, struct aux_payload *payload) { uint32_t retrieved = 0; - bool ret = 0; + bool ret = false; if (!ddc) return false; @@ -647,17 +650,16 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, } -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout) { - enum dc_status status = DC_OK; + uint32_t prev_timeout = 0; struct ddc *ddc_pin = ddc->ddc_pin; - if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) - return DC_ERROR_UNEXPECTED; - if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) - status = DC_ERROR_UNEXPECTED; - return status; + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) + prev_timeout = + ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); + return prev_timeout; } /*test only function*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 504055fc70e8..cb731c1d30b1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4,12 +4,8 @@ #include "dc_link_dp.h" #include "dm_helpers.h" #include "opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif #include "inc/core_types.h" #include "link_hwss.h" @@ -21,6 +17,9 @@ #define DC_LOGGER \ link->ctx->logger + +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 + /* maximum pre 
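/*
 * Editor's sketch of the FEC derating applied in dc_link_bandwidth_kbps()
 * above: when the sink is FEC capable, the payload rate is scaled by
 * 970/1000 using a 32.32 fixed-point multiply, mirroring
 * mul_u64_u32_shr(BIT_ULL(32) * 970 / 1000, link_bw_kbps, 32). The HBR2 x4
 * figure in main() is an assumed example (4,320,000 kbps of 8b/10b payload
 * per lane), not a value from the patch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t apply_fec_derating_sketch(uint32_t link_bw_kbps)
{
	uint64_t factor = (UINT64_C(1) << 32) * 970 / 1000;    /* ~0.97 in 32.32 */

	return (uint32_t)((factor * link_bw_kbps) >> 32);
}

int main(void)
{
	/* 4 lanes x 4,320,000 kbps -> roughly 16,761,600 kbps after FEC. */
	printf("%u kbps\n", apply_fec_derating_sketch(4u * 4320000u));
	return 0;
}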
emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, @@ -221,19 +220,31 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static inline bool is_repeater(struct dc_link *link, uint32_t offset) +{ + return (!link->is_lttpr_mode_transparent && offset != 0); +} + static void dpcd_set_lt_pattern_and_lane_settings( struct dc_link *link, const struct link_training_settings *lt_settings, - enum dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; - const uint32_t dpcd_base_lt_offset = - DP_TRAINING_PATTERN_SET; + + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; union dpcd_training_pattern dpcd_pattern = { {0} }; uint32_t lane; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(link, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); /***************************************************************** * DpcdAddress_TrainingPatternSet @@ -241,14 +252,21 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; - DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", - __func__, - DP_TRAINING_PATTERN_SET, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } /***************************************************************** * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set *****************************************************************/ @@ -268,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings( PRE_EMPHASIS_MAX_LEVEL ? 
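/*
 * Editor's sketch of the per-repeater DPCD addressing used throughout the
 * LTTPR changes above: offset 0 addresses the sink (DPRX) at the ordinary
 * register, while offsets 1..N address repeater N at
 * repeater1_base + 0x50 * (offset - 1), 0x50 being
 * DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE. The helper below is an invented
 * illustration, not the driver's function.
 */
#include <stdint.h>

#define REPEATER_STRIDE_SKETCH 0x50     /* per-repeater DPCD block size */

static uint32_t training_reg_addr_sketch(uint32_t dprx_base,
					 uint32_t repeater1_base,
					 uint32_t offset)
{
	if (offset == 0)
		return dprx_base;       /* non-repeater case: plain register */
	return repeater1_base + REPEATER_STRIDE_SKETCH * (offset - 1);
}

/*
 * For instance, with a repeater-1 base of 0xF0000 this places repeater 3's
 * copy of the same register at 0xF00A0 (0xF0000 + 2 * 0x50).
 */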
1 : 0); } - /* concatinate everything into one buffer*/ + /* concatenate everything into one buffer*/ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]); // 0x00103 - 0x00102 memmove( - &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset], + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], dpcd_lane, size_in_bytes); - DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is * causing issues on some eDP panels (EPR#366724) @@ -495,8 +524,12 @@ static void get_lane_status_and_drive_settings( const struct link_training_settings *link_training_setting, union lane_status *ln_status, union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings) + struct link_training_settings *req_settings, + uint32_t offset) { + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; struct link_training_settings request_settings = { {0} }; @@ -504,9 +537,16 @@ static void get_lane_status_and_drive_settings( memset(req_settings, '\0', sizeof(struct link_training_settings)); + if (is_repeater(link, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + core_link_read_dpcd( link, - DP_LANE0_1_STATUS, + lane01_status_address, (uint8_t *)(dpcd_buf), sizeof(dpcd_buf)); @@ -517,22 +557,47 @@ static void get_lane_status_and_drive_settings( ln_status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); dpcd_lane_adjust[lane].raw = - get_nibble_at_index(&dpcd_buf[4], lane); + get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } ln_status_updated->raw = dpcd_buf[2]; - DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ", - __func__, - DP_LANE0_1_STATUS, dpcd_buf[0], - DP_LANE2_3_STATUS, dpcd_buf[1]); - - DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n", - __func__, - DP_ADJUST_REQUEST_LANE0_1, - dpcd_buf[4], - DP_ADJUST_REQUEST_LANE2_3, - dpcd_buf[5]); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X 
Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + } + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + if (is_repeater(link, offset)) + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } /*copy to req_settings*/ request_settings.link_settings.lane_count = @@ -571,10 +636,18 @@ static void get_lane_status_and_drive_settings( static void dpcd_set_lane_settings( struct dc_link *link, - const struct link_training_settings *link_training_setting) + const struct link_training_settings *link_training_setting, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; uint32_t lane; + unsigned int lane0_set_address; + + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); for (lane = 0; lane < (uint32_t)(link_training_setting-> @@ -597,7 +670,7 @@ static void dpcd_set_lane_settings( } core_link_write_dpcd(link, - DP_TRAINING_LANE0_SET, + lane0_set_address, (uint8_t *)(dpcd_lane), link_training_setting->link_settings.lane_count); @@ -620,14 +693,26 @@ static void dpcd_set_lane_settings( } */ - DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } link->cur_lane_setting = link_training_setting->lane_settings[0]; } @@ -647,17 +732,6 @@ static bool is_max_vs_reached( } -void dc_link_dp_set_drive_settings( - struct dc_link *link, - struct link_training_settings *lt_settings) -{ - /* program ASIC PHY settings*/ - dp_set_hw_lane_settings(link, lt_settings); - - /* Notify DP sink the PHY settings from source */ - dpcd_set_lane_settings(link, lt_settings); -} - static bool perform_post_lt_adj_req_sequence( struct dc_link *link, struct link_training_settings *lt_settings) @@ -690,7 +764,8 @@ static bool perform_post_lt_adj_req_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + 
&req_settings, + DPRX); if (dpcd_lane_status_updated.bits. POST_LT_ADJ_REQ_IN_PROGRESS == 0) @@ -747,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence( } +/* Only used for channel equalization */ +static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ + unsigned int aux_rd_interval_us = 400; + + switch (dpcd_aux_read_interval) { + case 0x01: + aux_rd_interval_us = 400; + break; + case 0x02: + aux_rd_interval_us = 4000; + break; + case 0x03: + aux_rd_interval_us = 8000; + break; + case 0x04: + aux_rd_interval_us = 16000; + break; + default: + break; + } + + return aux_rd_interval_us; +} + static enum link_training_result get_cr_failure(enum dc_lane_count ln_count, union lane_status *dpcd_lane_status) { @@ -765,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count, static enum link_training_result perform_channel_equalization_sequence( struct dc_link *link, - struct link_training_settings *lt_settings) + struct link_training_settings *lt_settings, + uint32_t offset) { struct link_training_settings req_settings; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; + uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; - dp_set_hw_training_pattern(link, tr_pattern); + if (is_repeater(link, offset)) + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + dp_set_hw_training_pattern(link, tr_pattern, offset); for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { - dp_set_hw_lane_settings(link, lt_settings); + dp_set_hw_lane_settings(link, lt_settings, offset); /* 2. update DPCD*/ if (!retries_ch_eq) /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration*/ + * but only for the 1-st iteration + */ + dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern); + tr_pattern, offset); else - dpcd_set_lane_settings(link, lt_settings); + dpcd_set_lane_settings(link, lt_settings, offset); /* 3. wait for receiver to lock-on*/ - wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time); + wait_time_microsec = lt_settings->eq_pattern_time; + + if (is_repeater(link, offset)) + wait_time_microsec = + translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + + wait_for_training_aux_rd_interval( + link, + wait_time_microsec); /* 4. Read lane status and requested * drive settings as set by the sink*/ @@ -805,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + offset); /* 5. 
check CR done*/ if (!is_cr_done(lane_count, dpcd_lane_status)) @@ -824,13 +943,16 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_EQ_FAIL_EQ; } +#define TRAINING_AUX_RD_INTERVAL 100 //us static enum link_training_result perform_clock_recovery_sequence( struct dc_link *link, - struct link_training_settings *lt_settings) + struct link_training_settings *lt_settings, + uint32_t offset) { uint32_t retries_cr; uint32_t retry_count; + uint32_t wait_time_microsec; struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; @@ -840,7 +962,7 @@ static enum link_training_result perform_clock_recovery_sequence( retries_cr = 0; retry_count = 0; - dp_set_hw_training_pattern(link, tr_pattern); + dp_set_hw_training_pattern(link, tr_pattern, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -857,25 +979,33 @@ static enum link_training_result perform_clock_recovery_sequence( /* 1. call HWSS to set lane settings*/ dp_set_hw_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 2. update DPCD of the receiver*/ - if (!retries_cr) + if (!retry_count) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration.*/ dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern); + tr_pattern, + offset); else dpcd_set_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + if (!link->is_lttpr_mode_transparent) + wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + wait_for_training_aux_rd_interval( link, - lt_settings->cr_pattern_time); + wait_time_microsec); /* 4. Read lane status and requested drive * settings as set by the sink @@ -885,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + offset); /* 5. 
check CR done*/ if (is_cr_done(lane_count, dpcd_lane_status)) @@ -1054,6 +1185,102 @@ static void initialize_training_settings( lt_settings->enhanced_framing = 1; } +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static void configure_lttpr_mode(struct dc_link *link) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; + enum dc_status result = DC_ERROR_UNEXPECTED; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (!link->is_lttpr_mode_transparent) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } + } +} + +static void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = { {0} }; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + static void print_status_message( struct dc_link *link, const struct link_training_settings *lt_settings, @@ -1133,6 +1360,17 @@ static void print_status_message( lt_spread); } +void dc_link_dp_set_drive_settings( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, lt_settings, DPRX); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + bool dc_link_dp_perform_link_training_skip_aux( struct dc_link *link, const struct dc_link_settings *link_setting) @@ -1149,10 +1387,10 @@ bool 
dc_link_dp_perform_link_training_skip_aux( /* 1. Perform_clock_recovery_sequence. */ /* transmit training pattern for clock recovery */ - dp_set_hw_training_pattern(link, pattern_for_cr); + dp_set_hw_training_pattern(link, pattern_for_cr, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, &lt_settings); + dp_set_hw_lane_settings(link, &lt_settings, DPRX); /* wait receiver to lock-on*/ wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); @@ -1160,10 +1398,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 2. Perform_channel_equalization_sequence. */ /* transmit training pattern for channel equalization. */ - dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq); + dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, &lt_settings); + dp_set_hw_lane_settings(link, &lt_settings, DPRX); /* wait receiver to lock-on. */ wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); @@ -1185,9 +1423,10 @@ enum link_training_result dc_link_dp_perform_link_training( { enum link_training_result status = LINK_TRAINING_SUCCESS; struct link_training_settings lt_settings; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + bool fec_enable; -#endif + uint8_t repeater_cnt; + uint8_t repeater_id; initialize_training_settings( link, @@ -1198,23 +1437,47 @@ enum link_training_result dc_link_dp_perform_link_training( /* 1. set link rate, lane count and spread. */ dpcd_set_link_settings(link, &lt_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_ready(link, fec_enable); -#endif + if (!link->is_lttpr_mode_transparent) { + /* Configure lttpr mode */ + configure_lttpr_mode(link); + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + status = perform_channel_equalization_sequence(link, + &lt_settings, + repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + repeater_training_done(link, repeater_id); + } + } - /* 2. 
perform link training (set link training done - * to false is done as well) - */ - status = perform_clock_recovery_sequence(link, &lt_settings); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_clock_recovery_sequence(link, &lt_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) { status = perform_channel_equalization_sequence(link, - &lt_settings); + &lt_settings, + DPRX); + } } if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { @@ -1233,23 +1496,58 @@ } bool perform_link_training_with_retries( - struct dc_link *link, const struct dc_link_settings *link_setting, bool skip_video_pattern, - int attempts) + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal) { uint8_t j; uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dp_panel_mode panel_mode = dp_get_panel_mode(link); for (j = 0; j < attempts; ++j) { - if (dc_link_dp_perform_link_training( + dp_enable_link_phy( + link, + signal, + pipe_ctx->clock_source->id, + link_setting); + + if (stream->sink_patches.dppowerup_delay > 0) { + int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + + msleep(delay_dp_power_up_in_ms); + } + + dp_set_panel_mode(link, panel_mode); + + /* We need to do this before the link training to ensure the idle pattern in SST + * mode will be sent right after the link training + */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + + if (link->aux_access_disabled) { + dc_link_dp_perform_link_training_skip_aux(link, link_setting); + return true; + } else if (dc_link_dp_perform_link_training( link, link_setting, skip_video_pattern) == LINK_TRAINING_SUCCESS) return true; + /* latest link training still fail, skip delay and keep PHY on + */ + if (j == (attempts - 1)) + break; + + dp_disable_link_phy(link, signal); + msleep(delay_between_attempts); + delay_between_attempts += LINK_TRAINING_RETRY_DELAY; } @@ -1321,9 +1619,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt( enum link_training_result lt_status = LINK_TRAINING_SUCCESS; enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT; enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable = false; -#endif initialize_training_settings( link, @@ -1343,11 +1639,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_enable_link_phy(link, link->connector_signal, dp_cs_id, link_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Set FEC enable */ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; dp_set_fec_ready(link, fec_enable); -#endif if (lt_overrides->alternate_scrambler_reset) { if (*lt_overrides->alternate_scrambler_reset) @@ -1367,10 +1661,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt( /* 2. perform link training (set link training done * to false is done as well) */ - lt_status = perform_clock_recovery_sequence(link, &lt_settings); + lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX); if (lt_status == LINK_TRAINING_SUCCESS) { lt_status = perform_channel_equalization_sequence(link, - &lt_settings); + &lt_settings, + DPRX); } /* 3. 
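To summarize the control flow introduced above: in non-transparent LTTPR mode the driver trains each repeater before the sink, walking from the highest repeater ID down to 1 and only then training the DPRX. A condensed sketch of that order, with hypothetical train_cr()/train_eq() stubs standing in for perform_clock_recovery_sequence() and perform_channel_equalization_sequence():

/* Illustrative sketch only; retry handling and the per-repeater
 * repeater_training_done() notification are omitted for brevity.
 */
static enum link_training_result example_lttpr_training_order(uint8_t repeater_cnt)
{
	enum link_training_result status = LINK_TRAINING_SUCCESS;
	uint8_t id;

	for (id = repeater_cnt; id > 0 && status == LINK_TRAINING_SUCCESS; id--) {
		status = train_cr(id);          /* hypothetical stub */
		if (status == LINK_TRAINING_SUCCESS)
			status = train_eq(id);  /* hypothetical stub */
	}

	if (status == LINK_TRAINING_SUCCESS)
		status = train_cr(DPRX);        /* finally, the sink itself */
	if (status == LINK_TRAINING_SUCCESS)
		status = train_eq(DPRX);

	return status;
}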
Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ @@ -1387,9 +1682,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) */ if (link_down == true) { dp_disable_link_phy(link, link->connector_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT dp_set_fec_ready(link, false); -#endif } link->sync_lt_in_progress = false; @@ -1423,6 +1716,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) max_link_cap.link_spread) max_link_cap.link_spread = link->reported_link_cap.link_spread; + /* + * account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). + */ + if (!link->is_lttpr_mode_transparent) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); + } return max_link_cap; } @@ -1568,6 +1877,13 @@ bool dp_verify_link_cap( max_link_cap = get_max_link_cap(link); + /* Grant extended timeout request */ + if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -1576,6 +1892,16 @@ bool dp_verify_link_cap( /* disable PHY done possible by BIOS, will be done by driver itself */ dp_disable_link_phy(link, link->connector_signal); + /* Temporary Renoir-specific workaround for SWDEV-215184; + * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, + * so add extra cycle of enabling and disabling the PHY before first link training. + */ + if (link->link_enc->features.flags.bits.DP_IS_USB_C && + link->dc->debug.usbc_combo_phy_reset_wa) { + dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); + dp_disable_link_phy(link, link->connector_signal); + } + dp_cs_id = get_clock_source_id(link); /* link training starts with the maximum common settings @@ -2280,6 +2606,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) dc_link_dp_set_test_pattern( link, test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, &link_training_settings, test_80_bit_pattern, (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - @@ -2291,6 +2618,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) union link_test_pattern dpcd_test_pattern; union test_misc dpcd_test_params; enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); @@ -2325,14 +2654,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, NULL, NULL, 0); } +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ + union audio_test_mode dpcd_test_mode = {0}; + struct audio_test_pattern_type dpcd_pattern_type = {0}; + union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = &pipes[0]; + unsigned int channel_count; + unsigned int channel = 0; + unsigned int modes = 0; + unsigned int sampling_rate_in_hz = 0; + + // get audio test mode and test pattern parameters + core_link_read_dpcd( + link, + DP_TEST_AUDIO_MODE, + &dpcd_test_mode.raw, + sizeof(dpcd_test_mode)); + + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PATTERN_TYPE, + &dpcd_pattern_type.value, + sizeof(dpcd_pattern_type)); + + channel_count = dpcd_test_mode.bits.channel_count + 1; + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || + dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + + test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? + DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + // read period for each channel + for (channel = 0; channel < channel_count; channel++) { + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PERIOD_CH1 + channel, + &dpcd_pattern_period[channel].raw, + sizeof(dpcd_pattern_period[channel])); + } + } + + // translate sampling rate + switch (dpcd_test_mode.bits.sampling_rate) { + case AUDIO_SAMPLING_RATE_32KHZ: + sampling_rate_in_hz = 32000; + break; + case AUDIO_SAMPLING_RATE_44_1KHZ: + sampling_rate_in_hz = 44100; + break; + case AUDIO_SAMPLING_RATE_48KHZ: + sampling_rate_in_hz = 48000; + break; + case AUDIO_SAMPLING_RATE_88_2KHZ: + sampling_rate_in_hz = 88200; + break; + case AUDIO_SAMPLING_RATE_96KHZ: + sampling_rate_in_hz = 96000; + break; + case AUDIO_SAMPLING_RATE_176_4KHZ: + sampling_rate_in_hz = 176400; + break; + case AUDIO_SAMPLING_RATE_192KHZ: + sampling_rate_in_hz = 192000; + break; + default: + sampling_rate_in_hz = 0; + break; + } + + link->audio_test_data.flags.test_requested = 1; + link->audio_test_data.flags.disable_video = disable_video; + link->audio_test_data.sampling_rate = sampling_rate_in_hz; + link->audio_test_data.channel_count = channel_count; + link->audio_test_data.pattern_type = test_pattern; + + if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { + for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { + link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; + } + } +} + static void handle_automated_test(struct dc_link *link) { union test_request test_request; @@ -2362,6 +2782,12 @@ static void handle_automated_test(struct dc_link *link) dp_test_send_link_test_pattern(link); test_response.bits.ACK = 1; } + + if (test_request.bits.AUDIO_TEST_PATTERN) { + dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); + test_response.bits.ACK = 1; + } + if (test_request.bits.PHY_TEST_PATTERN) { dp_test_send_phy_test_pattern(link); test_response.bits.ACK = 1; @@ -2381,9 +2807,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd union hpd_irq_data 
hpd_irq_dpcd_data = { { { {0} } } }; union device_service_irq device_service_clear = { { 0 } }; enum dc_status result; - bool status = false; struct pipe_ctx *pipe_ctx; + struct dc_link_settings previous_link_settings; int i; if (out_link_loss) @@ -2447,29 +2873,37 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd /* For now we only handle 'Downstream port status' case. * If we got sink count changed it means * Downstream port status changed, - * then DM should call DC to do the detection. */ - if (hpd_rx_irq_check_link_loss_status( - link, - &hpd_irq_dpcd_data)) { + * then DM should call DC to do the detection. + * NOTE: Do not handle link loss on eDP since it is internal link*/ + if ((link->connector_signal != SIGNAL_TYPE_EDP) && + hpd_rx_irq_check_link_loss_status( + link, + &hpd_irq_dpcd_data)) { /* Connectivity log: link loss */ CONN_DATA_LINK_LOSS(link, hpd_irq_dpcd_data.raw, sizeof(hpd_irq_dpcd_data), "Status: "); - perform_link_training_with_retries(link, - &link->cur_link_settings, - true, LINK_TRAINING_ATTEMPTS); - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && - pipe_ctx->stream->dpms_off == false && - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_allocate_mst_payload(pipe_ctx); - } + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + break; } + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + + previous_link_settings = link->cur_link_settings; + + perform_link_training_with_retries(&previous_link_settings, + true, LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal); + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + dc_link_reallocate_mst_payload(link); + status = false; if (out_link_loss) *out_link_loss = true; @@ -2697,7 +3131,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, int length) { int retry = 0; - union dp_downstream_port_present ds_port = { 0 }; if (!link->dpcd_caps.dpcd_rev.raw) { do { @@ -2710,9 +3143,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); } - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { switch (link->dpcd_caps.branch_dev_id) { /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down @@ -2737,7 +3167,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, static bool retrieve_link_cap(struct dc_link *link) { - uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + uint8_t lttpr_dpcd_data[6]; /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. 
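As a quick check of the sizing comment above, using the DPCD addresses from drm_dp_helper.h (values quoted from memory, so treat the exact numbers as an assumption): DP_DPCD_REV is 0x000 and DP_ADAPTER_CAP is 0x00f, giving 0x00f - 0x000 + 1 = 16 bytes, and the DSC capability block runs from DP_DSC_SUPPORT at 0x060 to DP_DSC_BITS_PER_PIXEL_INC at 0x06f, again 0x06f - 0x060 + 1 = 16 bytes, so a single 16-byte buffer covers either block read.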
*/ @@ -2753,7 +3187,19 @@ static bool retrieve_link_cap(struct dc_link *link) int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; + /* Set default timeout to 3.2ms and read LTTPR capabilities */ + bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && + !link->dc->config.disable_extended_timeout_support; + + link->is_lttpr_mode_transparent = true; + + if (ext_timeout_support) { + dc_link_aux_configure_timeout(link->ddc, + LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); + } + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); memset(&down_strm_port_count, '\0', sizeof(union down_stream_port_count)); memset(&edp_config_cap, '\0', @@ -2785,6 +3231,52 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } + if (ext_timeout_support) { + + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && + link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) { + link->is_lttpr_mode_transparent = false; + } else { + /*No lttpr reset timeout to its default value*/ + link->is_lttpr_mode_transparent = true; + dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + } + + CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); + } + { union training_aux_rd_interval aux_rd_interval; @@ -2792,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link) dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; link->dpcd_caps.ext_receiver_cap_field_present = - aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? 
true:false; + aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { uint8_t ext_cap_data[16]; @@ -2923,7 +3415,6 @@ static bool retrieve_link_cap(struct dc_link *link) dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); @@ -2945,7 +3436,6 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw)); } -#endif /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -2953,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link) return true; } +bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ + uint8_t dpcd_data[16]; + uint32_t read_dpcd_retry_cnt = 3; + enum dc_status status = DC_ERROR_UNEXPECTED; + union dp_downstream_port_present ds_port = { 0 }; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + + int i; + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + get_active_converter_info(ds_port.byte, link); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = dpcd_data[ + DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + + return true; +} + bool detect_dp_sink_caps(struct dc_link *link) { return retrieve_link_cap(link); @@ -3067,21 +3619,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *pipe_ctx, - enum dp_test_pattern test_pattern) + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space) { enum controller_dp_test_pattern controller_test_pattern; enum dc_color_depth color_depth = pipe_ctx-> stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) int width = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; int height = pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; -#endif memset(¶ms, 0, sizeof(params)); @@ -3125,10 +3676,29 @@ static void set_crtc_test_pattern(struct dc_link *link, if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; + enum controller_dp_color_space controller_color_space; int opp_cnt = 1; + int count; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; + break; + case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: + default: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); + ASSERT(0); + break; + } for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; @@ -3141,6 +3711,7 @@ static void set_crtc_test_pattern(struct dc_link *link, odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, controller_test_pattern, + controller_color_space, color_depth, NULL, width, @@ -3148,12 +3719,18 @@ static void set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, controller_test_pattern, + controller_color_space, color_depth, NULL, width, height); + /* wait for dpg to blank pixel data with test pattern */ + for (count = 0; count < 1000; count++) { + if (opp->funcs->dpg_is_blanked(opp)) + break; + udelay(100); + } } -#endif } break; case DP_TEST_PATTERN_VIDEO_MODE: @@ -3166,7 +3743,6 @@ static void set_crtc_test_pattern(struct dc_link *link, pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -3181,6 +3757,7 @@ 
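The pattern-generator wait added above polls opp->funcs->dpg_is_blanked() at most 1000 times with a 100 us delay between polls, so the display pattern generator gets up to 1000 x 100 us = 100 ms to start blanking pixel data before the function simply continues; there is no explicit error path if that budget is exhausted.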
static void set_crtc_test_pattern(struct dc_link *link, odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, @@ -3188,12 +3765,12 @@ static void set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, height); } -#endif } break; @@ -3205,6 +3782,7 @@ static void set_crtc_test_pattern(struct dc_link *link, bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3233,7 +3811,7 @@ bool dc_link_dp_set_test_pattern( if (link->test_pattern_enabled && test_pattern == DP_TEST_PATTERN_VIDEO_MODE) { /* Set CRTC Test Pattern */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); dp_set_hw_test_pattern(link, test_pattern, (uint8_t *)p_custom_pattern, (uint32_t)cust_pattern_size); @@ -3256,8 +3834,8 @@ bool dc_link_dp_set_test_pattern( if (is_dp_phy_pattern(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { - dp_set_hw_lane_settings(link, p_link_settings); - dpcd_set_lane_settings(link, p_link_settings); + dp_set_hw_lane_settings(link, p_link_settings, DPRX); + dpcd_set_lane_settings(link, p_link_settings, DPRX); } /* Blank stream if running test pattern */ @@ -3347,8 +3925,38 @@ bool dc_link_dp_set_test_pattern( sizeof(training_pattern)); } } else { - /* CRTC Patterns */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + /* update MSA to requested color space */ + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream->timing, + color_space, + pipe_ctx->stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + + /* CRTC Patterns */ + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + /* Set Test Pattern state */ link->test_pattern_enabled = true; } @@ -3468,7 +4076,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) return DP_PANEL_MODE_DEFAULT; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void dp_set_fec_ready(struct dc_link *link, bool ready) { /* FEC has to be "set ready" before the link training. 
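The color-space handling above amounts to a small mapping from the requested DP test-pattern color space to a stream color space, with the CEA color-squares pattern forced to limited range. A sketch of that mapping as a standalone helper (hypothetical name, mirroring the switch in dc_link_dp_set_test_pattern):

/* Illustrative sketch only. */
static enum dc_color_space example_test_cs_to_stream_cs(
		enum dp_test_pattern_color_space cs,
		enum dp_test_pattern pattern)
{
	bool limited = (pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA);

	switch (cs) {
	case DP_TEST_PATTERN_COLOR_SPACE_RGB:
		return limited ? COLOR_SPACE_SRGB_LIMITED : COLOR_SPACE_SRGB;
	case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
		return limited ? COLOR_SPACE_YCBCR601_LIMITED : COLOR_SPACE_YCBCR601;
	case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
		return limited ? COLOR_SPACE_YCBCR709_LIMITED : COLOR_SPACE_YCBCR709;
	default:
		return COLOR_SPACE_UNKNOWN;
	}
}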
@@ -3538,5 +4145,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index a519dbc5ecb6..ddb855045767 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -12,12 +12,38 @@ #include "dc_link_ddc.h" #include "dm_helpers.h" #include "dpcd_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif + +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); +} enum dc_status core_link_read_dpcd( struct dc_link *link, @@ -69,8 +95,8 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings) { struct link_encoder *link_enc = link->link_enc; - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; @@ -147,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link) } bool edp_receiver_ready_T7(struct dc_link *link) { - unsigned int tries = 0; unsigned char sinkstatus = 0; unsigned char edpRev = 0; enum dc_status result = DC_OK; + /* use absolute time stamp to constrain max T7*/ + unsigned long long enter_timestamp = 0; + unsigned long long finish_timestamp = 0; + unsigned long long time_taken_in_ns = 0; + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); if (result == DC_OK && edpRev < DP_EDP_12) return true; /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ + enter_timestamp = dm_get_timestamp(link->ctx); do { sinkstatus = 0; result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); @@ -163,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link) break; if (result != DC_OK) break; - udelay(25); //MAx T7 is 50ms - } while (++tries < 300); + udelay(25); + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); + } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); @@ -174,8 +207,8 @@ bool edp_receiver_ready_T7(struct dc_link *link) void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; if (!link->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(link, false); @@ -212,7 +245,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) bool dp_set_hw_training_pattern( struct dc_link *link, - enum 
dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; @@ -240,10 +274,14 @@ bool dp_set_hw_training_pattern( void dp_set_hw_lane_settings( struct dc_link *link, - const struct link_training_settings *link_settings) + const struct link_training_settings *link_settings, + uint32_t offset) { struct link_encoder *encoder = link->link_enc; + if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset)) + return; + /* call Encoder to set lane settings */ encoder->funcs->dp_set_lane_settings(encoder, link_settings); } @@ -302,20 +340,12 @@ void dp_retrain_link_dp_test(struct dc_link *link, memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); - link->link_enc->funcs->enable_dp_output( - link->link_enc, - link_setting, - pipes[i].clock_source->id); - link->cur_link_settings = *link_setting; - - dp_receiver_power_ctrl(link, true); - perform_link_training_with_retries( - link, link_setting, skip_video_pattern, - LINK_TRAINING_ATTEMPTS); - + LINK_TRAINING_ATTEMPTS, + &pipes[i], + SIGNAL_TYPE_DISPLAY_PORT); link->dc->hwss.enable_stream(&pipes[i]); @@ -339,7 +369,6 @@ void dp_retrain_link_dp_test(struct dc_link *link, } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define DC_LOGGER \ dsc->ctx->logger static void dsc_optc_config_log(struct display_stream_compressor *dsc, @@ -365,14 +394,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc, static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) result = true; else - result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable); + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); return result; } @@ -382,7 +411,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -418,7 +447,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -443,7 +472,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* disable DSC in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -486,7 +515,7 @@ out: bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; if (!pipe_ctx->stream->timing.flags.DSC || !dsc) @@ -496,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; uint8_t dsc_packed_pps[128]; + memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + /* Enable DSC hw block */ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; @@ -505,7 +537,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) DC_LOG_DSC(" "); dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, @@ -514,7 +546,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) } } else { /* disable DSC PPS in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, false, NULL); } @@ -537,5 +569,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) dp_set_dsc_pps_sdp(pipe_ctx, true); return true; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 37698305a2dc..a0eb9e533a61 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -46,15 +46,11 @@ #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/dcn10_resource.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/dcn20_resource.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include 
"dcn21/dcn21_resource.h" -#endif #include "dce120/dce120_resource.h" #define DC_LOGGER_INIT(logger) @@ -99,23 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) else dc_version = DCE_VERSION_12_0; break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: dc_version = DCN_VERSION_1_0; if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_1_01; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_2_1; -#endif break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dc_version = DCN_VERSION_2_0; break; -#endif default: dc_version = DCE_VERSION_UNKNOWN; break; @@ -162,20 +154,16 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, init_data->num_virtual_links, dc); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: res_pool = dcn10_create_resource_pool(init_data, dc); break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: res_pool = dcn20_create_resource_pool(init_data, dc); break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; @@ -951,44 +939,44 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rects_integer_multiples(struct rect src, struct rect dest) -{ - if (dest.width >= src.width && dest.width % src.width == 0 && - dest.height >= src.height && dest.height % src.height == 0) - return true; - - return false; -} -static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) +/* + * When handling 270 rotation in mixed SLS mode, we have + * stream->timing.h_border_left that is non zero. If we are doing + * pipe-splitting, this h_border_left value gets added to recout.x and when it + * calls calculate_inits_and_adj_vp() and + * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a + * pipe to be incorrect. + * + * To fix this, instead of using stream->timing.h_border_left, we can use + * stream->dst.x to represent the border instead. So we will set h_border_left + * to 0 and shift the appropriate amount in stream->dst.x. We will then + * perform all calculations in resource_build_scaling_params() based on this + * and then restore the h_border_left and stream->dst.x to their original + * values. + * + * shift_border_left_to_dst() will shift the amount of h_border_left to + * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst() + * will restore h_border_left and stream->dst.x back to their original values + * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the + * original h_border_left value in its calculation. 
+ */ +int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx) { - if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) - return; - - //for Centered Mode - if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width && - pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { - // calculate maximum # of replication of src onto addressable - unsigned int integer_multiple = min( - pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, - pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); - - //scale dst - pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; - pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; + int store_h_border_left = pipe_ctx->stream->timing.h_border_left; - //center dst onto addressable - pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; - pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; + if (store_h_border_left) { + pipe_ctx->stream->timing.h_border_left = 0; + pipe_ctx->stream->dst.x += store_h_border_left; } + return store_h_border_left; +} - //disable taps if src & dst are integer ratio - if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { - pipe_ctx->plane_state->scaling_quality.v_taps = 1; - pipe_ctx->plane_state->scaling_quality.h_taps = 1; - pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; - pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; - } +void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx, + int store_h_border_left) +{ + pipe_ctx->stream->dst.x -= store_h_border_left; + pipe_ctx->stream->timing.h_border_left = store_h_border_left; } bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) @@ -996,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; bool res = false; + int store_h_border_left = shift_border_left_to_dst(pipe_ctx); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Important: scaling ratio calculation requires pixel format, * lb depth calculation requires recout and taps require scaling ratios. 
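The border shift introduced above is a save/restore pair: every exit path between shift_border_left_to_dst() and the end of the calculation has to put h_border_left back, exactly as resource_build_scaling_params() does on both its early-return and normal paths. A minimal usage sketch (hypothetical caller):

/* Illustrative sketch only. */
static void example_calc_with_border_shift(struct pipe_ctx *pipe_ctx)
{
	int store_h_border_left = shift_border_left_to_dst(pipe_ctx);

	/* ... scaling math that must see h_border_left == 0 ... */

	if (store_h_border_left)
		restore_border_left_from_dst(pipe_ctx, store_h_border_left);
}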
@@ -1004,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); - calculate_integer_scaling(pipe_ctx); - calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); - if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) + if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || + pipe_ctx->plane_res.scl_data.viewport.width < 16) { + if (store_h_border_left) { + restore_border_left_from_dst(pipe_ctx, + store_h_border_left); + } return false; + } calculate_recout(pipe_ctx); @@ -1024,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left; pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; + pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + + store_h_border_left + timing->h_border_right; + pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + + timing->v_border_top + timing->v_border_bottom; /* Taps calculations */ if (pipe_ctx->plane_res.xfm != NULL) @@ -1072,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) plane_state->dst_rect.x, plane_state->dst_rect.y); + if (store_h_border_left) + restore_border_left_from_dst(pipe_ctx, store_h_border_left); + return res; } @@ -1217,7 +1215,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head( return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static int acquire_first_split_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1298,7 +1296,7 @@ bool dc_add_plane_to_context( free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - #if defined(CONFIG_DRM_AMD_DC_DCN1_0) + #if defined(CONFIG_DRM_AMD_DC_DCN) if (!free_pipe) { int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) @@ -1891,7 +1889,7 @@ static int acquire_resource_from_hw_enabled_state( inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (inst == ENGINE_ID_UNKNOWN) - return false; + return -1; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i]->id == inst) { @@ -1903,10 +1901,10 @@ static int acquire_resource_from_hw_enabled_state( // tg_inst not found if (i == pool->stream_enc_count) - return false; + return -1; if (tg_inst >= pool->timing_generator_count) - return false; + return -1; if (!res_ctx->pipe_ctx[tg_inst].stream) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst]; @@ -1919,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state( pipe_ctx->plane_res.dpp = pool->dpps[tg_inst]; pipe_ctx->stream_res.opp = pool->opps[tg_inst]; - if (pool->dpps[tg_inst]) + if (pool->dpps[tg_inst]) { pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst; + + // Read DPP->MPCC->OPP Pipe from HW State + if (pool->mpc->funcs->read_mpcc_state) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s); + + if (s.dpp_id < MAX_MPCC) + pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id; + + if (s.bot_mpcc_id < MAX_MPCC) + 
pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot = + &pool->mpc->mpcc_array[s.bot_mpcc_id]; + + if (s.opp_id < MAX_OPP) + pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id; + } + } pipe_ctx->pipe_idx = tg_inst; pipe_ctx->stream = stream; @@ -1972,7 +1988,7 @@ enum dc_status resource_map_pool_resources( /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN if (pipe_idx < 0) pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); #endif @@ -2050,6 +2066,13 @@ void dc_resource_state_construct( dst_ctx->clk_mgr = dc->clk_mgr; } + +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) +{ + return dc->res_pool->res_cap->num_dsc > 0; +} + + /** * dc_validate_global_state() - Determine if HW can support a given state * Checks HW resource availability and bandwidth requirement. @@ -2306,7 +2329,7 @@ static void set_avi_info_frame( if (color_space == COLOR_SPACE_SRGB || color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; } else if (color_space == COLOR_SPACE_SRGB_LIMITED || color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) { hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; @@ -2772,9 +2795,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) { - struct dc *core_dc = dc; struct dc_link *link = stream->link; - struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; + struct timing_generator *tg = dc->res_pool->timing_generators[0]; enum dc_status res = DC_OK; calculate_phy_pix_clks(stream); @@ -2837,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format) return -1; } } +static unsigned int get_max_audio_sample_rate(struct audio_mode *modes) +{ + if (modes) { + if (modes->sample_rates.rate.RATE_192) + return 192000; + if (modes->sample_rates.rate.RATE_176_4) + return 176400; + if (modes->sample_rates.rate.RATE_96) + return 96000; + if (modes->sample_rates.rate.RATE_88_2) + return 88200; + if (modes->sample_rates.rate.RATE_48) + return 48000; + if (modes->sample_rates.rate.RATE_44_1) + return 44100; + if (modes->sample_rates.rate.RATE_32) + return 32000; + } + /*original logic when no audio info*/ + return 441000; +} + +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *audio_chk) +{ + unsigned int i; + unsigned int max_sample_rate = 0; + + if (aud_modes) { + audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/ + + audio_chk->max_audiosample_rate = 0; + for (i = 0; i < aud_modes->mode_count; i++) { + max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]); + if (audio_chk->max_audiosample_rate < max_sample_rate) + audio_chk->max_audiosample_rate = max_sample_rate; + /*dts takes the same as type 2: AP = 0.25*/ + } + /*check which one take more bandwidth*/ + if (audio_chk->max_audiosample_rate > 192000) + audio_chk->audio_packet_type = 0x9;/*AP =1*/ + audio_chk->acat = 0;/*not support*/ + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index 5cbfdf1c4b11..a249a0e5edd0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,7 +33,7 
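A short usage sketch of the audio helper added above (hypothetical caller): get_audio_check() reports the highest sample rate advertised by any audio mode and derives the audio packet type from it, 0x2 for rates up to 192 kHz and 0x9 above that.

/* Illustrative sketch only. */
static void example_query_audio_bandwidth(struct dc_stream_state *stream)
{
	struct audio_check chk = {0};

	get_audio_check(&stream->audio_info, &chk);
	/* chk.max_audiosample_rate: worst-case rate across all modes */
	/* chk.audio_packet_type:    0x2 up to 192 kHz, 0x9 above     */
}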
@@ * Private functions ******************************************************************************/ -static void destruct(struct dc_sink *sink) +static void dc_sink_destruct(struct dc_sink *sink) { if (sink->dc_container_id) { kfree(sink->dc_container_id); @@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink) } } -static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) +static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { struct dc_link *link = init_params->link; @@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - destruct(sink); + dc_sink_destruct(sink); kfree(sink); } @@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params) if (NULL == sink) goto alloc_fail; - if (false == construct(sink, init_params)) + if (false == dc_sink_construct(sink, init_params)) goto construct_fail; kref_init(&sink->refcount); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bb09243758fe..6ddbb00ed37a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -27,14 +27,12 @@ #include <linux/slab.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "dc.h" #include "core_types.h" #include "resource.h" #include "ipp.h" #include "timing_generator.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "dcn10/dcn10_hw_sequencer.h" -#endif #define DC_LOGGER dc->ctx->logger @@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static void construct(struct dc_stream_state *stream, +static void dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream, /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg)); stream->timing.dsc_cfg.num_slices_h = 0; stream->timing.dsc_cfg.num_slices_v = 0; @@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream, stream->timing.dsc_cfg.linebuf_depth = 9; stream->timing.dsc_cfg.version_minor = 2; stream->timing.dsc_cfg.ycbcr422_simple = 0; -#endif update_stream_signal(stream, dc_sink_data); @@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream, stream->ctx->dc_stream_id_count++; } -static void destruct(struct dc_stream_state *stream) +static void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { @@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref) { struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount); - destruct(stream); + dc_stream_destruct(stream); kfree(stream); } @@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink( if (stream == NULL) return NULL; - construct(stream, sink); + dc_stream_construct(stream, sink); kref_init(&stream->refcount); @@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status( static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned int vupdate_line; unsigned 
int lines_to_vupdate, us_to_vupdate, vpos, nvpos; struct dc_stream_state *stream = pipe_ctx->stream; @@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) if (stream->ctx->asic_id.chip_family == FAMILY_RV && ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { - vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); + vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) return; @@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes( const struct dc_cursor_attributes *attributes) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_attributes = *attributes; for (i = 0; i < MAX_PIPES; i++) { @@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_attribute(pipe_ctx); - if (core_dc->hwss.set_cursor_sdr_white_level) - core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } @@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position( const struct dc_cursor_position *position) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_position = *position; for (i = 0; i < MAX_PIPES; i++) { @@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_position(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info) @@ -411,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc, stream->writeback_info[stream->num_wb_info++] = *wb_info; } - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* enable writeback */ if (dc->hwss.enable_writeback) { struct dc_stream_status *stream_status = dc_stream_get_status(stream); struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + dwb->otg_inst = stream_status->primary_otg_inst; + } + if 
(IS_DIAG_DC(dc->ctx->dce_environment)) { + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } - if (dwb->funcs->is_enabled(dwb)) { - /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); - } else { - /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); + /* enable writeback */ + if (dc->hwss.enable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* writeback pipe already enabled, only need to update */ + dc->hwss.update_writeback(dc, wb_info, dc->current_state); + } else { + /* Enable writeback pipe from scratch*/ + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); + } } } - return true; } @@ -468,26 +468,35 @@ bool dc_stream_remove_writeback(struct dc *dc, } stream->num_wb_info = j; - /* recalculate and apply DML parameters */ - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* disable writeback */ - if (dc->hwss.disable_writeback) - dc->hwss.disable_writeback(dc, dwb_pipe_inst); + if (IS_DIAG_DC(dc->ctx->dce_environment)) { + /* recalculate and apply DML parameters */ + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + /* disable writeback */ + if (dc->hwss.disable_writeback) + dc->hwss.disable_writeback(dc, dwb_pipe_inst); + } return true; } -#endif +bool dc_stream_warmup_writeback(struct dc *dc, + int num_dwb, + struct dc_writeback_info *wb_info) +{ + if (dc->hwss.mmhubbub_warmup) + return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); + else + return false; +} uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { uint8_t i; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -544,9 +553,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, { uint8_t i; bool ret = false; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -567,10 +576,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, return ret; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { - bool status = true; struct pipe_ctx *pipe = NULL; int i; @@ -586,8 +593,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; - status = dc->hwss.dmdata_status_done(pipe); - return status; + return dc->hwss.dmdata_status_done(pipe); } bool dc_stream_set_dynamic_metadata(struct dc *dc, @@ -630,7 +636,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, return true; } -#endif void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index b9d6a5bd8522..ea1229a3e2b2 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -37,7 +37,7 @@ /******************************************************************************* * Private functions ******************************************************************************/ -static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; @@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->in_transfer_func->type = TF_TYPE_BYPASS; plane_state->in_transfer_func->ctx = ctx; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) plane_state->in_shaper_func = dc_create_transfer_func(); if (plane_state->in_shaper_func != NULL) { plane_state->in_shaper_func->type = TF_TYPE_BYPASS; @@ -67,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->blend_tf->ctx = ctx; } -#endif } -static void destruct(struct dc_plane_state *plane_state) +static void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); @@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->in_transfer_func); plane_state->in_transfer_func = NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (plane_state->in_shaper_func != NULL) { dc_transfer_func_release( plane_state->in_shaper_func); @@ -97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->blend_tf = NULL; } -#endif } /******************************************************************************* @@ -112,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, struct dc_plane_state *dc_create_plane_state(struct dc *dc) { - struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), - GFP_KERNEL); + GFP_KERNEL); if (NULL == plane_state) return NULL; kref_init(&plane_state->refcount); - construct(core_dc->ctx, plane_state); + dc_plane_construct(dc->ctx, plane_state); return plane_state; } @@ -141,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state) { const struct dc_plane_status *plane_status; - struct dc *core_dc; + struct dc *dc; int i; if (!plane_state || @@ -152,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status( } plane_status = &plane_state->status; - core_dc = plane_state->ctx->dc; + dc = plane_state->ctx->dc; - if (core_dc->current_state == NULL) + if (dc->current_state == NULL) return NULL; /* Find the current plane state and set its pending bit to false */ - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; @@ -170,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status( break; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; - core_dc->hwss.update_pending_status(pipe_ctx); + dc->hwss.update_pending_status(pipe_ctx); } return plane_status; @@ -191,7 +185,7 @@ void 
dc_plane_state_retain(struct dc_plane_state *plane_state) static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); - destruct(plane_state); + dc_plane_destruct(plane_state); kvfree(plane_state); } @@ -262,7 +256,6 @@ alloc_fail: return NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static void dc_3dlut_func_free(struct kref *kref) { struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount); @@ -296,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut) { kref_get(&lut->refcount); } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0416a17b0897..8ff25b5dd2f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.56" +#define DC_VER "3.2.69" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -54,6 +54,10 @@ struct dc_versions { struct dmcu_version dmcu_version; }; +enum dp_protocol_version { + DP_VERSION_1_4, +}; + enum dc_plane_type { DC_PLANE_TYPE_INVALID, DC_PLANE_TYPE_DCE_RGB, @@ -112,17 +116,15 @@ struct dc_caps { bool disable_dp_clk_share; bool psp_setup_panel_mode; bool extended_aux_timeout_support; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + bool dmcub_support; bool hw_3d_lut; -#endif + enum dp_protocol_version max_dp_protocol_version; struct dc_plane_cap planes[MAX_PLANES]; }; struct dc_bug_wa { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool no_connect_phy_config; bool dedcn20_305_wa; -#endif bool skip_clock_update; }; @@ -155,11 +157,14 @@ struct dc_surface_dcc_cap { bool const_color_support; }; -struct dc_static_screen_events { - bool force_trigger; - bool cursor_update; - bool surface_update; - bool overlay_update; +struct dc_static_screen_params { + struct { + bool force_trigger; + bool cursor_update; + bool surface_update; + bool overlay_update; + } triggers; + unsigned int num_frames; }; @@ -363,10 +368,10 @@ struct dc_debug_options { bool disable_dfs_bypass; bool disable_dpp_power_gate; bool disable_hubp_power_gate; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; int dsc_min_slice_height_override; -#endif + int dsc_bpp_increment_div; + bool native422_support; bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; @@ -401,22 +406,26 @@ struct dc_debug_options { unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; + bool dmub_offload_enabled; + bool dmcub_emulation; + bool dmub_command_table; /* for testing only */ struct dc_bw_validation_profile bw_val_profile; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_fec; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 bool disable_48mhz_pwrdwn; -#endif /* This forces a hard min on the DCFCLK requested to SMU/PP * watermarks are not affected. 
*/ unsigned int force_min_dcfclk_mhz; bool disable_timing_sync; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool cm_in_bypass; -#endif int force_clock_mode;/*every mode change.*/ + + bool nv12_iflip_vm_wa; + bool disable_dram_clock_change_vactive_support; + bool validate_dml_output; + bool enable_dmcub_surface_flip; + bool usbc_combo_phy_reset_wa; + bool disable_dsc; }; struct dc_debug_data { @@ -425,7 +434,6 @@ struct dc_debug_data { uint32_t auxErrorCount; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config { struct { uint64_t start_addr; @@ -455,7 +463,6 @@ struct dc_virtual_addr_space_config { uint32_t page_table_block_size_in_bytes; uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc. 0 = invalid }; -#endif struct dc_bounding_box_overrides { int sr_exit_time_ns; @@ -483,9 +490,7 @@ struct dc { struct dc_bounding_box_overrides bb_overrides; struct dc_bug_wa work_arounds; struct dc_context *ctx; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; -#endif uint8_t link_count; struct dc_link *links[MAX_PIPES * 2]; @@ -501,7 +506,7 @@ struct dc { /* Inputs into BW and WM calculations. */ struct bw_calcs_dceip *bw_dceip; struct bw_calcs_vbios *bw_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; struct display_mode_lib dml; @@ -515,7 +520,7 @@ struct dc { bool optimized_required; /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - bool optimize_seamless_boot; + int optimize_seamless_boot_streams; /* FBC compressor */ struct compressor *fbc_compressor; @@ -523,10 +528,8 @@ struct dc { struct dc_debug_data debug_data; const char *build_id; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct vm_helper *vm_helper; const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; enum frame_buffer_mode { @@ -558,15 +561,16 @@ struct dc_init_data { struct dc_bios *vbios_override; enum dce_environment dce_environment; + struct dmub_offload_funcs *dmub_if; + struct dc_reg_helper_state *dmub_offload; + struct dc_config flags; uint32_t log_mask; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; struct dc_callback_init { @@ -581,11 +585,9 @@ struct dc *dc_create(const struct dc_init_data *init_params); void dc_hardware_init(struct dc *dc); int dc_get_vmid_use_vector(struct dc *dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); /* Returns the number of vmids supported */ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config); -#endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); void dc_deinit_callbacks(struct dc *dc); @@ -661,7 +663,6 @@ struct dc_transfer_func { }; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) union dc_3dlut_state { struct { @@ -680,12 +681,11 @@ union dc_3dlut_state { struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; - uint32_t hdr_multiplier; + struct fixed31_32 hdr_multiplier; bool initialized; /*remove after diag fix*/ union dc_3dlut_state state; struct dc_context *ctx; }; -#endif /* * This structure is filled in by dc_surface_get_status and contains * the last requested address and the currently active address so the called @@ -708,7 +708,7 @@ union surface_update_flags { uint32_t horizontal_mirror_change:1; uint32_t 
per_pixel_alpha_change:1; uint32_t global_alpha_change:1; - uint32_t sdr_white_level:1; + uint32_t hdr_mult:1; uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; @@ -736,9 +736,7 @@ union surface_update_flags { struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool triplebuffer_flips; -#endif struct scaling_taps scaling_quality; struct rect src_rect; struct rect dst_rect; @@ -754,18 +752,16 @@ struct dc_plane_state { struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; - uint32_t sdr_white_level; + struct fixed31_32 hdr_mult; // TODO: No longer used, remove struct dc_hdr_static_metadata hdr_static_ctx; enum dc_color_space color_space; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *lut3d_func; struct dc_transfer_func *in_shaper_func; struct dc_transfer_func *blend_tf; -#endif enum surface_pixel_format format; enum dc_rotation_angle rotation; @@ -801,7 +797,6 @@ struct dc_plane_info { enum dc_rotation_angle rotation; enum plane_stereo_format stereo_format; enum dc_color_space color_space; - unsigned int sdr_white_level; bool horizontal_mirror; bool visible; bool per_pixel_alpha; @@ -825,7 +820,7 @@ struct dc_surface_update { const struct dc_flip_addrs *flip_addr; const struct dc_plane_info *plane_info; const struct dc_scaling_info *scaling_info; - + struct fixed31_32 hdr_mult; /* following updates require alloc/sleep/spin that is not isr safe, * null means no updates */ @@ -834,11 +829,9 @@ struct dc_surface_update { const struct dc_csc_transform *input_csc_color_matrix; const struct fixed31_32 *coeff_reduction_factor; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dc_transfer_func *func_shaper; const struct dc_3dlut *lut3d_func; const struct dc_transfer_func *blend_tf; -#endif }; /* @@ -859,11 +852,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf); void dc_transfer_func_release(struct dc_transfer_func *dc_tf); struct dc_transfer_func *dc_create_transfer_func(void); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *dc_create_3dlut_func(void); void dc_3dlut_func_release(struct dc_3dlut *lut); void dc_3dlut_func_retain(struct dc_3dlut *lut); -#endif /* * This structure holds a surface address. There could be multiple addresses * in cases such as Stereo 3D, Planar YUV, etc. 
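Usage note for the hunks above: the per-plane sdr_white_level fields are dropped in favor of a fixed31_32 hdr_mult carried on both dc_plane_state and dc_surface_update (tracked by the renamed surface_update_flags.hdr_mult bit). A minimal sketch of how a caller might populate it; example_set_plane_hdr_mult and boost_milli are illustrative names, not part of this patch:

static void example_set_plane_hdr_mult(struct dc_plane_state *plane_state,
                                       struct dc_surface_update *srf_update,
                                       unsigned int boost_milli)
{
        /* unity multiplier when no HDR boost is requested */
        struct fixed31_32 mult = boost_milli ?
                dc_fixpt_from_fraction(boost_milli, 1000) :
                dc_fixpt_from_fraction(1, 1);

        plane_state->hdr_mult = mult;
        srf_update->hdr_mult = mult;
}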
Other per-flip attributes such @@ -925,6 +916,8 @@ void dc_resource_state_copy_construct_current( void dc_resource_state_destruct(struct dc_state *context); +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); + /* * TODO update to make it about validation sets * Set up streams and links associated to drive sinks @@ -980,10 +973,10 @@ struct dpcd_caps { bool panel_mode_edp; bool dpcd_display_control_capable; bool ext_receiver_cap_field_present; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; -#endif + struct dc_lttpr_caps lttpr_caps; + }; #include "dc_link.h" @@ -1004,14 +997,12 @@ struct dc_container_id { }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; struct dsc_dec_dpcd_caps dsc_dec_caps; }; -#endif /* * The sink structure contains EDID and other display device properties @@ -1026,9 +1017,7 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps sink_dsc_caps; -#endif /* private to DC core */ struct dc_link *link; @@ -1086,13 +1075,12 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc); unsigned int dc_get_target_backlight_pwm(struct dc *dc); bool dc_is_dmcu_initialized(struct dc *dc); +bool dc_is_hw_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /******************************************************************************* * DSC Interfaces ******************************************************************************/ #include "dc_dsc.h" -#endif #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c new file mode 100644 index 000000000000..59c298a6484f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../dmub/inc/dmub_srv.h" + +static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, + struct dmub_srv *dmub) +{ + dc_srv->dmub = dmub; + dc_srv->ctx = dc->ctx; +} + +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) +{ + struct dc_dmub_srv *dc_srv = + kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); + + if (dc_srv == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dc_dmub_srv_construct(dc_srv, dc, dmub); + + return dc_srv; +} + +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) +{ + if (*dmub_srv) { + kfree(*dmub_srv); + *dmub_srv = NULL; + } +} + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + + if (status != DMUB_STATUS_QUEUE_FULL) + goto error; + + /* Execute and wait for queue to become empty again. */ + dc_dmub_srv_cmd_execute(dc_dmub_srv); + dc_dmub_srv_wait_idle(dc_dmub_srv); + + /* Requeue the command. */ + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + +error: + DC_ERROR("Error queuing DMUB command: status=%d\n", status); +} + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_execute(dmub); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error starting DMUB execution: status=%d\n", status); +} + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_wait_for_idle(dmub, 100000); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); +} + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + for (;;) { + /* Wait up to a second for PHY init. */ + status = dmub_srv_wait_for_phy_init(dmub, 1000000); + if (status == DMUB_STATUS_OK) + /* Initialization OK */ + break; + + DC_ERROR("DMCUB PHY init failed: status=%d\n", status); + ASSERT(0); + + if (status != DMUB_STATUS_TIMEOUT) + /* + * Server likely initialized or we don't have + * DMCUB HW support - this won't end. + */ + break; + + /* Continue spinning so we don't hang the ASIC. */ + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h new file mode 100644 index 000000000000..754b6077539c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
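The new dc_dmub_srv.c above wraps dmub_srv with queue, execute, and wait helpers; dc_dmub_srv_cmd_queue() already handles DMUB_STATUS_QUEUE_FULL by flushing and requeueing. A caller-side sketch, assuming a dmub_rb_cmd the caller has already filled in; example_send_dmub_cmd is an illustrative name, not driver code:

static void example_send_dmub_cmd(struct dc *dc, union dmub_rb_cmd *cmd)
{
        struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

        if (!dmub_srv)
                return; /* DMCUB not present or not created for this ASIC */

        /* queue-full retry is handled inside dc_dmub_srv_cmd_queue() */
        dc_dmub_srv_cmd_queue(dmub_srv, &cmd->cmd_common.header);
        dc_dmub_srv_cmd_execute(dmub_srv);
        dc_dmub_srv_wait_idle(dmub_srv);
}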
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DC_SRV_H_ +#define _DMUB_DC_SRV_H_ + +#include "os_types.h" +#include "../dmub/inc/dmub_cmd.h" + +struct dmub_srv; +struct dmub_cmd_header; + +struct dc_reg_helper_state { + bool gather_in_progress; + uint32_t same_addr_count; + bool should_burst_write; + union dmub_rb_cmd cmd_data; + unsigned int reg_seq_count; +}; + +struct dc_dmub_srv { + struct dmub_srv *dmub; + struct dc_reg_helper_state reg_helper_offload; + + struct dc_context *ctx; + void *dm; +}; + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd); + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); + +#endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index ef79a686e4c2..dfe4472c9e40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -129,9 +129,7 @@ struct dc_link_training_overrides { bool *alternate_scrambler_reset; bool *enhanced_framing; bool *mst_enable; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool *fec_enable; -#endif }; union dpcd_rev { @@ -471,13 +469,13 @@ union training_aux_rd_interval { /* Automated test structures */ union test_request { struct { - uint8_t LINK_TRAINING :1; - uint8_t LINK_TEST_PATTRN :1; - uint8_t EDID_READ :1; - uint8_t PHY_TEST_PATTERN :1; - uint8_t AUDIO_TEST_PATTERN :1; - uint8_t RESERVED :1; - uint8_t TEST_STEREO_3D :1; + uint8_t LINK_TRAINING :1; + uint8_t LINK_TEST_PATTRN :1; + uint8_t EDID_READ :1; + uint8_t PHY_TEST_PATTERN :1; + uint8_t RESERVED :1; + uint8_t AUDIO_TEST_PATTERN :1; + uint8_t TEST_AUDIO_DISABLED_VIDEO :1; } bits; uint8_t raw; }; @@ -524,19 +522,52 @@ union link_test_pattern { union test_misc { struct dpcd_test_misc_bits { - unsigned char SYNC_CLOCK :1; + unsigned char SYNC_CLOCK :1; /* dpcd_test_color_format */ - unsigned char CLR_FORMAT :2; + unsigned char CLR_FORMAT :2; /* dpcd_test_dyn_range */ - unsigned char DYN_RANGE :1; - unsigned char YCBCR :1; + unsigned char DYN_RANGE :1; + unsigned char YCBCR_COEFS :1; /* dpcd_test_bit_depth */ - unsigned char BPC :3; + unsigned char BPC :3; } bits; unsigned char raw; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +union audio_test_mode { + struct { + unsigned char sampling_rate :4; + 
unsigned char channel_count :4; + } bits; + unsigned char raw; +}; + +union audio_test_pattern_period { + struct { + unsigned char pattern_period :4; + unsigned char reserved :4; + } bits; + unsigned char raw; +}; + +struct audio_test_pattern_type { + unsigned char value; +}; + +struct dp_audio_test_data_flags { + uint8_t test_requested :1; + uint8_t disable_video :1; +}; + +struct dp_audio_test_data { + + struct dp_audio_test_data_flags flags; + uint8_t sampling_rate; + uint8_t channel_count; + uint8_t pattern_type; + uint8_t pattern_period[8]; +}; + /* FEC capability DPCD register field bits-*/ union dpcd_fec_capability { struct { @@ -661,6 +692,5 @@ struct dpcd_dsc_capabilities { union dpcd_dsc_ext_capabilities dsc_ext_caps; }; -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 0ed2962add5a..3800340a5b4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef DC_DSC_H_ #define DC_DSC_H_ /* @@ -42,21 +41,28 @@ struct dc_dsc_bw_range { struct display_stream_compressor { const struct dsc_funcs *funcs; -#ifndef AMD_EDID_UTILITY struct dc_context *ctx; int inst; -#endif }; -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, +struct dc_dsc_policy { + bool use_min_slices_h; + int max_slices_h; // Maximum available if 0 + int min_slice_height; // Must not be less than 8 + uint32_t max_target_bpp; + uint32_t min_target_bpp; +}; + +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, + const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); bool dc_dsc_compute_bandwidth_range( const struct display_stream_compressor *dsc, const uint32_t dsc_min_slice_height_override, - const uint32_t min_kbps, - const uint32_t max_kbps, + const uint32_t min_bpp, + const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range); @@ -68,5 +74,10 @@ bool dc_dsc_compute_config( uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); -#endif + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, + struct dc_dsc_policy *policy); + +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 30b2f9edd42f..737048d8a96c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -32,6 +32,74 @@ #include "dm_services.h" #include <stdarg.h> +#include "dc.h" +#include "dc_dmub_srv.h" + +static inline void submit_dmub_read_modify_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + bool gather = false; + + offload->should_burst_write = + (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); + cmd_buf->header.payload_bytes = + sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + 
offload->reg_seq_count = 0; + offload->same_addr_count = 0; +} + +static inline void submit_dmub_burst_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + bool gather = false; + + cmd_buf->header.payload_bytes = + sizeof(uint32_t) * offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + offload->reg_seq_count = 0; +} + +static inline void submit_dmub_reg_wait( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + bool gather = false; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + offload->reg_seq_count = 0; + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; +} + struct dc_reg_value_masks { uint32_t value; uint32_t mask; @@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask, } } +static void dmub_flush_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_read_modify_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static void dmub_flush_burst_write_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_burst_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t reg_val) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + + /* flush command if buffer is full */ + if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX) + dmub_flush_burst_write_buffer_execute(offload, ctx); + + if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE && + addr != cmd_buf->addr) { + dmub_flush_burst_write_buffer_execute(offload, ctx); + return false; + } + + cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; + cmd_buf->header.sub_type = 0; + cmd_buf->addr = addr; + cmd_buf->write_values[offload->reg_seq_count] = reg_val; + offload->reg_seq_count++; + + return true; +} + +static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, + struct dc_reg_value_masks *field_value_mask) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + struct dmub_cmd_read_modify_write_sequence *seq; + + /* flush command if buffer is full */ + if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE && + offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX) + dmub_flush_buffer_execute(offload, ctx); + + if (offload->should_burst_write) { + if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value)) + return field_value_mask->value; + else + offload->should_burst_write = false; + } + + /* pack commands */ + cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; + cmd_buf->header.sub_type = 0; + 
seq = &cmd_buf->seq[offload->reg_seq_count]; + + if (offload->reg_seq_count) { + if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr) + offload->same_addr_count++; + else + offload->same_addr_count = 0; + } + + seq->addr = addr; + seq->modify_mask = field_value_mask->mask; + seq->modify_value = field_value_mask->value; + offload->reg_seq_count++; + + return field_value_mask->value; +} + +static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + + cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; + cmd_buf->header.sub_type = 0; + cmd_buf->reg_wait.addr = addr; + cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); + cmd_buf->reg_wait.mask = mask; + cmd_buf->reg_wait.time_out_us = time_out_us; +} + uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, @@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, va_end(ap); + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) + return dmub_reg_value_pack(ctx, addr, &field_value_mask); + /* todo: return void so we can decouple code running in driver from register states */ + /* mmio write directly */ reg_val = dm_read_reg(ctx, addr); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; @@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx, /* mmio write directly */ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); + /* todo: return void so we can decouple code running in driver from register states */ + } + dm_write_reg(ctx, addr, reg_val); return reg_val; } @@ -134,6 +308,14 @@ uint32_t dm_read_reg_func( return 0; } #endif + + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress && + !ctx->dmub_srv->reg_helper_offload.should_burst_write) { + ASSERT(false); + return 0; + } + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx, uint32_t reg_val; int i; - /* something is terribly wrong if time out is > 200ms. (5Hz) */ + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, + delay_between_poll_us * time_out_num_tries); + return; + } + + /* + * Something is terribly wrong if time out is > 3000ms. + * 3000ms is the maximum time needed for SMU to pass values back. + * This value comes from experiments. + * + */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); for (i = 0; i <= time_out_num_tries; i++) { @@ -346,12 +540,48 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, { uint32_t value = 0; + // when reg read, there should not be any offload. 
+ if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + ASSERT(false); + } + dm_write_reg(ctx, addr_index, index); value = dm_read_reg(ctx, addr_data); return value; } +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, + uint32_t addr_index, uint32_t addr_data, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...) +{ + uint32_t shift, mask, *field_value; + uint32_t value = 0; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + value = generic_read_indirect_reg(ctx, addr_index, addr_data, index); + *field_value1 = get_reg_field_value_ex(value, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t *); + + *field_value = get_reg_field_value_ex(value, mask, shift); + i++; + } + + va_end(ap); + + return value; +} uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, @@ -382,3 +612,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } + +void reg_sequence_start_gather(const struct dc_context *ctx) +{ + /* if reg sequence is supported and enabled, set flag to + * indicate we want to have REG_SET, REG_UPDATE macro build + * reg sequence command buffer rather than MMIO directly. + */ + + if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { + struct dc_reg_helper_state *offload = + &ctx->dmub_srv->reg_helper_offload; + + /* caller sequence mismatch. need to debug caller. offload will not work!!! */ + ASSERT(!offload->gather_in_progress); + + offload->gather_in_progress = true; + } +} + +void reg_sequence_start_execute(const struct dc_context *ctx) +{ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && offload->gather_in_progress) { + offload->gather_in_progress = false; + offload->should_burst_write = false; + switch (offload->cmd_data.cmd_common.header.type) { + case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: + submit_dmub_read_modify_write(offload, ctx); + break; + case DMUB_CMD__REG_REG_WAIT: + submit_dmub_reg_wait(offload, ctx); + break; + case DMUB_CMD__REG_SEQ_BURST_WRITE: + submit_dmub_burst_write(offload, ctx); + break; + default: + return; + } + + dc_dmub_srv_cmd_execute(ctx->dmub_srv); + } +} + +void reg_sequence_wait_done(const struct dc_context *ctx) +{ + /* callback to DM to poll for last submission done*/ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && + ctx->dc->debug.dmub_offload_enabled && + !ctx->dc->debug.dmcub_emulation) { + dc_dmub_srv_wait_idle(ctx->dmub_srv); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index e0856bb8511f..25c50bcab9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,8 +26,6 @@ #ifndef DC_HW_TYPES_H #define DC_HW_TYPES_H -#ifndef AMD_EDID_UTILITY - #include "os_types.h" #include "fixed31_32.h" #include "signal_types.h" @@ -167,12 +165,10 @@ enum surface_pixel_format { /*swaped & float*/ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, /*grow graphics here if necessary */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX, SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT, -#endif 
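For reference, the register-sequence offload helpers added to dc_helper.c above are intended to bracket a run of register programming. A sketch of the calling pattern under that assumption; program_some_regs() is a hypothetical stand-in for real REG_SET()/REG_UPDATE() sequences:

/* stand-in for a real run of REG_SET()/REG_UPDATE() calls */
static void program_some_regs(struct dc_context *ctx) { }

static void example_offloaded_programming(struct dc_context *ctx)
{
        /* gather only engages when ctx->dc->debug.dmub_offload_enabled is set */
        reg_sequence_start_gather(ctx);   /* REG_* writes get packed instead of hitting MMIO */

        program_some_regs(ctx);

        reg_sequence_start_execute(ctx);  /* submit the packed sequence to DMCUB */
        reg_sequence_wait_done(ctx);      /* wait for DMCUB to drain it */
}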
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -180,10 +176,8 @@ enum surface_pixel_format { SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010, SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102, -#endif SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_INVALID @@ -222,12 +216,10 @@ enum tile_split_values { DC_ROTATED_MICRO_TILING = 0x3, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum tripleBuffer_enable { DC_TRIPLEBUFFER_DISABLE = 0x0, DC_TRIPLEBUFFER_ENABLE = 0x1, }; -#endif /* TODO: These values come from hardware spec. We need to readdress this * if they ever change. @@ -427,13 +419,11 @@ struct dc_csc_transform { bool enable_adjustment; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_rgb_fixed { struct fixed31_32 red; struct fixed31_32 green; struct fixed31_32 blue; }; -#endif struct dc_gamma { struct kref refcount; @@ -468,10 +458,8 @@ enum dc_cursor_color_format { CURSOR_MODE_COLOR_1BIT_AND, CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA, CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED, CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED -#endif }; /* @@ -594,8 +582,6 @@ struct scaling_taps { bool integer_scaling; }; -#endif /* AMD_EDID_UTILITY */ - enum dc_timing_standard { DC_TIMING_STANDARD_UNDEFINED, DC_TIMING_STANDARD_DMT, @@ -626,10 +612,8 @@ enum dc_color_depth { COLOR_DEPTH_121212, COLOR_DEPTH_141414, COLOR_DEPTH_161616, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 COLOR_DEPTH_999, COLOR_DEPTH_111111, -#endif COLOR_DEPTH_COUNT }; @@ -690,9 +674,7 @@ struct dc_crtc_timing_flags { * rates less than or equal to 340Mcsc */ uint32_t LTE_340MCSC_SCRAMBLE:1; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT uint32_t DSC : 1; /* Use DSC with this timing */ -#endif }; enum dc_timing_3d_format { @@ -717,7 +699,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -728,7 +709,6 @@ struct dc_dsc_config { bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. 
*/ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ }; -#endif struct dc_crtc_timing { uint32_t h_total; uint32_t h_border_left; @@ -755,13 +735,9 @@ struct dc_crtc_timing { enum scanning_type scan_type; struct dc_crtc_timing_flags flags; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config dsc_cfg; -#endif }; -#ifndef AMD_EDID_UTILITY - enum trigger_delay { TRIGGER_DELAY_NEXT_PIXEL = 0, TRIGGER_DELAY_NEXT_LINE, @@ -796,7 +772,6 @@ enum vram_type { VIDEO_MEMORY_TYPE_GDDR6 = 6, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dwb_cnv_out_bpc { DWB_CNV_OUT_BPC_8BPC = 0, DWB_CNV_OUT_BPC_10BPC = 1, @@ -847,7 +822,6 @@ struct mcif_buf_params { unsigned int swlock; }; -#endif #define MAX_TG_COLOR_VALUE 0x3FF struct tg_color { @@ -857,7 +831,5 @@ struct tg_color { uint16_t color_b_cb; }; -#endif /* AMD_EDID_UTILITY */ - #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index f24fd19ed93d..d25603128394 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -29,13 +29,11 @@ #include "dc_types.h" #include "grph_object_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state { dc_link_fec_not_ready, dc_link_fec_ready, dc_link_fec_enabled }; -#endif struct dc_link_status { bool link_active; struct dpcd_caps *dpcd_caps; @@ -85,6 +83,7 @@ struct dc_link { bool link_state_valid; bool aux_access_disabled; bool sync_lt_in_progress; + bool is_lttpr_mode_transparent; /* caps is the same as reported_link_cap. link_traing use * reported_link_cap. Will clean up. TODO @@ -95,6 +94,7 @@ struct dc_link { struct dc_lane_settings cur_lane_setting; struct dc_link_settings preferred_link_setting; struct dc_link_training_overrides preferred_training_settings; + struct dp_audio_test_data audio_test_data; uint8_t ddc_hw_inst; @@ -133,6 +133,7 @@ struct dc_link { struct link_flags { bool dp_keep_receiver_powered; bool dp_skip_DID2; + bool dp_skip_reset_segment; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -140,9 +141,7 @@ struct dc_link { struct link_trace link_trace; struct gpio *hpd_gpio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state fec_state; -#endif }; const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); @@ -206,6 +205,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); +enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -259,6 +259,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link); bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); @@ -290,6 +291,7 @@ void dc_link_enable_hpd(const struct dc_link *link); void dc_link_disable_hpd(const struct dc_link *link); void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); @@ -300,11 +302,18 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link_settings *dc_link_get_link_cap( const struct dc_link *link); +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link); + bool dc_submit_i2c( struct dc *dc, uint32_t link_index, struct i2c_command *cmd); +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd); + uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index fdb6adc37857..92096de79dec 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -52,7 +52,6 @@ struct freesync_context { bool dummy; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum hubp_dmdata_mode { DMDATA_SW_MODE, DMDATA_HW_MODE @@ -82,9 +81,7 @@ struct dc_dmdata_attributes { /* An unbounded array of uint32s, represents software dmdata to be loaded */ uint32_t *dmdata_sw_data; }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_info { bool wb_enabled; int dwb_pipe_inst; @@ -96,7 +93,6 @@ struct dc_writeback_update { unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; }; -#endif enum vertical_interrupt_ref_point { START_V_UPDATE = 0, @@ -121,9 +117,7 @@ union stream_update_flags { uint32_t abm_level:1; uint32_t dpms_off:1; uint32_t gamut_remap:1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t wb_update:1; -#endif } bits; uint32_t raw; @@ -164,6 +158,7 @@ struct dc_stream_state { enum view_3d_format view_format; + bool use_vsc_sdp_for_colorimetry; bool ignore_msa_timing_param; bool converter_disable_audio; uint8_t qs_bit; @@ -203,11 +198,9 @@ struct dc_stream_state { struct crtc_trigger_info triggered_crtc_reset; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; -#endif /* Computed state bits */ bool mode_changed : 1; @@ -226,9 +219,7 @@ struct dc_stream_state { bool apply_seamless_boot_optimization; uint32_t stream_id; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; -#endif union stream_update_flags update_flags; }; @@ -251,6 +242,7 @@ struct dc_stream_update { struct dc_info_packet *vsp_infopacket; bool *dpms_off; + bool integer_scaling_update; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; @@ -258,12 +250,8 @@ struct dc_stream_update { struct dc_csc_transform *output_csc_transform; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_update *wb_update; -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) struct dc_dsc_config *dsc_config; -#endif }; bool dc_is_stream_unchanged( @@ -353,18 +341,23 @@ bool dc_add_all_planes_for_stream( 
int plane_count, struct dc_state *context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); + bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst); + +bool dc_stream_warmup_writeback(struct dc *dc, + int num_dwb, + struct dc_writeback_info *wb_info); + bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream); + bool dc_stream_set_dynamic_metadata(struct dc *dc, struct dc_stream_state *stream, struct dc_dmdata_attributes *dmdata_attr); -#endif enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); @@ -446,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc, uint32_t *g_y, uint32_t *b_cb); -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **stream, int num_streams, - const struct dc_static_screen_events *events); + const struct dc_static_screen_params *params); void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, enum dc_dynamic_expansion option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index d9be8fc3889f..e59532d98cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,7 +25,6 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ -#ifndef AMD_EDID_UTILITY /* AND EdidUtility only needs a portion * of this file, including the rest only * causes additional issues. @@ -48,6 +47,7 @@ struct dc_stream_state; struct dc_link; struct dc_sink; struct dal; +struct dc_dmub_srv; /******************************** * Environment definitions @@ -60,7 +60,12 @@ enum dce_environment { DCE_ENV_FPGA_MAXIMUS, /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces * requirements of Diagnostics team. */ - DCE_ENV_DIAG + DCE_ENV_DIAG, + /* + * Guest VM system, DC HW may exist but is not virtualized and + * should not be used. SW support for VDI only. + */ + DCE_ENV_VIRTUAL_HW }; /* Note: use these macro definitions instead of direct comparison! 
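Usage note for the dc_stream_set_static_screen_params() rename above: the former dc_static_screen_events booleans now sit under a triggers sub-struct, with a new num_frames threshold alongside them. A sketch of how a caller might fill the new structure; the function name and num_frames value are illustrative only:

static void example_enable_static_screen(struct dc *dc,
                                         struct dc_stream_state **streams,
                                         int num_streams)
{
        struct dc_static_screen_params params = {0};

        params.triggers.cursor_update = true;
        params.triggers.surface_update = true;
        params.triggers.overlay_update = true;
        params.num_frames = 2; /* illustrative: idle frames before static state */

        dc_stream_set_static_screen_params(dc, streams, num_streams, &params);
}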
*/ @@ -109,6 +114,8 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; + struct dc_dmub_srv *dmub_srv; + #ifdef CONFIG_DRM_AMD_DC_HDCP struct cp_psp cp_psp; #endif @@ -119,6 +126,7 @@ struct dc_context { #define DC_EDID_BLOCK_SIZE 128 #define MAX_SURFACE_NUM 4 #define NUM_PIXEL_FORMATS 10 +#define MAX_REPEATER_CNT 8 #include "dc_ddc_types.h" @@ -221,6 +229,7 @@ struct dc_panel_patch { unsigned int extra_t12_ms; unsigned int extra_delay_backlight_off; unsigned int extra_t7_ms; + unsigned int manage_secondary_link; }; struct dc_edid_caps { @@ -402,6 +411,30 @@ enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_12BPC, DOWN_STREAM_MAX_16BPC }; + + +enum link_training_offset { + DPRX = 0, + LTTPR_PHY_REPEATER1 = 1, + LTTPR_PHY_REPEATER2 = 2, + LTTPR_PHY_REPEATER3 = 3, + LTTPR_PHY_REPEATER4 = 4, + LTTPR_PHY_REPEATER5 = 5, + LTTPR_PHY_REPEATER6 = 6, + LTTPR_PHY_REPEATER7 = 7, + LTTPR_PHY_REPEATER8 = 8 +}; + +struct dc_lttpr_caps { + union dpcd_rev revision; + uint8_t mode; + uint8_t max_lane_count; + uint8_t max_link_rate; + uint8_t phy_repeater_cnt; + uint8_t max_ext_timeout; + uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; + struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; @@ -440,7 +473,6 @@ enum display_content_type { DISPLAY_CONTENT_TYPE_GAME = 8 }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ struct dwb_stereo_params { bool stereo_enabled; /* false: normal mode, true: 3D stereo */ @@ -471,7 +503,6 @@ struct dc_dwb_params { enum dwb_subsample_position subsample_position; struct dc_transfer_func *out_transfer_func; }; -#endif /* audio*/ @@ -573,15 +604,17 @@ struct audio_info { /* this field must be last in this struct */ struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; }; - +struct audio_check { + unsigned int audio_packet_type; + unsigned int max_audiosample_rate; + unsigned int acat; +}; enum dc_infoframe_type { DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81, DC_HDMI_INFOFRAME_TYPE_AVI = 0x82, DC_HDMI_INFOFRAME_TYPE_SPD = 0x83, DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_DP_INFOFRAME_TYPE_PPS = 0x10, -#endif }; struct dc_info_packet { @@ -696,7 +729,7 @@ struct psr_context { /* The VSync rate in Hz used to calculate the * step size for smooth brightness feature */ - unsigned int vsyncRateHz; + unsigned int vsync_rate_hz; unsigned int skipPsrWaitForPllLock; unsigned int numberOfControllers; /* Unused, for future use. 
To indicate that first changed frame from @@ -757,10 +790,6 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; -#endif /*AMD_EDID_UTILITY*/ -//AMD EDID UTILITY does not need any of the above structures - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { struct { @@ -830,6 +859,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; }; -#endif #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 7ba7e6f722f6..ba0caaffa24b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -67,7 +67,6 @@ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_DCN20_REG_LIST() \ ABM_COMMON_REG_LIST_DCE_BASE(), \ SR(DC_ABM1_HG_SAMPLE_RATE), \ @@ -81,7 +80,6 @@ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \ NBIO_SR(BIOS_SCRATCH_2) -#endif #define ABM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -163,9 +161,7 @@ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh) -#endif #define ABM_REG_FIELD_LIST(type) \ type ABM1_HG_NUM_OF_BINS_SEL; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 793c0cec407f..f1a5d2c6aa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -60,12 +60,14 @@ enum { AUX_DEFER_RETRY_COUNTER = 6 }; -#define TIME_OUT_INCREMENT 1016 -#define TIME_OUT_MULTIPLIER_8 8 -#define TIME_OUT_MULTIPLIER_16 16 -#define TIME_OUT_MULTIPLIER_32 32 -#define TIME_OUT_MULTIPLIER_64 64 -#define MAX_TIMEOUT_LENGTH 127 +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 +#define DEFAULT_AUX_ENGINE_MULT 0 +#define DEFAULT_AUX_ENGINE_LENGTH 69 static void release_engine( struct dce_aux *engine) @@ -427,11 +429,14 @@ void dce110_engine_destroy(struct dce_aux **engine) } -static bool dce_aux_configure_timeout(struct ddc_service *ddc, +static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout_in_us) { uint32_t multiplier = 0; uint32_t length = 0; + uint32_t prev_length = 0; + uint32_t prev_mult = 0; + uint32_t prev_timeout_val = 0; struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); @@ -440,7 +445,10 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc, aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; /* 2-Update aux timeout period length and multiplier */ - if (timeout_in_us <= TIME_OUT_INCREMENT) { + if (timeout_in_us == 0) { + multiplier = DEFAULT_AUX_ENGINE_MULT; + length = DEFAULT_AUX_ENGINE_LENGTH; + } else if (timeout_in_us <= TIME_OUT_INCREMENT) { multiplier = 0; length = timeout_in_us/TIME_OUT_MULTIPLIER_8; if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) @@ -464,9 +472,29 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc, length = (length < 
MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH; + REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult); + + switch (prev_mult) { + case 0: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8; + break; + case 1: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16; + break; + case 2: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32; + break; + case 3: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64; + break; + default: + prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8; + break; + } + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); - return true; + return prev_timeout_val; } static struct dce_aux_funcs aux_functions = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index b4b2c79a8073..382465862f29 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -30,7 +30,6 @@ #include "inc/hw/aux_engine.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ SRI(AUX_ARB_CONTROL, DP_AUX, id), \ @@ -39,7 +38,6 @@ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) -#endif #define AUX_COMMON_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -311,7 +309,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); struct dce_aux_funcs { - bool (*configure_timeout) + uint32_t (*configure_timeout) (struct ddc_service *ddc, uint32_t timeout); void (*destroy) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index f787a6b94781..2e992fbc0d71 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -905,7 +905,7 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; unsigned dp_dto_ref_100hz = 7000000; @@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz( return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ struct pixel_rate_range_table_entry { @@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = { .get_pix_clk_dividers = dce112_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; -#endif /*****************************************/ /* Constructor */ @@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct( return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct( return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 43c1bf60b83c..51bd25079606 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -55,7 +55,6 @@ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ 
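The reworked dce_aux_configure_timeout() above encodes the requested timeout as an AUX_RX_TIMEOUT_LEN / AUX_RX_TIMEOUT_LEN_MUL pair, where one length count is worth 8us << multiplier and TIME_OUT_INCREMENT (1016 = 127 * 8) is the largest timeout expressible at multiplier 0; it now also decodes and returns the previously programmed value through the same table. Below is a standalone sketch of the encode step, using the macros defined in the hunk; the intermediate branches (multiplier 1 and 2) are not visible in the hunk and are assumed to follow the same pattern, and the function name is illustrative.

static void encode_aux_timeout(uint32_t timeout_in_us,
                               uint32_t *multiplier, uint32_t *length)
{
        uint32_t us_per_count;

        if (timeout_in_us == 0) {
                /* 0 falls back to the engine defaults: 69 counts * 8us = 552us */
                *multiplier = DEFAULT_AUX_ENGINE_MULT;
                *length = DEFAULT_AUX_ENGINE_LENGTH;
                return;
        }

        if (timeout_in_us <= TIME_OUT_INCREMENT) {
                *multiplier = 0;
                us_per_count = TIME_OUT_MULTIPLIER_8;
        } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
                *multiplier = 1;
                us_per_count = TIME_OUT_MULTIPLIER_16;
        } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
                *multiplier = 2;
                us_per_count = TIME_OUT_MULTIPLIER_32;
        } else {
                *multiplier = 3;
                us_per_count = TIME_OUT_MULTIPLIER_64;
        }

        *length = timeout_in_us / us_per_count;
        if (timeout_in_us % us_per_count != 0)
                *length += 1;                   /* round up to the next count */
        if (*length > MAX_TIMEOUT_LENGTH)
                *length = MAX_TIMEOUT_LENGTH;   /* register field caps at 127 */
}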
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -76,9 +75,7 @@ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 5) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -93,17 +90,14 @@ SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 3) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ @@ -201,7 +195,6 @@ bool dce112_clk_src_construct( const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -210,6 +203,5 @@ bool dcn20_clk_src_construct( const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index ba995d3f2318..30d953acd016 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -59,6 +59,12 @@ #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L +// PSP FW version +#define mmMP0_SMN_C2PMSG_58 0x1607A + +//Register access policy version +#define mmMP0_SMN_C2PMSG_91 0x1609B + static bool dce_dmcu_init(struct dmcu *dmcu) { // Do nothing @@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop( return; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static void dcn10_get_dmcu_version(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); @@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; + PERF_TRACE(); /* Definition of DC_DMCU_SCRATCH * 0 : firmare not loaded * 1 : PSP load DMCU FW but not initialized @@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) break; } + PERF_TRACE(); return status; } +static bool dcn21_dmcu_init(struct dmcu *dmcu) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15); + + if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) { + return false; + } + + return dcn10_dmcu_init(dmcu); +} static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, @@ -518,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait) if (dmcu->dmcu_state != DMCU_RUNNING) return; - dcn10_get_dmcu_psr_state(dmcu, &psr_state); - if (psr_state == 0 && !enable) - return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, @@ -727,9 +743,7 @@ static bool 
dcn10_is_dmcu_initialized(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN1_0) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool dcn20_lock_phy(struct dmcu *dmcu) { @@ -777,7 +791,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN2_0) +#endif //(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, @@ -790,7 +804,7 @@ static const struct dmcu_funcs dce_funcs = { .is_dmcu_initialized = dce_is_dmcu_initialized }; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dcn10_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -801,9 +815,7 @@ static const struct dmcu_funcs dcn10_funcs = { .get_psr_wait_loop = dcn10_get_psr_wait_loop, .is_dmcu_initialized = dcn10_is_dmcu_initialized }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct dmcu_funcs dcn20_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -816,6 +828,19 @@ static const struct dmcu_funcs dcn20_funcs = { .lock_phy = dcn20_lock_phy, .unlock_phy = dcn20_unlock_phy }; + +static const struct dmcu_funcs dcn21_funcs = { + .dmcu_init = dcn21_dmcu_init, + .load_iram = dcn10_dmcu_load_iram, + .set_psr_enable = dcn10_dmcu_set_psr_enable, + .setup_psr = dcn10_dmcu_setup_psr, + .get_psr_state = dcn10_get_dmcu_psr_state, + .set_psr_wait_loop = dcn10_psr_wait_loop, + .get_psr_wait_loop = dcn10_get_psr_wait_loop, + .is_dmcu_initialized = dcn10_is_dmcu_initialized, + .lock_phy = dcn20_lock_phy, + .unlock_phy = dcn20_unlock_phy +}; #endif static void dce_dmcu_construct( @@ -836,6 +861,26 @@ static void dce_dmcu_construct( dmcu_dce->dmcu_mask = dmcu_mask; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dcn21_dmcu_construct( + struct dce_dmcu *dmcu_dce, + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + uint32_t psp_version = 0; + + dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); + dmcu_dce->base.psp_version = psp_version; + } +} +#endif + struct dmcu *dce_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -857,7 +902,7 @@ struct dmcu *dce_dmcu_create( return &dmcu_dce->base; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dmcu *dcn10_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -878,9 +923,7 @@ struct dmcu *dcn10_dmcu_create( return &dmcu_dce->base; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -901,6 +944,27 @@ struct dmcu *dcn20_dmcu_create( return &dmcu_dce->base; } + +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); + + if (dmcu_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn21_dmcu_construct( + dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + + dmcu_dce->base.funcs = &dcn21_funcs; + + return &dmcu_dce->base; +} #endif void dce_dmcu_destroy(struct dmcu **dmcu) diff --git 
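dcn21_dmcu_construct() above reads the PSP firmware version from mmMP0_SMN_C2PMSG_58 (outside FPGA environments) and enables DMCUB auto-load only for sufficiently new firmware, which dcn21_dmcu_init() then cross-checks against DMCUB_SCRATCH15. A sketch of the version gate follows; the mask and threshold are taken from the hunk, while the helper name and the reading of the masked bits as a version number are assumptions.

static bool psp_supports_dmcub_auto_load(uint32_t psp_version)
{
        /* Keep only the fields compared in dcn21_dmcu_construct() and
         * require a value strictly newer than 0x00110029. */
        return (psp_version & 0x00FF00FF) > 0x00110029;
}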
a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index cc8587683b4b..5e044c2d3d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -71,6 +71,10 @@ DMCU_COMMON_REG_LIST_DCE_BASE(), \ SR(DMU_MEM_PWR_CNTL) +#define DMCU_DCN20_REG_LIST()\ + DMCU_DCN10_REG_LIST(), \ + SR(DMCUB_SCRATCH15) + #define DMCU_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -175,6 +179,7 @@ struct dce_dmcu_registers { uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK; uint32_t SMU_INTERRUPT_CONTROL; uint32_t DC_DMCU_SCRATCH; + uint32_t DMCUB_SCRATCH15; }; struct dce_dmcu { @@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#endif + +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask); void dce_dmcu_destroy(struct dmcu **dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 0275d6d60da4..e1c5839a80dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -25,7 +25,7 @@ #include "dce_hwseq.h" #include "reg_helper.h" -#include "hw_sequencer.h" +#include "hw_sequencer_private.h" #include "core_types.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 32d145a0d6fc..c5aa1f48593a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -25,7 +25,7 @@ #ifndef __DCE_HWSEQ_H__ #define __DCE_HWSEQ_H__ -#include "hw_sequencer.h" +#include "dc_types.h" #define BL_REG_LIST()\ SR(LVTMA_PWRSEQ_CNTL), \ @@ -210,7 +210,6 @@ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -276,9 +275,7 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -329,7 +326,6 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif struct dce_hwseq_registers { @@ -577,7 +573,6 @@ struct dce_hwseq_registers { HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -637,9 +632,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -682,7 +675,6 @@ struct dce_hwseq_registers { HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) -#endif #define HWSEQ_REG_FIELD_LIST(type) \ type DCFE_CLOCK_ENABLE; \ @@ -800,8 +792,7 @@ struct dce_hwseq_registers { type D2VGA_MODE_ENABLE; \ type D3VGA_MODE_ENABLE; \ type D4VGA_MODE_ENABLE; \ - type AZALIA_AUDIO_DTO_MODULE;\ - type HPO_HDMISTREAMCLK_GATE_DIS; + type AZALIA_AUDIO_DTO_MODULE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) @@ -820,6 +811,10 @@ enum blnd_mode { BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */ }; +struct dce_hwseq; +struct pipe_ctx; +struct clock_source; + void dce_enable_fe_clock(struct dce_hwseq *hwss, unsigned int inst, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c index 35a75398fcb4..dd41736bb5c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -31,7 +31,7 @@ bool dce_i2c_submit_command( struct i2c_command *cmd) { struct dce_i2c_hw *dce_i2c_hw; - struct dce_i2c_sw *dce_i2c_sw; + struct dce_i2c_sw dce_i2c_sw = {0}; if (!ddc) { BREAK_TO_DEBUGGER(); @@ -43,18 +43,15 @@ bool dce_i2c_submit_command( return false; } - /* The software engine is only available on dce8 */ - dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc); - - if (!dce_i2c_sw) { - dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); - - if (!dce_i2c_hw) - return false; + dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); + if (dce_i2c_hw) return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); - } - return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw); + dce_i2c_sw.ctx = ddc->ctx; + if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) { + return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw); + } + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index aad7b52165be..066188ba7949 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size( dce_i2c_hw->buffer_used_bytes; } -static uint32_t get_speed( - const struct dce_i2c_hw *dce_i2c_hw) -{ - uint32_t pre_scale = 0; - - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? - dce_i2c_hw->reference_frequency / pre_scale : - dce_i2c_hw->default_speed; -} - static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *reply) @@ -278,16 +264,25 @@ static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { + uint32_t xtal_ref_div = 0; + uint32_t prescale = 0; + + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); + + if (xtal_ref_div == 0) + xtal_ref_div = 2; + + prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; if (speed) { if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
2:1); else REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } } @@ -296,9 +291,7 @@ static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t reset_length = 0; -#endif /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); @@ -322,14 +315,12 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 2, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) } else { reset_length = dce_i2c_hw->send_reset_length; REG_UPDATE_N(SETUP, 3, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#endif } /* Program HW priority * set to High - interrupt software I2C at any time @@ -348,9 +339,7 @@ static void release_engine( bool safe_to_reset; /* Restore original HW engine speed */ - - set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); - + set_speed(dce_i2c_hw, dce_i2c_hw->default_speed); /* Reset HW engine */ { @@ -382,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( { uint32_t counter = 0; enum gpio_result result; - uint32_t current_speed; struct dce_i2c_hw *dce_i2c_hw = NULL; if (!ddc) @@ -420,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( dce_i2c_hw->ddc = ddc; - current_speed = get_speed(dce_i2c_hw); - - if (current_speed) - dce_i2c_hw->original_speed = current_speed; - if (!setup_engine(dce_i2c_hw)) { release_engine(dce_i2c_hw); return NULL; @@ -482,13 +465,9 @@ static void submit_channel_request_hw( static uint32_t get_transaction_timeout_hw( const struct dce_i2c_hw *dce_i2c_hw, - uint32_t length) + uint32_t length, + uint32_t speed) { - - uint32_t speed = get_speed(dce_i2c_hw); - - - uint32_t period_timeout; uint32_t num_of_clock_stretches; @@ -508,7 +487,8 @@ static uint32_t get_transaction_timeout_hw( bool dce_i2c_hw_engine_submit_payload( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *payload, - bool middle_of_transaction) + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; @@ -546,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload( /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( - dce_i2c_hw, payload->length + 1); + dce_i2c_hw, payload->length + 1, speed); submit_channel_request_hw( dce_i2c_hw, &request); @@ -592,13 +572,11 @@ bool dce_i2c_submit_command_hw( struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_hw_engine_submit_payload( - dce_i2c_hw, payload, mot)) { + dce_i2c_hw, payload, mot, cmd->speed)) { result = false; break; } - - ++index_of_payload; } @@ -629,7 +607,6 @@ void dce_i2c_hw_construct( dce_i2c_hw->buffer_used_bytes = 0; dce_i2c_hw->transaction_count = 0; dce_i2c_hw->engine_keep_power_up_count = 1; - dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; @@ -644,9 +621,6 @@ void dce100_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { - - uint32_t xtal_ref_div = 0; - 
dce_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, @@ -654,21 +628,6 @@ void dce100_i2c_hw_construct( shifts, masks); dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; - - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); - - if (xtal_ref_div == 0) - xtal_ref_div = 2; - - /*Calculating Reference Clock by divding original frequency by - * XTAL_REF_DIV. - * At upper level, uint32_t reference_frequency = - * dal_dce_i2c_get_reference_clock(as) >> 1 - * which already divided by 2. So we need x2 to get original - * reference clock from ppll_info - */ - dce_i2c_hw->reference_frequency = - (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; } void dce112_i2c_hw_construct( @@ -705,7 +664,6 @@ void dcn1_i2c_hw_construct( dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -724,4 +682,3 @@ void dcn2_i2c_hw_construct( if (ctx->dc->debug.scl_reset_length10) dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index cb0234e5d597..fb055e6883c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -177,9 +177,7 @@ struct dce_i2c_shift { uint8_t DC_I2C_INDEX; uint8_t DC_I2C_INDEX_WRITE; uint8_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint8_t DC_I2C_REG_RW_CNTL_STATUS; }; @@ -220,17 +218,13 @@ struct dce_i2c_mask { uint32_t DC_I2C_INDEX; uint32_t DC_I2C_INDEX_WRITE; uint32_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint32_t DC_I2C_REG_RW_CNTL_STATUS; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\ I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh) -#endif struct dce_i2c_registers { uint32_t SETUP; @@ -262,7 +256,6 @@ struct i2c_request_transaction_data { struct dce_i2c_hw { struct ddc *ddc; - uint32_t original_speed; uint32_t engine_keep_power_up_count; uint32_t transaction_count; uint32_t buffer_used_bytes; @@ -312,7 +305,6 @@ void dcn1_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -320,7 +312,6 @@ void dcn2_i2c_hw_construct( const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#endif bool dce_i2c_submit_command_hw( struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index a5a11c251e25..87d8428df6c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -73,31 +73,6 @@ static void release_engine_dce_sw( dce_i2c_sw->ddc = NULL; } -static bool get_hw_supported_ddc_line( - struct ddc *ddc, - enum gpio_ddc_line *line) -{ - enum gpio_ddc_line line_found; - - *line = GPIO_DDC_LINE_UNKNOWN; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!ddc->hw_info.hw_supported) - return false; - - line_found = dal_ddc_get_line(ddc); - - if (line_found >= GPIO_DDC_LINE_COUNT) - return false; - - *line = line_found; - - return true; -} static bool wait_for_scl_high_sw( struct dc_context *ctx, 
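With the hunks above, the XTAL_REF_DIV adjustment moves out of dce100_i2c_hw_construct() and into set_speed(), which now derives the prescale per transfer. A sketch of that arithmetic is below, assuming (per the removed comment) that reference_frequency arrives already divided by two; the function name and the explicit speed guard are not in the patch.

static uint32_t i2c_hw_prescale(uint32_t reference_frequency,
                                uint32_t xtal_ref_div, uint32_t speed)
{
        if (xtal_ref_div == 0)
                xtal_ref_div = 2;       /* unprogrammed divider reads as 0 */

        if (speed == 0)
                return 0;               /* set_speed() only programs a nonzero speed */

        /* reference_frequency was already halved upstream, hence the *2 */
        return ((reference_frequency * 2) / xtal_ref_div) / speed;
}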
struct ddc *ddc, @@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw( return result; } -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc) -{ - enum gpio_ddc_line line; - struct dce_i2c_sw *engine = NULL; - - if (get_hw_supported_ddc_line(ddc, &line)) - engine = pool->sw_i2cs[line]; - - if (!engine) - return NULL; - - if (!dce_i2c_engine_acquire_sw(engine, ddc)) - return NULL; - - return engine; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h index 5bbcdd455614..019fc47bb767 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h @@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw( struct i2c_command *cmd, struct dce_i2c_sw *dce_i2c_sw); -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc); +bool dce_i2c_engine_acquire_sw( + struct dce_i2c_sw *dce_i2c_sw, + struct ddc *ddc_handle); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 6ed922a3c1cd..451574971b96 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet( AFMT_GENERIC0_UPDATE, (packet_index == 0), AFMT_GENERIC2_UPDATE, (packet_index == 2)); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(AFMT_VBI_PACKET_CONTROL1)) { switch (packet_index) { case 0: @@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet( HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case 4: if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, @@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t h_active_start; uint32_t v_active_start; uint32_t misc0 = 0; @@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->DP_VID_N_MUL) REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); #endif @@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_MISC)) misc1 = REG_READ(DP_MSA_MISC); #endif @@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( /* set dynamic range and YCbCr range */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; @@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( DP_DYN_RANGE, dynamic_range_rgb, DP_YCBCR_RANGE, dynamic_range_ycbcr); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_COLORIMETRY)) REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); @@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( 
hw_crtc_timing.v_front_porch; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* start at begining of left border */ if (REG(DP_MSA_TIMING_PARAM2)) REG_SET_2(DP_MSA_TIMING_PARAM2, 0, @@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets( dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->HDMI_DB_DISABLE) { /* for bring up, disable dp double TODO */ if (REG(HDMI_DB_CONTROL)) @@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* stop generic packets 2 & 3 on HDMI */ if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c new file mode 100644 index 000000000000..225955ec6d39 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_psr.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../../dmub/inc/dmub_srv.h" +#include "dmub_fw_state.h" +#include "core_types.h" +#include "ipp.h" + +#define MAX_PIPES 6 + +/** + * Get PSR state from firmware. + */ +static void dmub_get_psr_state(uint32_t *psr_state) +{ + // Not yet implemented + // Trigger GPINT interrupt from firmware +} + +/** + * Enable/Disable PSR. + */ +static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + cmd.psr_enable.header.type = DMUB_CMD__PSR; + + if (enable) + cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE; + else + cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE; + + cmd.psr_enable.header.payload_bytes = 0; // Send header only + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Set PSR level. 
+ */ +static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) +{ + union dmub_rb_cmd cmd; + uint32_t psr_state = 0; + struct dc_context *dc = dmub->ctx; + + dmub_get_psr_state(&psr_state); + + if (psr_state == 0) + return; + + cmd.psr_set_level.header.type = DMUB_CMD__PSR; + cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL; + cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data); + cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Setup PSR by programming phy registers and sending psr hw context values to firmware. + */ +static bool dmub_setup_psr(struct dmub_psr *dmub, + struct dc_link *link, + struct psr_context *psr_context) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + struct dmub_cmd_psr_copy_settings_data *copy_settings_data + = &cmd.psr_copy_settings.psr_copy_settings_data; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + + for (int i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + break; + } + } + + if (!pipe_ctx || + !&pipe_ctx->plane_res || + !&pipe_ctx->stream_res) + return false; + + // Program DP DPHY fast training registers + link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc, + psr_context->psrExitLinkTrainingRequired); + + // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high + link->link_enc->funcs->psr_program_secondary_packet(link->link_enc, + psr_context->sdpTransmitLineNumDeadline); + + cmd.psr_copy_settings.header.type = DMUB_CMD__PSR; + cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS; + cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data); + + // Hw insts + copy_settings_data->dpphy_inst = psr_context->phyType; + copy_settings_data->aux_inst = psr_context->channel; + copy_settings_data->digfe_inst = psr_context->engineId; + copy_settings_data->digbe_inst = psr_context->transmitterId; + + copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst; + + if (pipe_ctx->plane_res.hubp) + copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst; + else + copy_settings_data->hubp_inst = 0; + if (pipe_ctx->plane_res.dpp) + copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; + else + copy_settings_data->dpp_inst = 0; + if (pipe_ctx->stream_res.opp) + copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst; + else + copy_settings_data->opp_inst = 0; + if (pipe_ctx->stream_res.tg) + copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; + else + copy_settings_data->otg_inst = 0; + + // Misc + copy_settings_data->psr_level = psr_context->psr_level.u32all; + copy_settings_data->hyst_frames = psr_context->timehyst_frames; + copy_settings_data->hyst_lines = psr_context->hyst_lines; + copy_settings_data->phy_type = psr_context->phyType; + copy_settings_data->aux_repeat = psr_context->aux_repeats; + copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; + copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock; + copy_settings_data->frame_delay = 
psr_context->frame_delay; + copy_settings_data->smu_phy_id = psr_context->smuPhyId; + copy_settings_data->num_of_controllers = psr_context->numberOfControllers; + copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; + copy_settings_data->phy_num = psr_context->frame_delay & 0x7; + copy_settings_data->link_rate = psr_context->frame_delay & 0xF; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +static const struct dmub_psr_funcs psr_funcs = { + .set_psr_enable = dmub_set_psr_enable, + .setup_psr = dmub_setup_psr, + .get_psr_state = dmub_get_psr_state, + .set_psr_level = dmub_set_psr_level, +}; + +/** + * Construct PSR object. + */ +static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx) +{ + psr->ctx = ctx; + psr->funcs = &psr_funcs; +} + +/** + * Allocate and initialize PSR object. + */ +struct dmub_psr *dmub_psr_create(struct dc_context *ctx) +{ + struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL); + + if (psr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_psr_construct(psr, ctx); + + return psr; +} + +/** + * Deallocate PSR object. + */ +void dmub_psr_destroy(struct dmub_psr **dmub) +{ + kfree(dmub); + *dmub = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h new file mode 100644 index 000000000000..229958de3035 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -0,0 +1,47 @@ +/* + * Copyright 2012-16 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
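Every operation in the new dmub_psr.c follows the same DMUB service sequence: build a union dmub_rb_cmd, queue its header, kick the ring buffer, then wait for idle. Below is a condensed sketch of that pattern using the enable/disable command from the hunk; the wrapper name is illustrative.

static void dmub_send_psr_enable(struct dc_context *dc_ctx, bool enable)
{
        union dmub_rb_cmd cmd = { 0 };

        cmd.psr_enable.header.type = DMUB_CMD__PSR;
        cmd.psr_enable.header.sub_type = enable ? DMUB_CMD__PSR_ENABLE
                                                : DMUB_CMD__PSR_DISABLE;
        cmd.psr_enable.header.payload_bytes = 0;        /* header-only command */

        dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd.psr_enable.header);
        dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
        dc_dmub_srv_wait_idle(dc_ctx->dmub_srv);
}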
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_PSR_H_ +#define _DMUB_PSR_H_ + +#include "os_types.h" + +struct dmub_psr { + struct dc_context *ctx; + const struct dmub_psr_funcs *funcs; +}; + +struct dmub_psr_funcs { + void (*set_psr_enable)(struct dmub_psr *dmub, bool enable); + bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context); + void (*get_psr_state)(uint32_t *psr_state); + void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level); +}; + +struct dmub_psr *dmub_psr_create(struct dc_context *ctx); +void dmub_psr_destroy(struct dmub_psr **dmub); + + +#endif /* _DCE_DMUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 799d36299c9b..753cb8edd996 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c @@ -26,7 +26,6 @@ #include "dc.h" #include "core_types.h" #include "clk_mgr.h" -#include "hw_sequencer.h" #include "dce100_hw_sequencer.h" #include "resource.h" @@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index a6b80fdaa666..34518da20009 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE100_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; struct dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index a5e122c721ec..8f78bf9abbca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -725,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce100_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -885,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce100_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -950,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = { .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; -static bool construct( +static bool dce100_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1122,7 +1122,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce100_resource_destruct(pool); return false; } @@ -1137,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce100_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f0e837d14000..5b689273ff44 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -61,6 +61,8 @@ #include "atomfirmware.h" +#define GAMMA_HW_POINTS_NUM 256 + /* * All values are in milliseconds; * For eDP, after power-up/power/down, @@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params, } static bool -dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; @@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, } static bool -dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { struct transform *xfm = pipe_ctx->plane_res.xfm; @@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; - struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct dc_link *link = pipe_ctx->stream->link; - + const struct dc *dc = link->dc; uint32_t active_total_with_borders; uint32_t early_control = 0; @@ -667,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - link->dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = @@ -943,15 +944,15 @@ void dce110_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc; + struct dc *dc; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; if (!pipe_ctx->stream) return; - core_dc = pipe_ctx->stream->ctx->dc; - clk_mgr = core_dc->clk_mgr; + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -959,7 +960,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) num_audio++; } @@ -1047,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -1056,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } @@ -1064,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; if 
(link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, false); + hws->funcs.edp_backlight_control(link, false); dc_link_set_abm_disable(link); } @@ -1223,7 +1226,7 @@ static void program_scaler(const struct dc *dc, { struct tg_color color = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* TOFPGA */ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL) return; @@ -1322,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.disable_stream_gating) { - dc->hwss.disable_stream_gating(dc, pipe_ctx); + if (hws->funcs.disable_stream_gating) { + hws->funcs.disable_stream_gating(dc, pipe_ctx); } if (pipe_ctx->stream_res.audio != NULL) { @@ -1357,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) - dc->hwss.enable_stream_timing(pipe_ctx, context, dc); + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1371,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw( // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) event_triggers = 0x80; + /* Event triggers and num frames initialized for DRR, but can be + * later updated for PSR use. Note DRR trigger events are generated + * regardless of whether num frames met. 
+ */ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); + pipe_ctx->stream_res.tg, event_triggers, 2); if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( @@ -1390,7 +1396,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( odm_pipe->stream_res.opp, @@ -1404,7 +1409,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } -#endif if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); @@ -1438,6 +1442,9 @@ static void power_down_encoders(struct dc *dc) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); + if (signal != SIGNAL_TYPE_EDP) + signal = SIGNAL_TYPE_NONE; + dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); } @@ -1552,9 +1559,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.init_pipes) - dc->hwss.init_pipes(dc, context); + if (hws->funcs.init_pipes) + hws->funcs.init_pipes(dc, context); edp_stream = get_edp_stream(context); @@ -1591,7 +1599,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ - dc->hwss.edp_backlight_control(edp_link_with_sink, false); + hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ power_down_all_hw_blocks(dc); @@ -1702,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx, struct drr_params params = {0}; // DRR should set trigger event to monitor surface update event unsigned int event_triggers = 0x80; + // Note DRR trigger events are generated regardless of whether num frames met. 
+ unsigned int num_frames = 2; params.vertical_total_max = vmax; params.vertical_total_min = vmin; @@ -1717,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx, if (vmax != 0 && vmin != 0) pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( pipe_ctx[i]->stream_res.tg, - event_triggers); + event_triggers, num_frames); } } @@ -1734,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx, } static void set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events) + int num_pipes, const struct dc_static_screen_params *params) { unsigned int i; - unsigned int value = 0; + unsigned int triggers = 0; - if (events->overlay_update) - value |= 0x100; - if (events->surface_update) - value |= 0x80; - if (events->cursor_update) - value |= 0x2; - if (events->force_trigger) - value |= 0x1; + if (params->triggers.overlay_update) + triggers |= 0x100; + if (params->triggers.surface_update) + triggers |= 0x80; + if (params->triggers.cursor_update) + triggers |= 0x2; + if (params->triggers.force_trigger) + triggers |= 0x1; if (num_pipes) { struct dc *dc = pipe_ctx[0]->stream->ctx->dc; if (dc->fbc_compressor) - value |= 0x84; + triggers |= 0x84; } for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> - set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); } /* @@ -2006,13 +2017,14 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; /* Reset old context */ /* look up the targets that have been removed since last commit */ - dc->hwss.reset_hw_ctx_wrap(dc, context); + hws->funcs.reset_hw_ctx_wrap(dc, context); /* Skip applying if no targets */ if (context->stream_count <= 0) @@ -2037,7 +2049,7 @@ enum dc_status dce110_apply_ctx_to_hw( continue; } - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, dc->ctx->dc_bios, PIPE_GATING_CONTROL_DISABLE); } @@ -2346,19 +2358,20 @@ static void init_hw(struct dc *dc) struct transform *xfm; struct abm *abm; struct dmcu *dmcu; + struct dce_hwseq *hws = dc->hwseq; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { xfm = dc->res_pool->transforms[i]; xfm->funcs->transform_reset(xfm); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_INIT); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_DISABLE); - dc->hwss.enable_display_pipe_clock_gating( + hws->funcs.enable_display_pipe_clock_gating( dc->ctx, true); } @@ -2444,6 +2457,8 @@ static void dce110_program_front_end_for_pipe( struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; unsigned int i; + struct dce_hwseq *hws = dc->hwseq; + DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); @@ -2502,10 +2517,10 @@ static void dce110_program_front_end_for_pipe( if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, 
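set_static_screen_control() above switches from struct dc_static_screen_events to struct dc_static_screen_params, which carries both the trigger selections and num_frames. The sketch below mirrors the trigger-mask mapping programmed by the DCE110 implementation; the helper is illustrative and omits only the FBC special case (an extra 0x84 when dc->fbc_compressor is present).

static uint32_t static_screen_triggers_to_mask(
                const struct dc_static_screen_params *params)
{
        uint32_t triggers = 0;

        if (params->triggers.overlay_update)
                triggers |= 0x100;
        if (params->triggers.surface_update)
                triggers |= 0x80;       /* same bit used as the DRR default */
        if (params->triggers.cursor_update)
                triggers |= 0x2;
        if (params->triggers.force_trigger)
                triggers |= 0x1;

        /* params->num_frames is handed to the timing generator separately */
        return triggers;
}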
pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( "Pipe:%d %p: addr hi:0x%x, " @@ -2608,6 +2623,7 @@ static void dce110_apply_ctx_for_surface( static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx; @@ -2615,7 +2631,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream) return; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE); dc->res_pool->transforms[fe_idx]->funcs->transform_reset( @@ -2704,14 +2720,10 @@ static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, .program_output_csc = program_output_csc, .init_hw = init_hw, - .init_pipes = init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, - .set_input_transfer_func = dce110_set_input_transfer_func, - .set_output_transfer_func = dce110_set_output_transfer_func, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dce110_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset, @@ -2722,8 +2734,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, - .enable_display_power_gating = dce110_enable_display_power_gating, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, @@ -2731,22 +2741,33 @@ static const struct hw_sequencer_funcs dce110_funcs = { .set_drr = set_drr, .get_position = get_position, .set_static_screen_control = set_static_screen_control, - .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, - .enable_stream_timing = dce110_enable_stream_timing, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dce110_set_cursor_position, .set_cursor_attribute = dce110_set_cursor_attribute }; +static const struct hwseq_private_funcs dce110_private_funcs = { + .init_pipes = init_pipes, + .update_plane_addr = update_plane_addr, + .set_input_transfer_func = dce110_set_input_transfer_func, + .set_output_transfer_func = dce110_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, + .enable_display_power_gating = dce110_enable_display_power_gating, + .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, + .enable_stream_timing = dce110_enable_stream_timing, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .edp_backlight_control = dce110_edp_backlight_control, +}; + void dce110_hw_sequencer_construct(struct dc *dc) { dc->hwss = dce110_funcs; + dc->hwseq->funcs = 
dce110_private_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 2f9b7dbdf415..26a9c14a58b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -27,8 +27,8 @@ #define __DC_HWSS_DCE110_H__ #include "core_types.h" +#include "hw_sequencer_private.h" -#define GAMMA_HW_POINTS_NUM 256 struct dc; struct dc_state; struct dm_pp_display_configuration; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 83a4dbf6d76e..bf14e9ab040c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -782,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce110_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -1097,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; + struct dce_hwseq *hws = dc->hwseq; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1117,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); @@ -1161,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce110_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1313,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap( return &carrizo_resource_cap; } -static bool construct( +static bool dce110_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, @@ -1492,7 +1493,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce110_resource_destruct(pool); return false; } @@ -1507,7 +1508,7 @@ struct resource_pool *dce110_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool, asic_id)) + if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 5f7c2c5641c4..1ea7db8eeb98 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr( void dce110_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t static_screen_cntl = 0; uint32_t addr = 0; + // By register spec, it only takes 8 bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL); static_screen_cntl = dm_read_reg(tg->ctx, addr); set_reg_field_value(static_screen_cntl, - value, + event_triggers, 
CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK); set_reg_field_value(static_screen_cntl, - 2, + num_frames, CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_FRAME_COUNT); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h index 768ccf27ada9..d8a5ed7b485d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h @@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr( void dce110_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void dce110_timing_generator_get_crtc_scanoutpos( struct timing_generator *tg, diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 1e4a7c13f0ed..19873ee1f78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h index e646f4a37fa2..943f1b2c5b2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE112_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 97dcc5d0862b..700ad8b3e54b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -744,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce112_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -1013,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce112_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1186,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap( return &polaris_10_resource_cap; } -static bool construct( +static bool dce112_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1372,7 +1372,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce112_resource_destruct(pool); return false; } @@ -1386,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce112_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 1ca30928025e..66a13aa39c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c 
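/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * set_static_screen_control() hunks for the DCE110 (and, below, DCE120)
 * timing generators clamp the new num_frames argument because the
 * CRTC_STATIC_SCREEN_FRAME_COUNT register field is only 8 bits wide,
 * and program it instead of the previously hard-coded value 2. A
 * minimal standalone model of that clamp (function name hypothetical):
 */
#include <stdint.h>

static uint32_t clamp_static_screen_frame_count(uint32_t num_frames)
{
	/* By register spec the frame count only takes an 8-bit value. */
	return (num_frames > 0xFF) ? 0xFF : num_frames;
}

/* clamp_static_screen_frame_count(2)   == 2   (the old fixed value)
 * clamp_static_screen_frame_count(300) == 255 (saturated to 8 bits)
 */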
@@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating; dc->hwss.update_dchub = dce120_update_dchub; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h index c51afbd0b012..bc024534732f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE120_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 63543f6918ff..53ab88ef71f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -63,8 +63,8 @@ #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" -#include "mmhub/mmhub_9_4_0_offset.h" -#include "mmhub/mmhub_9_4_0_sh_mask.h" +#include "mmhub/mmhub_1_0_offset.h" +#include "mmhub/mmhub_1_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" @@ -587,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm) *xfm = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce120_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -872,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce120_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1024,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dce120_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1237,7 +1237,7 @@ controller_create_fail: clk_src_create_fail: res_create_fail: - destruct(pool); + dce120_resource_destruct(pool); return false; } @@ -1252,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce120_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c index 098e56962f2a..82bc4e192bbf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c @@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg, static void dce120_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); + // By register spec, it only takes 8 bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL, - CRTC_STATIC_SCREEN_EVENT_MASK, value, - CRTC_STATIC_SCREEN_FRAME_COUNT, 2); + CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers, + CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames); } void dce120_timing_generator_set_test_pattern( diff --git 
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index c4543178ba20..893261c81854 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h index 7a1b31def66f..e43af832d00c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE80_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3e8d4b49f279..2ad5c28c6e66 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -773,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create( return &ipp->base; } -static void destruct(struct dce110_resource_pool *pool) +static void dce80_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -901,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce80_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1093,7 +1093,7 @@ static bool dce80_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1290,7 +1290,7 @@ static bool dce81_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1483,7 +1483,7 @@ static bool dce83_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..62ad1a11bff9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,7 +22,8 @@ # # Makefile for DCN. 
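/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * dce112/dce120/dce80 constructors above now override
 * enable_display_power_gating on dc->hwseq->funcs rather than on
 * dc->hwss: chip-internal hooks move into a private hwseq function
 * table while dc->hwss keeps only the public sequencer interface. A
 * minimal standalone model of that public/private vtable split and of
 * a derived ASIC overriding one private hook (all names hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

struct hwseq_public_funcs {              /* callable from the rest of DC */
	void (*init_hw)(void);
};

struct hwseq_private_funcs {             /* hwseq-internal helpers only */
	bool (*enable_display_power_gating)(int inst);
};

struct hwseq {
	struct hwseq_public_funcs hwss;
	struct hwseq_private_funcs funcs;
};

static void base_init_hw(void) { puts("base init_hw"); }
static bool base_pg(int inst)    { printf("base pg %d\n", inst); return true; }
static bool derived_pg(int inst) { printf("derived pg %d\n", inst); return true; }

static void base_construct(struct hwseq *hs)
{
	hs->hwss.init_hw = base_init_hw;
	hs->funcs.enable_display_power_gating = base_pg;
}

static void derived_construct(struct hwseq *hs)
{
	base_construct(hs);                             /* inherit base tables  */
	hs->funcs.enable_display_power_gating = derived_pg;  /* override private hook */
}

/* Usage: derived_construct(&hs); hs.funcs.enable_display_power_gating(0); */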
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ +DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ + dcn10_hw_sequencer_debug.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 997e9582edc7..0e682b5aa3eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -290,12 +290,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut) -#else - enum dc_color_space input_color_space) -#endif { uint32_t pixel_format; uint32_t alpha_en; @@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dpp_program_blnd_lut = NULL, .dpp_program_shaper_lut = NULL, .dpp_program_3dlut = NULL -#endif }; static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 1d4a7d640334..2edf566b3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1486,12 +1486,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void dpp1_full_bypass(struct dpp *dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index aa0c7a7d13a0..4d3f7d5e1473 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -88,26 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { - {COLOR_SPACE_SRGB, - {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, - {COLOR_SPACE_SRGB_LIMITED, - {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, - {COLOR_SPACE_YCBCR601, - {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, - 0, 0x2000, 0x38b4, 0xe3a6} }, - {COLOR_SPACE_YCBCR601_LIMITED, - {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, - 0, 0x2568, 0x40de, 0xdd3a} }, - {COLOR_SPACE_YCBCR709, - {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, - 0x2000, 0x3b61, 0xe24f} }, - - {COLOR_SPACE_YCBCR709_LIMITED, - {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, - 0x2568, 0x43ee, 0xdbb2} } -}; - static void program_gamut_remap( struct dcn10_dpp *dpp, const uint16_t *regval, @@ -352,6 +332,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, uint32_t i; struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + REG_SEQ_START(); + for (i = 0 ; i < num; i++) { REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); @@ -626,10 +608,16 @@ void dpp1_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: 
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; } + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); } void dpp1_degamma_ram_select( @@ -731,10 +719,8 @@ void dpp1_full_bypass(struct dpp *dpp_base) /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ if (dpp->tf_mask->CM_BYPASS_EN) REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); -#endif /* Setting degamma bypass for now */ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index d67e0abeee93..fce37c527a0b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb( INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else { /* DSCL caps: pixel data processed in float format */ REG_SET_2(LB_DATA_FORMAT, 0, INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#endif REG_SET_2(LB_MEMORY_CTRL, 0, MEMORY_CONFIG, mem_size_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c index 374cc9acda3b..b6391a5ead78 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c @@ -23,7 +23,7 @@ * */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "reg_helper.h" #include "resource.h" @@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = { .update = NULL, .set_stereo = NULL, .set_new_content = NULL, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .set_warmup = NULL, -#endif .dwb_set_scaler = NULL, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h index c175edd0bae7..d56ea7c8171e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h @@ -24,7 +24,7 @@ #ifndef __DC_DWBC_DCN10_H__ #define __DC_DWBC_DCN10_H__ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN */ #define BASE_INNER(seg) \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index a02c10e23e0d..f36a0d8cedfe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; break; + default: + ASSERT(false); + break; } output->capable = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 69d903d68661..af57751253de 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -121,7 +121,6 @@ struct dcn_hubbub_registers { uint32_t DCN_VM_AGP_BASE; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; @@ -140,7 +139,6 @@ struct dcn_hubbub_registers { uint32_t DCHVM_CLK_CTRL; uint32_t DCHVM_RIOMMU_CTRL0; uint32_t DCHVM_RIOMMU_STAT0; -#endif }; /* set field name */ @@ -232,7 +230,6 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HUBBUB_HVM_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ @@ -278,22 +275,17 @@ struct dcn_hubbub_registers { type HOSTVM_POWERSTATUS; \ type RIOMMU_ACTIVE; \ type HOSTVM_PREFETCH_DONE -#endif struct dcn_hubbub_shift { DCN_HUBBUB_REG_FIELD_LIST(uint8_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn_hubbub_mask { DCN_HUBBUB_REG_FIELD_LIST(uint32_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint32_t); -#endif }; struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 14d1be6c66e6..31b64733d693 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -306,7 +306,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -327,7 +326,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; @@ -1014,6 +1012,9 @@ void hubp1_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1248,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_get_underflow_status = hubp1_get_underflow_status, .hubp_init = hubp1_init, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dmdata_set_attributes = NULL, .dmdata_load = NULL, -#endif }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index ae70d9c0aa1d..780af5b3c16f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -670,6 +670,7 @@ struct dcn_hubp_state { uint32_t sw_mode; uint32_t dcc_en; uint32_t blank_en; + uint32_t clock_en; uint32_t underflow_status; uint32_t ttu_disable; uint32_t min_ttu_vblank; @@ -728,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, enum hubp_ind_block_size independent_64b_blks); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hubp1_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, bool flip_immediate); -#endif bool hubp1_is_flip_pending(struct hubp *hubp); void hubp1_cursor_set_attributes( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index eb91432621ab..1008ac8a0f2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -25,17 +25,18 @@ #include <linux/delay.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "core_types.h" #include "resource.h" #include "custom_float.h" #include "dcn10_hw_sequencer.h" -#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer_debug.h" #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" #include "dcn10_optc.h" -#include "dcn10/dcn10_dpp.h" -#include "dcn10/dcn10_mpc.h" +#include "dcn10_dpp.h" +#include "dcn10_mpc.h" #include "timing_generator.h" #include "opp.h" #include "ipp.h" @@ -49,9 +50,7 @@ #include "clk_mgr.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif #define DC_LOGGER_INIT(logger) @@ -68,6 +67,8 @@ #define DTN_INFO_MICRO_SEC(ref_cycle) \ print_microsec(dc_ctx, log_ctx, ref_cycle) +#define GAMMA_HW_POINTS_NUM 256 + void print_microsec(struct dc_context *dc_ctx, struct dc_log_buffer_ctx *log_ctx, uint32_t ref_cycle) @@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx, us_x10 % frac); } +static void dcn10_lock_all_pipes(struct dc *dc, + struct dc_state *context, + bool lock) +{ + struct pipe_ctx *pipe_ctx; + struct timing_generator *tg; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + tg = pipe_ctx->stream_res.tg; + /* + * Only lock the top pipe's tg to prevent redundant + * (un)locking. Also skip if pipe is disabled. + */ + if (pipe_ctx->top_pipe || + !pipe_ctx->stream || !pipe_ctx->plane_state || + !tg->funcs->is_tg_enabled(tg)) + continue; + + if (lock) + tg->funcs->lock(tg); + else + tg->funcs->unlock(tg); + } +} + static void log_mpc_crc(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) { @@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO("HUBP: format addr_hi width height" - " rot mir sw_mode dcc_en blank_en ttu_dis underflow" - " min_ttu_vblank qos_low_wm qos_high_wm\n"); + DTN_INFO( + "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); @@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) hubp->funcs->hubp_read_state(hubp); if (!s->blank_en) { - DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" - " %6d %8d %7d %8xh", + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh", hubp->inst, s->pixel_format, s->inuse_addr_hi, @@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) s->sw_mode, s->dcc_en, s->blank_en, + s->clock_en, s->ttu_disable, s->underflow_status); DTN_INFO_MICRO_SEC(s->min_ttu_vblank); @@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel" - " h_bs h_be h_ss h_se hpol htot vtot underflow\n"); + DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n"); for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; struct dcn_otg_state s = {0}; - + /* Read shared OTG state registers for all DCNx */ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + /* + * For DCN2 and greater, a register on the OPP is used to + * determine if the CRTC is blanked instead of the OTG. 
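/*
 * Editor's note -- illustrative sketch, not part of the patch. The new
 * dcn10_lock_all_pipes() above walks every pipe in the context but only
 * toggles the timing-generator lock on enabled top pipes, since locking
 * one TG already covers any split pipes chained below it. A simplified
 * standalone model of that filter (the structs here are hypothetical
 * stand-ins for pipe_ctx state):
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_pipe {
	struct fake_pipe *top_pipe;   /* non-NULL means this is a split (bottom) pipe */
	bool has_stream;
	bool has_plane;
	bool tg_enabled;
	bool tg_locked;
};

static void lock_all_pipes(struct fake_pipe *pipes, size_t count, bool lock)
{
	for (size_t i = 0; i < count; i++) {
		struct fake_pipe *p = &pipes[i];

		/* Skip split pipes and anything not actively driving output,
		 * mirroring the continue conditions in dcn10_lock_all_pipes(). */
		if (p->top_pipe || !p->has_stream || !p->has_plane || !p->tg_enabled)
			continue;

		p->tg_locked = lock;
	}
}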
So use + * dpg_is_blanked() if exists, otherwise fallback on otg. + * + * TODO: Implement DCN-specific read_otg_state hooks. + */ + if (pool->opps[i]->funcs->dpg_is_blanked) + s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); + else + s.blank_enabled = tg->funcs->is_blanked(tg); + //only print if OTG master is enabled if ((s.otg_enabled & 1) == 0) continue; - DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d" - " %5d %5d %5d %5d %9d\n", + DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n", tg->inst, s.v_blank_start, s.v_blank_end, @@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc, s.h_sync_a_pol, s.h_total, s.v_total, - s.underflow_occurred_status); + s.underflow_occurred_status, + s.blank_enabled); // Clear underflow for debug purposes // We want to keep underflow sticky bit on for the longevity tests outside of test environment. @@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); for (i = 0; i < pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = pool->dscs[i]; @@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n"); + DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n"); for (i = 0; i < dc->link_count; i++) { struct link_encoder *lenc = dc->links[i]->link_enc; @@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc, if (lenc->funcs->read_state) { lenc->funcs->read_state(lenc, &s); - DTN_INFO("[%-3d]: %-12d %-22d %-22d\n", + DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n", i, s.dphy_fec_en, s.dphy_fec_ready_shadow, - s.dphy_fec_active_status); + s.dphy_fec_active_status, + s.dp_link_training_complete); DTN_INFO("\n"); } } DTN_INFO("\n"); -#endif DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", @@ -438,14 +475,14 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx) return false; } -static void dcn10_enable_power_gating_plane( +void dcn10_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { - bool force_on = 1; /* disable power gating */ + bool force_on = true; /* disable power gating */ if (enable) - force_on = 0; + force_on = false; /* DCHUBP0/1/2/3 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -460,7 +497,7 @@ static void dcn10_enable_power_gating_plane( REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); } -static void dcn10_disable_vga( +void dcn10_disable_vga( struct dce_hwseq *hws) { unsigned int in_vga1_mode = 0; @@ -493,7 +530,7 @@ static void dcn10_disable_vga( REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1); } -static void dcn10_dpp_pg_control( +void dcn10_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ -545,7 +582,7 @@ static void dcn10_dpp_pg_control( } } -static void dcn10_hubp_pg_control( +void dcn10_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -605,8 +642,8 @@ static void power_on_plane( if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true); - hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true); + hws->funcs.dpp_pg_control(hws, plane_id, true); + 
hws->funcs.hubp_pg_control(hws, plane_id, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); DC_LOG_DEBUG( @@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.hubp_pg_control(hws, 0, false); + hws->funcs.hubp_pg_control(hws, 0, false); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.hubp_pg_control(hws, 0, true); + hws->funcs.hubp_pg_control(hws, 0, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -664,16 +701,16 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) hws->wa_state.DEGVIDCN10_253_applied = true; } -static void dcn10_bios_golden_init(struct dc *dc) +void dcn10_bios_golden_init(struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *bp = dc->ctx->dc_bios; int i; bool allow_self_fresh_force_enable = true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) - if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) + if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc)) return; -#endif + if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -732,7 +769,7 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } -static enum dc_status dcn10_enable_stream_timing( +enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -823,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe( struct dc_state *context) { int i; + struct dc_link *link; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; @@ -830,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe( } if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable */ - if (!pipe_ctx->stream->dpms_off) + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. 
+ */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) core_link_disable_stream(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -978,8 +1022,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; struct mpc *mpc = dc->res_pool->mpc; @@ -1004,10 +1049,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_disconnect(hubp); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_plane_atomic_power_down(struct dc *dc, +void dcn10_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp) { @@ -1017,8 +1062,8 @@ static void dcn10_plane_atomic_power_down(struct dc *dc, if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.dpp_pg_control(hws, dpp->inst, false); - dc->hwss.hubp_pg_control(hws, hubp->inst, false); + hws->funcs.dpp_pg_control(hws, dpp->inst, false); + hws->funcs.hubp_pg_control(hws, hubp->inst, false); dpp->funcs->dpp_reset(dpp); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -1030,8 +1075,9 @@ static void dcn10_plane_atomic_power_down(struct dc *dc, /* disable HW used by plane. * note: cannot disable until disconnect is complete */ -static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; int opp_id = hubp->opp_id; @@ -1050,7 +1096,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -1062,14 +1108,15 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return; - dc->hwss.plane_atomic_disable(dc, pipe_ctx); + hws->funcs.plane_atomic_disable(dc, pipe_ctx); apply_DEGVIDCN10_253_wa(dc); @@ -1077,9 +1124,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } -static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) +void dcn10_init_pipes(struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; bool can_apply_seamless_boot = false; for (i = 0; i < context->stream_count; i++) { @@ -1104,8 +1152,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) * command table. 
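/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * dcn10_reset_back_end_for_pipe() hunk above disables the stream when
 * either dpms_off is false or the link is still reported active,
 * because on S4 resume with a second-screen-only configuration
 * dpms_off can be stale while VBIOS has already lit the eDP. As a
 * plain predicate:
 */
#include <stdbool.h>

static bool should_disable_stream(bool dpms_off, bool link_active)
{
	/* Trust the live link status, not only the cached DPMS state. */
	return !dpms_off || link_active;
}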
*/ if (tg->funcs->is_tg_enabled(tg)) { - if (dc->hwss.init_blank != NULL) { - dc->hwss.init_blank(dc, tg); + if (hws->funcs.init_blank != NULL) { + hws->funcs.init_blank(dc, tg); tg->funcs->lock(tg); } else { tg->funcs->lock(tg); @@ -1115,7 +1163,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) } } - for (i = 0; i < dc->res_pool->pipe_count; i++) { + /* num_opp will be equal to number of mpcc */ + for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; /* Cannot reset the MPC mux if seamless boot */ @@ -1139,8 +1188,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) if (can_apply_seamless_boot && pipe_ctx->stream != NULL && pipe_ctx->stream_res.tg->funcs->is_tg_enabled( - pipe_ctx->stream_res.tg)) + pipe_ctx->stream_res.tg)) { + // Enable double buffering for OTG_BLANK no matter if + // seamless boot is enabled or not to suppress global sync + // signals when OTG blanked. This is to prevent pipe from + // requesting data while in PSR. + tg->funcs->tg_init(tg); continue; + } /* Disable on the current state so the new one isn't cleared. */ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -1162,7 +1217,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); @@ -1176,7 +1231,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) } } -static void dcn10_init_hw(struct dc *dc) +void dcn10_init_hw(struct dc *dc) { int i; struct abm *abm = dc->res_pool->abm; @@ -1208,15 +1263,15 @@ static void dcn10_init_hw(struct dc *dc) } //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); return; } if (!dcb->funcs->is_accelerated_mode(dcb)) - dc->hwss.disable_vga(dc->hwseq); + hws->funcs.disable_vga(dc->hwseq); - dc->hwss.bios_golden_init(dc); + hws->funcs.bios_golden_init(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; @@ -1258,11 +1313,9 @@ static void dcn10_init_hw(struct dc *dc) } /* Power gate DSCs */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (dc->hwss.dsc_pg_control != NULL) - dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); -#endif + if (hws->funcs.dsc_pg_control != NULL) + hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1271,7 +1324,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. 
*/ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - dc->hwss.init_pipes(dc, dc->current_state); + hws->funcs.init_pipes(dc, dc->current_state); } for (i = 0; i < res_pool->audio_count; i++) { @@ -1285,7 +1338,7 @@ static void dcn10_init_hw(struct dc *dc) abm->funcs->abm_init(abm); } - if (dmcu != NULL) + if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); if (abm != NULL && dmcu != NULL) @@ -1303,18 +1356,19 @@ static void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - dc->hwss.enable_power_gating_plane(dc->hwseq, true); + hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); } -static void dcn10_reset_hw_ctx_wrap( +void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -1333,8 +1387,8 @@ static void dcn10_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -1367,9 +1421,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - - -static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1394,8 +1446,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; } -static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; @@ -1427,6 +1479,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; @@ -1472,9 +1529,8 @@ static void log_tf(struct dc_context *ctx, } } -static bool -dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream) +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream) { struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -1510,11 +1566,13 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static void dcn10_pipe_control_lock( +void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock) { + struct dce_hwseq *hws = dc->hwseq; + /* use TG master update lock to lock everything on the TG * therefore only top pipe need to lock */ @@ -1522,7 +1580,7 @@ static void dcn10_pipe_control_lock( return; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + 
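/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * dcn10_set_input_transfer_func() hunk above stops treating
 * TRANSFER_FUNCTION_PQ as unsupported and instead programs the degamma
 * block with a user PWL built from the curve. The curve being
 * linearized is the SMPTE ST 2084 (PQ) EOTF; a direct floating-point
 * evaluation of that EOTF, which a PWL such as the one programmed
 * above approximates, looks like this (normalized output, constants
 * taken from the ST 2084 specification):
 */
#include <math.h>

static double pq_eotf_normalized(double n)   /* n in [0, 1] -> linear [0, 1] */
{
	const double m1 = 2610.0 / 16384.0;          /* 0.1593017578125 */
	const double m2 = 2523.0 / 4096.0 * 128.0;   /* 78.84375        */
	const double c1 = 3424.0 / 4096.0;           /* 0.8359375       */
	const double c2 = 2413.0 / 4096.0 * 32.0;    /* 18.8515625      */
	const double c3 = 2392.0 / 4096.0 * 32.0;    /* 18.6875         */
	double np = pow(n, 1.0 / m2);

	return pow(fmax(np - c1, 0.0) / (c2 - c3 * np), 1.0 / m1);
}

/* pq_eotf_normalized(0.0) == 0.0 and pq_eotf_normalized(1.0) == 1.0,
 * where 1.0 corresponds to the 10000 nit PQ peak. */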
hws->funcs.verify_allow_pstate_change_high(dc); if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); @@ -1530,7 +1588,7 @@ static void dcn10_pipe_control_lock( pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } static bool wait_for_reset_trigger_to_occur( @@ -1570,7 +1628,7 @@ static bool wait_for_reset_trigger_to_occur( return rc; } -static void dcn10_enable_timing_synchronization( +void dcn10_enable_timing_synchronization( struct dc *dc, int group_index, int group_size, @@ -1600,7 +1658,7 @@ static void dcn10_enable_timing_synchronization( DC_SYNC_INFO("Sync complete\n"); } -static void dcn10_enable_per_frame_crtc_position_reset( +void dcn10_enable_per_frame_crtc_position_reset( struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -1625,10 +1683,10 @@ static void dcn10_enable_per_frame_crtc_position_reset( } /*static void print_rq_dlg_ttu( - struct dc *core_dc, + struct dc *dc, struct pipe_ctx *pipe_ctx) { - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML TTU Output parameters [%d] ==============\n" "qos_level_low_wm: %d, \n" "qos_level_high_wm: %d, \n" @@ -1658,7 +1716,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML DLG Output parameters [%d] ==============\n" "refcyc_h_blank_end: %d, \n" "dlg_vblank_end: %d, \n" @@ -1693,7 +1751,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\ndst_y_per_meta_row_nom_l: %d, \n" "refcyc_per_meta_chunk_nom_l: %d, \n" "refcyc_per_line_delivery_pre_l: %d, \n" @@ -1723,7 +1781,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_line_delivery_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML RQ Output parameters [%d] ==============\n" "chunk_size: %d \n" "min_chunk_size: %d \n" @@ -1838,7 +1896,7 @@ static void dcn10_enable_plane( struct dce_hwseq *hws = dc->hwseq; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } undo_DEGVIDCN10_253_wa(dc); @@ -1895,11 +1953,11 @@ static void dcn10_enable_plane( dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp); if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } -static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) { int i = 0; struct dpp_grph_csc_adjustment adjust; @@ -1947,7 +2005,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint matrix[11] = rgb_bias; } -static void dcn10_program_output_csc(struct dc *dc, +void dcn10_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, @@ -1979,57 +2037,6 @@ static void dcn10_program_output_csc(struct dc *dc, } } -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->bottom_pipe && 
is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - return false; -} - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_rgb_cspace(enum dc_color_space output_color_space) -{ - switch (output_color_space) { - case COLOR_SPACE_SRGB: - case COLOR_SPACE_SRGB_LIMITED: - case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_RGB_LIMITEDRANGE: - case COLOR_SPACE_ADOBERGB: - return true; - case COLOR_SPACE_YCBCR601: - case COLOR_SPACE_YCBCR709: - case COLOR_SPACE_YCBCR601_LIMITED: - case COLOR_SPACE_YCBCR709_LIMITED: - case COLOR_SPACE_2020_YCBCR: - return false; - default: - /* Add a case to switch */ - BREAK_TO_DEBUGGER(); - return false; - } -} - void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -2103,70 +2110,7 @@ void dcn10_get_hdr_visual_confirm_color( } } -static uint16_t fixed_point_to_int_frac( - struct fixed31_32 arg, - uint8_t integer_bits, - uint8_t fractional_bits) -{ - int32_t numerator; - int32_t divisor = 1 << fractional_bits; - - uint16_t result; - - uint16_t d = (uint16_t)dc_fixpt_floor( - dc_fixpt_abs( - arg)); - - if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) - numerator = (uint16_t)dc_fixpt_floor( - dc_fixpt_mul_int( - arg, - divisor)); - else { - numerator = dc_fixpt_floor( - dc_fixpt_sub( - dc_fixpt_from_int( - 1LL << integer_bits), - dc_fixpt_recip( - dc_fixpt_from_int( - divisor)))); - } - - if (numerator >= 0) - result = (uint16_t)numerator; - else - result = (uint16_t)( - (1 << (integer_bits + fractional_bits + 1)) + numerator); - - if ((result != 0) && dc_fixpt_lt( - arg, dc_fixpt_zero)) - result |= 1 << (integer_bits + fractional_bits); - - return result; -} - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const struct dc_plane_state *plane_state) -{ - if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN - && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID - && plane_state->input_csc_color_matrix.enable_adjustment - && plane_state->coeff_reduction_factor.value != 0) { - bias_and_scale->scale_blue = fixed_point_to_int_frac( - dc_fixpt_mul(plane_state->coeff_reduction_factor, - dc_fixpt_from_fraction(256, 255)), - 2, - 13); - bias_and_scale->scale_red = bias_and_scale->scale_blue; - bias_and_scale->scale_green = bias_and_scale->scale_blue; - } else { - bias_and_scale->scale_blue = 0x2000; - bias_and_scale->scale_red = 0x2000; - bias_and_scale->scale_green = 0x2000; - } -} - -static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) +static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) { struct dc_bias_and_scale bns_params = {0}; @@ -2175,21 +2119,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 plane_state->color_space, NULL); -#else - plane_state->color_space); -#endif //set scale and bias registers 
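/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * prescale helpers removed above (fixed_point_to_int_frac and
 * dcn10_build_prescale_params move into the shared dc_common code)
 * encode the scale factor in a 2.13 fixed-point register format, where
 * 0x2000 == 1 << 13 represents 1.0. A standalone approximation of that
 * encoding for non-negative inputs (double is used only for
 * illustration; the driver does this with fixed31_32 math):
 */
#include <stdint.h>

static uint16_t to_u2_13(double value)
{
	double max = 4.0 - 1.0 / 8192.0;   /* largest representable 2.13 value */

	if (value < 0.0)
		value = 0.0;
	if (value > max)
		value = max;

	return (uint16_t)(value * 8192.0); /* 13 fractional bits, truncated */
}

/* to_u2_13(1.0)           == 0x2000, the bypass scale programmed above
 * to_u2_13(256.0 / 255.0) == 0x2020, the coeff-reduction case with a
 *                            unity reduction factor
 */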
- dcn10_build_prescale_params(&bns_params, plane_state); + build_prescale_params(&bns_params, plane_state); if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = {{0}}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; @@ -2199,10 +2140,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else { color_space_to_black_color( @@ -2284,11 +2225,12 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); } -void update_dchubp_dpp( +static void dcn10_update_dchubp_dpp( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -2342,12 +2284,12 @@ void update_dchubp_dpp( if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.bpp_change) - update_dpp(dpp, plane_state); + dcn10_update_dpp(dpp, plane_state); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || plane_state->update_flags.bits.global_alpha_change) - dc->hwss.update_mpcc(dc, pipe_ctx); + hws->funcs.update_mpcc(dc, pipe_ctx); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || @@ -2407,13 +2349,13 @@ void update_dchubp_dpp( hubp->power_gated = false; - dc->hwss.update_plane_addr(dc, pipe_ctx); + hws->funcs.update_plane_addr(dc, pipe_ctx); if (is_pipe_tree_visible(pipe_ctx)) hubp->funcs->set_blank(hubp, false); } -static void dcn10_blank_pixel_data( +void dcn10_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) @@ -2456,10 +2398,9 @@ static void dcn10_blank_pixel_data( } } -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx) { - struct fixed31_32 multiplier = dc_fixpt_from_fraction( - pipe_ctx->plane_state->sdr_white_level, 80); + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; uint32_t hw_mult = 0x1f000; // 1.0 default multiplier struct custom_float_format fmt; @@ -2467,7 +2408,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) fmt.mantissa_bits = 12; fmt.sign = true; - if (pipe_ctx->plane_state->sdr_white_level > 80) + + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 convert_to_custom_float_format(multiplier, &fmt, &hw_mult); pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier( @@ -2479,17 +2421,19 @@ void dcn10_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->plane_state->update_flags.bits.full_update) dcn10_enable_plane(dc, pipe_ctx, context); - update_dchubp_dpp(dc, 
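/*
 * Editor's note -- illustrative sketch, not part of the patch. In the
 * dcn10_set_hdr_multiplier() hunk above, 0x1f000 is documented as the
 * 1.0 default multiplier. With the 12 mantissa bits and sign flag set
 * up in the shown custom_float_format, that value decodes as biased
 * exponent 0x1f (31) and mantissa 0, i.e. (1 + 0/4096) * 2^(31-31) = 1.0,
 * assuming a 6-bit exponent with bias 31 (the exponent width is not
 * visible in this hunk). A standalone encoder for positive normal
 * values under that assumption (function name hypothetical):
 */
#include <math.h>
#include <stdint.h>

static uint32_t encode_custom_float_6e12m(double v)
{
	int exp;
	double mant;
	int biased;
	uint32_t frac;

	if (v <= 0.0)
		return 0;

	mant = frexp(v, &exp);                    /* v = mant * 2^exp, mant in [0.5, 1) */
	biased = (exp - 1) + 31;                  /* renormalize to [1, 2), bias 31     */
	frac = (uint32_t)((mant * 2.0 - 1.0) * 4096.0); /* 12-bit fraction, truncated   */

	return ((uint32_t)biased << 12) | (frac & 0xFFF);
}

/* encode_custom_float_6e12m(1.0) == 0x1f000, matching the default above. */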
pipe_ctx, context); + dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - set_hdr_multiplier(pipe_ctx); + hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for full update. @@ -2498,14 +2442,16 @@ void dcn10_program_pipe( * doing heavy calculation and programming */ if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); } -static void program_all_pipe_in_tree( +static void dcn10_program_all_pipe_in_tree( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->top_pipe == NULL) { bool blank = !is_pipe_tree_visible(pipe_ctx); @@ -2519,20 +2465,20 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) - dcn10_program_pipe(dc, pipe_ctx, context); + hws->funcs.program_pipe(dc, pipe_ctx, context); if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) - program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); + dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); } -struct pipe_ctx *find_top_pipe_for_stream( +static struct pipe_ctx *dcn10_find_top_pipe_for_stream( struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream) @@ -2556,19 +2502,20 @@ struct pipe_ctx *find_top_pipe_for_stream( return NULL; } -static void dcn10_apply_ctx_for_surface( +void dcn10_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; int i; struct timing_generator *tg; uint32_t underflow_check_delay_us; bool removed_pipe[4] = { false }; bool interdependent_update = false; struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); + dcn10_find_top_pipe_for_stream(dc, context, stream); DC_LOGGER_INIT(dc->ctx->logger); if (!top_pipe_to_program) @@ -2581,23 +2528,23 @@ static void dcn10_apply_ctx_for_surface( underflow_check_delay_us = dc->debug.underflow_assert_delay_us; - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (interdependent_update) - lock_all_pipes(dc, context, true); + dcn10_lock_all_pipes(dc, context, true); else dcn10_pipe_control_lock(dc, top_pipe_to_program, true); if (underflow_check_delay_us != 0xFFFFFFFF) udelay(underflow_check_delay_us); - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + 
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (num_planes == 0) { /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true); } /* Disconnect unused mpcc */ @@ -2623,7 +2570,7 @@ static void dcn10_apply_ctx_for_surface( old_pipe_ctx->plane_state && old_pipe_ctx->stream_res.tg == tg) { - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; DC_LOG_DC("Reset mpcc for pipe %d\n", @@ -2632,13 +2579,11 @@ static void dcn10_apply_ctx_for_surface( } if (num_planes > 0) - program_all_pipe_in_tree(dc, top_pipe_to_program, context); + dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); -#endif + if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree)) + hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context); if (interdependent_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2654,7 +2599,7 @@ static void dcn10_apply_ctx_for_surface( } if (interdependent_update) - lock_all_pipes(dc, context, false); + dcn10_lock_all_pipes(dc, context, false); else dcn10_pipe_control_lock(dc, top_pipe_to_program, false); @@ -2691,14 +2636,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex } } -static void dcn10_prepare_bandwidth( +void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2720,17 +2666,18 @@ static void dcn10_prepare_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_optimize_bandwidth( +void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2752,10 +2699,10 @@ static void dcn10_optimize_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, unsigned int vmid, unsigned int vmid_frame_number) { @@ -2763,6 +2710,8 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, struct drr_params params = {0}; // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow unsigned int event_triggers = 0x800; + // Note DRR trigger events are generated regardless of whether num frames met. 
+ unsigned int num_frames = 2; params.vertical_total_max = vmax; params.vertical_total_min = vmin; @@ -2779,11 +2728,11 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, if (vmax != 0 && vmin != 0) pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( pipe_ctx[i]->stream_res.tg, - event_triggers); + event_triggers, num_frames); } } -static void dcn10_get_position(struct pipe_ctx **pipe_ctx, +void dcn10_get_position(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position) { @@ -2795,22 +2744,23 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position); } -static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events) +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) { unsigned int i; - unsigned int value = 0; + unsigned int triggers = 0; - if (events->surface_update) - value |= 0x80; - if (events->cursor_update) - value |= 0x2; - if (events->force_trigger) - value |= 0x1; + if (params->triggers.surface_update) + triggers |= 0x80; + if (params->triggers.cursor_update) + triggers |= 0x2; + if (params->triggers.force_trigger) + triggers |= 0x1; for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> - set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); } static void dcn10_config_stereo_parameters( @@ -2850,7 +2800,7 @@ static void dcn10_config_stereo_parameters( return; } -static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) { struct crtc_stereo_flags flags = { 0 }; struct dc_stream_state *stream = pipe_ctx->stream; @@ -2889,15 +2839,16 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in return NULL; } -static void dcn10_wait_for_mpcc_disconnect( +void dcn10_wait_for_mpcc_disconnect( struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int mpcc_inst; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } if (!pipe_ctx->stream_res.opp) @@ -2914,12 +2865,12 @@ static void dcn10_wait_for_mpcc_disconnect( } if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } -static bool dcn10_dummy_display_power_gating( +bool dcn10_dummy_display_power_gating( struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, @@ -2928,7 +2879,7 @@ static bool dcn10_dummy_display_power_gating( return true; } -static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct timing_generator *tg = pipe_ctx->stream_res.tg; @@ -2952,7 +2903,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) } } -static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) { struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub; @@ -2960,7 +2911,34 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh hubbub->funcs->update_dchub(hubbub, dh_data); } -static 
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) +static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *test_pipe; + const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; + int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + + /** + * Disable the cursor if there's another pipe above this with a + * plane that contains this pipe's viewport to prevent double cursor + * and incorrect scaling artifacts. + */ + for (test_pipe = pipe_ctx->top_pipe; test_pipe; + test_pipe = test_pipe->top_pipe) { + if (!test_pipe->plane_state->visible) + continue; + + r2 = &test_pipe->plane_res.scl_data.recout; + r2_r = r2->x + r2->width; + r2_b = r2->y + r2->height; + + if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) + return true; + } + + return false; +} + +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2974,23 +2952,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .rotation = pipe_ctx->plane_state->rotation, .mirror = pipe_ctx->plane_state->horizontal_mirror }; - uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x; - uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y; - uint32_t x_offset = min(x_plane, pos_cpy.x); - uint32_t y_offset = min(y_plane, pos_cpy.y); + bool pipe_split_on = (pipe_ctx->top_pipe != NULL) || + (pipe_ctx->bottom_pipe != NULL); - pos_cpy.x -= x_offset; - pos_cpy.y -= y_offset; - pos_cpy.x_hotspot += (x_plane - x_offset); - pos_cpy.y_hotspot += (y_plane - y_offset); + int x_plane = pipe_ctx->plane_state->dst_rect.x; + int y_plane = pipe_ctx->plane_state->dst_rect.y; + int x_pos = pos_cpy.x; + int y_pos = pos_cpy.y; + + // translate cursor from stream space to plane space + x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width / + pipe_ctx->plane_state->dst_rect.width; + y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / + pipe_ctx->plane_state->dst_rect.height; + + if (x_pos < 0) { + pos_cpy.x_hotspot -= x_pos; + x_pos = 0; + } + + if (y_pos < 0) { + pos_cpy.y_hotspot -= y_pos; + y_pos = 0; + } + + pos_cpy.x = (uint32_t)x_pos; + pos_cpy.y = (uint32_t)y_pos; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; + if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx)) + pos_cpy.enable = false; + // Swap axis and mirror horizontally if (param.rotation == ROTATION_ANGLE_90) { uint32_t temp_x = pos_cpy.x; + pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width - (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x; pos_cpy.y = temp_x; @@ -2998,26 +2997,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) // Swap axis and mirror vertically else if (param.rotation == ROTATION_ANGLE_270) { uint32_t temp_y = pos_cpy.y; - if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) { - pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height; - pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; - } else { - pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; - } + int viewport_height = + pipe_ctx->plane_res.scl_data.viewport.height; + + if (pipe_split_on) { + if (pos_cpy.x > viewport_height) { + pos_cpy.x = pos_cpy.x - viewport_height; + pos_cpy.y = viewport_height - pos_cpy.x; + } else { + pos_cpy.y = 2 * viewport_height - pos_cpy.x; + } + } else 
+ pos_cpy.y = viewport_height - pos_cpy.x; pos_cpy.x = temp_y; } // Mirror horizontally and vertically else if (param.rotation == ROTATION_ANGLE_180) { - if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) { - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width - - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x; - } else { - uint32_t temp_x = pos_cpy.x; - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x; - if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width - || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) { - pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width; + int viewport_width = + pipe_ctx->plane_res.scl_data.viewport.width; + int viewport_x = + pipe_ctx->plane_res.scl_data.viewport.x; + + if (pipe_split_on) { + if (pos_cpy.x >= viewport_width + viewport_x) { + pos_cpy.x = 2 * viewport_width + - pos_cpy.x + 2 * viewport_x; + } else { + uint32_t temp_x = pos_cpy.x; + + pos_cpy.x = 2 * viewport_x - pos_cpy.x; + if (temp_x >= viewport_x + + (int)hubp->curs_attr.width || pos_cpy.x + <= (int)hubp->curs_attr.width + + pipe_ctx->plane_state->src_rect.x) { + pos_cpy.x = temp_x + viewport_width; + } } + } else { + pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x; } pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y; } @@ -3026,7 +3043,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height); } -static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) { struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; @@ -3036,7 +3053,7 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, attributes); } -static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) { uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level; struct fixed31_32 multiplier; @@ -3063,12 +3080,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &opt_attr); } -/** -* apply_front_porch_workaround TODO FPGA still need? -* -* This is a workaround for a bug that has existed since R5xx and has not been -* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. -*/ +/* + * apply_front_porch_workaround TODO FPGA still need? + * + * This is a workaround for a bug that has existed since R5xx and has not been + * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. 
+ */ static void apply_front_porch_workaround( struct dc_crtc_timing *timing) { @@ -3081,7 +3098,7 @@ static void apply_front_porch_workaround( } } -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) { const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; struct dc_crtc_timing patched_crtc_timing; @@ -3110,34 +3127,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock) -{ - struct pipe_ctx *pipe_ctx; - struct timing_generator *tg; - int i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe_ctx = &context->res_ctx.pipe_ctx[i]; - tg = pipe_ctx->stream_res.tg; - /* - * Only lock the top pipe's tg to prevent redundant - * (un)locking. Also skip if pipe is disabled. - */ - if (pipe_ctx->top_pipe || - !pipe_ctx->stream || !pipe_ctx->plane_state || - !tg->funcs->is_tg_enabled(tg)) - continue; - - if (lock) - tg->funcs->lock(tg); - else - tg->funcs->unlock(tg); - } -} - -static void calc_vupdate_position( +static void dcn10_calc_vupdate_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, uint32_t *end_line) @@ -3145,7 +3136,7 @@ static void calc_vupdate_position( const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; int vline_int_offset_from_vupdate = pipe_ctx->stream->periodic_interrupt0.lines_offset; - int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx); + int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); int start_position; if (vline_int_offset_from_vupdate > 0) @@ -3166,7 +3157,8 @@ static void calc_vupdate_position( *end_line = 2; } -static void cal_vline_position( +static void dcn10_cal_vline_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline, uint32_t *start_line, @@ -3181,7 +3173,8 @@ static void cal_vline_position( switch (ref_point) { case START_V_UPDATE: - calc_vupdate_position( + dcn10_calc_vupdate_position( + dc, pipe_ctx, start_line, end_line); @@ -3195,7 +3188,8 @@ static void cal_vline_position( } } -static void dcn10_setup_periodic_interrupt( +void dcn10_setup_periodic_interrupt( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline) { @@ -3205,7 +3199,7 @@ static void dcn10_setup_periodic_interrupt( uint32_t start_line = 0; uint32_t end_line = 0; - cal_vline_position(pipe_ctx, vline, &start_line, &end_line); + dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); @@ -3216,10 +3210,10 @@ static void dcn10_setup_periodic_interrupt( } } -static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; - int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) { ASSERT(0); @@ -3230,12 +3224,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq 
*hws = link->dc->hwseq; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -3249,11 +3244,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } -static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, const uint8_t *custom_sdp_message, unsigned int sdp_message_size) { @@ -3264,7 +3259,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, sdp_message_size); } } -static enum dc_status dcn10_set_clock(struct dc *dc, +enum dc_status dcn10_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) @@ -3304,7 +3299,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc, } -static void dcn10_get_clock(struct dc *dc, +void dcn10_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) { @@ -3314,77 +3309,3 @@ static void dcn10_get_clock(struct dc *dc, dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg); } - -static const struct hw_sequencer_funcs dcn10_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, - .update_plane_addr = dcn10_update_plane_addr, - .plane_atomic_disconnect = hwss1_plane_atomic_disconnect, - .update_dchub = dcn10_update_dchub, - .update_mpcc = dcn10_update_mpcc, - .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn10_set_input_transfer_func, - .set_output_transfer_func = dcn10_set_output_transfer_func, - .program_output_csc = dcn10_program_output_csc, - .power_down = dce110_power_down, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dce110_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn10_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .disable_plane = dcn10_disable_plane, - .blank_pixel_data = dcn10_blank_pixel_data, - .pipe_control_lock = dcn10_pipe_control_lock, - .prepare_bandwidth = dcn10_prepare_bandwidth, - .optimize_bandwidth = dcn10_optimize_bandwidth, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn10_enable_stream_timing, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = 
dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = NULL, - .disable_vga = dcn10_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn10_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn10_enable_power_gating_plane, - .dpp_pg_control = dcn10_dpp_pg_control, - .hubp_pg_control = dcn10_hubp_pg_control, - .dsc_pg_control = NULL, -}; - - -void dcn10_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn10_funcs; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index d3616b1948cc..4d20f6586bb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -27,68 +27,160 @@ #define __DC_HWSS_DCN10_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; void dcn10_hw_sequencer_construct(struct dc *dc); -extern void fill_display_configs( - const struct dc_state *context, - struct dm_pp_display_configuration *pp_display_cfg); - -bool is_rgb_cspace(enum dc_color_space output_color_space); - -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void dcn10_verify_allow_pstate_change_high(struct dc *dc); +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +enum dc_status dcn10_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn10_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_prepare_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn10_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn10_program_output_csc(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_apply_ctx_for_surface( + struct dc *dc, + const struct dc_stream_state *stream, + int num_planes, + struct dc_state *context); +void dcn10_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn10_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn10_enable_power_gating_plane( + struct dce_hwseq *hws, + bool enable); +void 
dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_vga( + struct dce_hwseq *hws); void dcn10_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); - -void dcn10_get_hw_state( +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx); +void dcn10_init_hw(struct dc *dc); +void dcn10_init_pipes(struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_ctx_to_hw( + struct dc *dc, + struct dc_state *context); +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); +void dce110_power_down(struct dc *dc); +void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); +void dcn10_enable_timing_synchronization( + struct dc *dc, + int group_index, + int group_size, + struct pipe_ctx *grouped_pipes[]); +void dcn10_enable_per_frame_crtc_position_reset( + struct dc *dc, + int group_size, + struct pipe_ctx *grouped_pipes[]); +void dce110_update_info_frame(struct pipe_ctx *pipe_ctx); +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); +void dce110_blank_stream(struct pipe_ctx *pipe_ctx); +void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx); +void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx); +bool dcn10_dummy_display_power_gating( struct dc *dc, - char *pBuf, unsigned int bufSize, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); +void dcn10_get_position(struct pipe_ctx **pipe_ctx, + int num_pipes, + struct crtc_position *position); +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc); +void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, unsigned int mask); - void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); - -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); - -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); - +void dcn10_wait_for_mpcc_disconnect( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx); +void dce110_edp_backlight_control( + struct dc_link *link, + bool enable); +void dce110_edp_power_control( + struct dc_link *link, + bool power_up); +void dce110_edp_wait_for_hpd_ready( + struct dc_link *link, + bool power_up); +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); +void dcn10_setup_periodic_interrupt( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); +enum dc_status dcn10_set_clock(struct dc *dc, + enum dc_clock_type clock_type, + uint32_t clk_khz, + uint32_t stepping); +void dcn10_get_clock(struct dc *dc, + enum dc_clock_type 
clock_type, + struct dc_clock_config *clock_cfg); +bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_bios_golden_init(struct dc *dc); +void dcn10_plane_atomic_power_down(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp); void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color); - void dcn10_get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); - -bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void update_dchubp_dpp( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); - -struct pipe_ctx *find_top_pipe_for_stream( - struct dc *dc, - struct dc_state *context, - const struct dc_stream_state *stream); - -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const struct dc_plane_state *plane_state); -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock); +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); +void dcn10_verify_allow_pstate_change_high(struct dc *dc); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h new file mode 100644 index 000000000000..596f95c22e85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN10_DEBUG_H__ +#define __DC_HWSS_DCN10_DEBUG_H__ + +#include "core_types.h" + +struct dc; + +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); + +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); + +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, + unsigned int mask); + +#endif /* __DC_HWSS_DCN10_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c new file mode 100644 index 000000000000..e7e5352ec424 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -0,0 +1,111 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hw_sequencer_private.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer.h" + +static const struct hw_sequencer_funcs dcn10_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .update_plane_addr = dcn10_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn10_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn10_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn10_disable_plane, + .pipe_control_lock = dcn10_pipe_control_lock, + .prepare_bandwidth = dcn10_prepare_bandwidth, + .optimize_bandwidth = dcn10_optimize_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn10_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn10_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + 
.program_pipe = dcn10_program_pipe, + .update_mpcc = dcn10_update_mpcc, + .set_input_transfer_func = dcn10_set_input_transfer_func, + .set_output_transfer_func = dcn10_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn10_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn10_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = NULL, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +}; + +void dcn10_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn10_funcs; + dc->hwseq->funcs = dcn10_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h new file mode 100644 index 000000000000..8c6fd7b844a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN10_INIT_H__ +#define __DC_DCN10_INIT_H__ + +struct dc; + +void dcn10_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c index 0fb9e440cb9d..f05371c1fc36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c @@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct ipp_funcs dcn20_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#endif void dcn10_ipp_construct( struct dcn10_ipp *ippn10, @@ -76,7 +74,6 @@ void dcn10_ipp_construct( ippn10->ipp_mask = ipp_mask; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct( struct dcn10_ipp *ippn10, struct dc_context *ctx, @@ -93,4 +90,3 @@ void dcn20_ipp_construct( ippn10->ipp_shift = ipp_shift; ippn10->ipp_mask = ipp_mask; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h index cfa24459242b..f0e0d07b0311 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h @@ -49,7 +49,6 @@ SRI(CURSOR_HOT_SPOT, CURSOR, id), \ SRI(CURSOR_DST_OFFSET, CURSOR, id) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_REG_LIST_DCN20(id) \ IPP_REG_LIST_DCN(id), \ SRI(CURSOR_SETTINGS, HUBPREQ, id), \ @@ -60,7 +59,6 @@ SRI(CURSOR_POSITION, CURSOR0_, id), \ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ SRI(CURSOR_DST_OFFSET, CURSOR0_, id) -#endif #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L @@ -105,7 +103,6 @@ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_MASK_SH_LIST_DCN20(mask_sh) \ IPP_MASK_SH_LIST_DCN(mask_sh), \ IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \ @@ -124,7 +121,6 @@ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) -#endif #define IPP_DCN10_REG_FIELD_LIST(type) \ type CNVC_SURFACE_PIXEL_FORMAT; \ @@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct(struct dcn10_ipp *ippn10, struct dc_context *ctx, int inst, const struct dcn10_ipp_registers *regs, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#endif #endif /* _DCN10_IPP_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 88fcc395adf5..eb13589b9a81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -72,9 +72,7 @@ struct dcn10_link_enc_aux_registers { uint32_t AUX_CONTROL; uint32_t AUX_DPHY_RX_CONTROL0; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t AUX_DPHY_TX_CONTROL; -#endif }; struct dcn10_link_enc_hpd_registers { @@ -106,7 +104,6 @@ struct dcn10_link_enc_registers { uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; uint32_t TMDS_CTL_BITS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) 
/* DCCG */ uint32_t CLOCK_ENABLE; /* DIG */ @@ -127,6 +124,26 @@ struct dcn10_link_enc_registers { uint32_t RDPCSTX_PHY_CNTL13; uint32_t RDPCSTX_PHY_CNTL14; uint32_t RDPCSTX_PHY_CNTL15; + uint32_t RDPCSTX_CNTL; + uint32_t RDPCSTX_CLOCK_CNTL; + uint32_t RDPCSTX_PHY_CNTL0; + uint32_t RDPCSTX_PHY_CNTL2; + uint32_t RDPCSTX_PLL_UPDATE_DATA; + uint32_t RDPCS_TX_CR_ADDR; + uint32_t RDPCS_TX_CR_DATA; + uint32_t DPCSTX_TX_CLOCK_CNTL; + uint32_t DPCSTX_TX_CNTL; + uint32_t RDPCSTX_INTERRUPT_CONTROL; + uint32_t RDPCSTX_PHY_FUSE0; + uint32_t RDPCSTX_PHY_FUSE1; + uint32_t RDPCSTX_PHY_FUSE2; + uint32_t RDPCSTX_PHY_FUSE3; + uint32_t RDPCSTX_PHY_RX_LD_VAL; + uint32_t DPCSTX_DEBUG_CONFIG; + uint32_t RDPCSTX_DEBUG_CONFIG; + uint32_t RDPCSTX0_RDPCSTX_SCRATCH; + uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG; + uint32_t DCIO_SOFT_RESET; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -136,7 +153,6 @@ struct dcn10_link_enc_registers { uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3; -#endif }; #define LE_SF(reg_name, field_name, post_fix)\ @@ -242,7 +258,6 @@ struct dcn10_link_enc_registers { type AUX_LS_READ_EN;\ type AUX_RX_RECEIVE_WINDOW -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \ type RDPCS_PHY_DP_TX0_DATA_EN;\ @@ -423,20 +438,15 @@ struct dcn10_link_enc_registers { type AUX_TX_PRECHARGE_SYMBOLS; \ type AUX_MODE_DET_CHECK_DELAY;\ type DPCS_DBG_CBUS_DIS -#endif struct dcn10_link_enc_shift { DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn10_link_enc_mask { DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#endif }; struct dcn10_link_encoder { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8b2f29f6dabd..04f863499cfb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc, int mpcc_id) { struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); + uint32_t bg_r_cr, bg_g_y, bg_b_cb; + + /* find bottommost mpcc. */ + while (bottommost_mpcc->mpcc_bot) { + bottommost_mpcc = bottommost_mpcc->mpcc_bot; + } /* mpc color is 12 bit. tg_color is 10 bit */ /* todo: might want to use 16 bit to represent color and have each * hw block translate to correct color depth. 
*/ - uint32_t bg_r_cr = bg_color->color_r_cr << 2; - uint32_t bg_g_y = bg_color->color_g_y << 2; - uint32_t bg_b_cb = bg_color->color_b_cb << 2; + bg_r_cr = bg_color->color_r_cr << 2; + bg_g_y = bg_color->color_g_y << 2; + bg_b_cb = bg_color->color_b_cb << 2; - REG_SET(MPCC_BG_R_CR[mpcc_id], 0, + REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0, MPCC_BG_R_CR, bg_r_cr); - REG_SET(MPCC_BG_G_Y[mpcc_id], 0, + REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0, MPCC_BG_G_Y, bg_g_y); - REG_SET(MPCC_BG_B_CB[mpcc_id], 0, + REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, MPCC_BG_B_CB, bg_b_cb); } @@ -457,12 +464,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .update_blending = mpc1_update_blending, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .set_denorm = NULL, .set_denorm_clamp = NULL, .set_output_csc = NULL, .set_output_gamma = NULL, -#endif }; void dcn10_mpc_construct(struct dcn10_mpc *mpc10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0a9ad692f541..d79718fde5a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -373,11 +373,9 @@ void opp1_program_oppbuf( */ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Controls the number of padded pixels at the end of a segment */ if (REG(OPPBUF_CONTROL1)) REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -#endif } void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) @@ -404,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, .opp_program_stereo = opp1_program_stereo, .opp_pipe_clock_control = opp1_pipe_clock_control, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .opp_set_disp_pattern_generator = NULL, -#endif + .dpg_is_blanked = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index dabccbd49ad4..a9a43b397db9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -457,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); + REG_SEQ_START(); + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + return true; } @@ -784,21 +789,26 @@ void optc1_set_early_control( void optc1_set_static_screen_control( struct timing_generator *optc, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + // By register spec, it only takes 8 bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + /* Bit 8 is no longer applicable in RV for PSR case, * set bit 8 to 0 if given */ - if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) + if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) != 0) - value = value & + event_triggers = event_triggers & ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN; REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0, - OTG_STATIC_SCREEN_EVENT_MASK, value, - OTG_STATIC_SCREEN_FRAME_COUNT, 2); + OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, + 
OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); } void optc1_setup_manual_trigger(struct timing_generator *optc) @@ -1497,7 +1507,6 @@ void dcn10_timing_generator_init(struct optc *optc1) optc1->min_v_sync_width = 1; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this: * * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as @@ -1510,15 +1519,12 @@ void dcn10_timing_generator_init(struct optc *optc1) * to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well. * */ -#endif bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index c8d795b335ba..f277656d5464 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -165,13 +165,11 @@ struct dcn_optc_registers { uint32_t OTG_CRC0_WINDOWB_X_CONTROL; uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; uint32_t GSL_SOURCE_SELECT; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t DWB_SOURCE_SELECT; uint32_t OTG_DSC_START_POSITION; uint32_t OPTC_DATA_FORMAT_CONTROL; uint32_t OPTC_BYTES_PER_PIXEL; uint32_t OPTC_WIDTH_CONTROL; -#endif }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -456,7 +454,6 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ @@ -479,12 +476,6 @@ struct dcn_optc_registers { type OPTC_DWB0_SOURCE_SELECT;\ type OPTC_DWB1_SOURCE_SELECT; -#else - -#define TG_REG_FIELD_LIST(type) \ - TG_REG_FIELD_LIST_DCN1_0(type) - -#endif struct dcn_optc_shift { @@ -542,6 +533,7 @@ struct dcn_otg_state { uint32_t h_total; uint32_t underflow_occurred_status; uint32_t otg_enabled; + uint32_t blank_enabled; }; void optc1_read_otg_state(struct optc *optc1, @@ -633,7 +625,8 @@ void optc1_set_drr( void optc1_set_static_screen_control( struct timing_generator *optc, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void optc1_program_stereo(struct timing_generator *optc, const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 15640aedd664..3b71898e859e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn10_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" @@ -919,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) return pp_smu; } -static void destruct(struct dcn10_resource_pool *pool) +static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) { unsigned int i; @@ -1166,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); - destruct(dcn10_pool); + 
dcn10_resource_destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } @@ -1305,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dcn10_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) @@ -1592,7 +1594,7 @@ static bool construct( fail: - destruct(pool); + dcn10_resource_destruct(pool); return false; } @@ -1607,7 +1609,7 @@ struct resource_pool *dcn10_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 06e5bbb4545c..376c4264d295 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { uint32_t h_active_start; @@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). */ - if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (output_color_space == COLOR_SPACE_2020_YCBCR) || - (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || - (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) + if (use_vsc_sdp_for_colorimetry) misc1 = misc1 | 0x40; else misc1 = misc1 & ~0x40; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c9cbc21d121e..f9b9e221c698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers { uint32_t DP_MSA_TIMING_PARAM3; uint32_t DP_MSA_TIMING_PARAM4; uint32_t HDMI_DB_CONTROL; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DP_DSC_CNTL; uint32_t DP_DSC_BYTES_PER_PIXEL; uint32_t DME_CONTROL; uint32_t DP_SEC_METADATA_TRANSMISSION; uint32_t HDMI_METADATA_PACKET_CONTROL; uint32_t DP_SEC_FRAMING4; -#endif uint32_t DIG_CLOCK_PATTERN; }; @@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers { type DIG_SOURCE_SELECT;\ type DIG_CLOCK_PATTERN -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define SE_REG_FIELD_LIST_DCN2_0(type) \ type DP_DSC_MODE;\ type DP_DSC_SLICE_WIDTH;\ @@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers { type DOLBY_VISION_EN;\ type DP_PIXEL_COMBINE;\ type DP_SST_SDP_SPLITTING -#endif struct dcn10_stream_encoder_shift { SE_REG_FIELD_LIST_DCN1_0(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint8_t); -#endif }; struct dcn10_stream_encoder_mask { SE_REG_FIELD_LIST_DCN1_0(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint32_t); -#endif }; struct dcn10_stream_encoder { @@ -526,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc1_stream_encoder_hdmi_set_stream_attribute( diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 10b47986526b..5fcaf78334ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,16 +2,20 @@ # # Makefile for DCN. -DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DCN20 += dcn20_dsc.o -endif +ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -19,6 +23,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -27,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 else CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 endif +endif AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 1e1151356e60..50bffbfdd394 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; + int modulo, phase; - ASSERT(req_dppclk <= ref_dppclk); - /* need to clamp to 8 bits */ - if (ref_dppclk > 0xff) { - int divider = (ref_dppclk + 0xfe) / 0xff; + // phase / modulo = dpp pipe clk / dpp global clk + modulo = 0xff; // use FF at the end + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; - ref_dppclk /= divider; - req_dppclk = (req_dppclk + divider - 1) / divider; - if (req_dppclk > ref_dppclk) - req_dppclk = ref_dppclk; + if (phase > 0xff) { + ASSERT(false); + phase = 0xff; } + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); + DPPCLK0_DTO_PHASE, phase, + DPPCLK0_DTO_MODULO, modulo); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 4d7e45892f08..13e057d7ee93 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -104,7 +104,7 @@ static void dpp2_cnv_setup ( uint32_t pixel_format = 0; uint32_t alpha_en = 1; enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; bool force_disable_cursor = false; struct out_csc_color_matrix tbl_entry; uint32_t is_2bit = 0; @@ -145,25 +145,25 @@ static void dpp2_cnv_setup ( force_disable_cursor = false; pixel_format = 65; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: force_disable_cursor = true; pixel_format = 64; color_space = 
COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: force_disable_cursor = true; pixel_format = 67; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: force_disable_cursor = true; pixel_format = 66; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: pixel_format = 22; @@ -177,7 +177,7 @@ static void dpp2_cnv_setup ( case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: pixel_format = 12; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: pixel_format = 112; @@ -188,13 +188,13 @@ static void dpp2_cnv_setup ( case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: pixel_format = 114; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: pixel_format = 115; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: @@ -227,13 +227,13 @@ static void dpp2_cnv_setup ( tbl_entry.color_space = input_color_space; if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; else - select = INPUT_CSC_SELECT_BYPASS; + select = DCN2_ICSC_SELECT_BYPASS; - dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); + dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); } else - dpp1_program_input_csc(dpp_base, color_space, select, NULL); + dpp2_program_input_csc(dpp_base, color_space, select, NULL); if (force_disable_cursor) { REG_UPDATE(CURSOR_CONTROL, @@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 5b03b737b1d6..27610251c57f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -150,6 +150,16 @@ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ SRI(CM_SHAPER_LUT_INDEX, CM, id) +#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(CM_ICSC_B_C11_C12, CM, id), \ + SRI(CM_ICSC_B_C33_C34, CM, id) + #define TF_REG_LIST_DCN20(id) \ TF_REG_LIST_DCN(id), \ TF_REG_LIST_DCN20_COMMON(id), \ @@ -572,10 +582,29 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) +/* DPP CM debug status register: + * + * Status index including current ICSC, Gamut Remap Mode is 9 + * ICSC Mode: [4..3] + * Gamut Remap Mode: [10..9] + */ +#define CM_TEST_DEBUG_DATA_STATUS_IDX 
9 + +#define TF_DEBUG_REG_LIST_SH_DCN20 \ + TF_DEBUG_REG_LIST_SH_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 + +#define TF_DEBUG_REG_LIST_MASK_DCN20 \ + TF_DEBUG_REG_LIST_MASK_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ + type CM_TEST_DEBUG_DATA_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ type FORMAT_CNV16; \ type CNVC_BYPASS_MSB_ALIGN; \ type CLAMP_POSITIVE; \ @@ -630,11 +659,22 @@ struct dcn2_dpp_mask { uint32_t COLOR_KEYER_RED; \ uint32_t COLOR_KEYER_GREEN; \ uint32_t COLOR_KEYER_BLUE; \ - uint32_t OBUF_MEM_PWR_CTRL;\ + uint32_t OBUF_MEM_PWR_CTRL; \ uint32_t DSCL_MEM_PWR_CTRL +#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ + uint32_t CM_GAMUT_REMAP_B_C11_C12; \ + uint32_t CM_GAMUT_REMAP_B_C13_C14; \ + uint32_t CM_GAMUT_REMAP_B_C21_C22; \ + uint32_t CM_GAMUT_REMAP_B_C23_C24; \ + uint32_t CM_GAMUT_REMAP_B_C31_C32; \ + uint32_t CM_GAMUT_REMAP_B_C33_C34; \ + uint32_t CM_ICSC_B_C11_C12; \ + uint32_t CM_ICSC_B_C33_C34 + struct dcn2_dpp_registers { DPP_DCN2_REG_VARIABLE_LIST; + DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND; }; struct dcn20_dpp { @@ -656,6 +696,18 @@ struct dcn20_dpp { struct pwl_params pwl_data; }; +enum dcn20_input_csc_select { + DCN2_ICSC_SELECT_BYPASS = 0, + DCN2_ICSC_SELECT_ICSC_A = 1, + DCN2_ICSC_SELECT_ICSC_B = 2 +}; + +enum dcn20_gamut_remap_select { + DCN2_GAMUT_REMAP_BYPASS = 0, + DCN2_GAMUT_REMAP_COEF_A = 1, + DCN2_GAMUT_REMAP_COEF_B = 2 +}; + void dpp20_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); @@ -667,6 +719,16 @@ void dpp2_set_degamma( struct dpp *dpp_base, enum ipp_degamma_mode mode); +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry); + bool dpp20_program_blnd_lut( struct dpp *dpp_base, const struct pwl_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 2d112c316424..8dc3d1f73984 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -36,6 +36,9 @@ #define REG(reg)\ dpp->tf_regs->reg +#define IND_REG(index) \ + (index) + #define CTX \ dpp->base.ctx @@ -44,9 +47,6 @@ dpp->tf_shift->field_name, dpp->tf_mask->field_name - - - static void dpp2_enable_cm_block( struct dpp *dpp_base) { @@ -149,12 +149,164 @@ void dpp2_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; } } +static void program_gamut_remap( + struct dcn20_dpp *dpp, + const uint16_t *regval, + enum dcn20_gamut_remap_select select) +{ + uint32_t cur_select = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, 0); + return; + } + + /* determine which gamut_remap coefficients (A or B) we are using + * currently. 
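/*
 * Illustrative decode of the CM debug-bus word (not part of the patch; the
 * helper names are hypothetical).  With CM_TEST_DEBUG_INDEX pointed at status
 * index 9, the IX_REG_GET() calls below extract their fields using the
 * shift/mask values declared in dcn20_dpp.h above (ICSC mode in bits [4:3],
 * gamut remap mode in bits [10:9]):
 */
#include <stdint.h>

#define CM_DBG_ICSC_MODE_SHIFT         3
#define CM_DBG_ICSC_MODE_MASK          0x18    /* bits [4:3]  */
#define CM_DBG_GAMUT_REMAP_MODE_SHIFT  9
#define CM_DBG_GAMUT_REMAP_MODE_MASK   0x600   /* bits [10:9] */

static inline uint32_t cm_dbg_icsc_mode(uint32_t debug_data)
{
	return (debug_data & CM_DBG_ICSC_MODE_MASK) >> CM_DBG_ICSC_MODE_SHIFT;
}

static inline uint32_t cm_dbg_gamut_remap_mode(uint32_t debug_data)
{
	return (debug_data & CM_DBG_GAMUT_REMAP_MODE_MASK) >> CM_DBG_GAMUT_REMAP_MODE_SHIFT;
}
/* Example: a readback of 0x208 decodes to ICSC mode 1 and gamut remap mode 1. */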
select the alternate set to double buffer + * the update so gamut_remap is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); + + /* value stored in dbg reg will be 1 greater than mode we want */ + if (cur_select != DCN2_GAMUT_REMAP_COEF_A) + select = DCN2_GAMUT_REMAP_COEF_A; + else + select = DCN2_GAMUT_REMAP_COEF_B; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + } else { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, select); + +} + +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i = 0; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); + } +} + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn20_input_csc_select select; + struct color_matrices_reg icsc_regs; + + if (input_select == DCN2_ICSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. 
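/*
 * Minimal sketch of the A/B double-buffering idiom used by
 * program_gamut_remap() above and dpp2_program_input_csc() below
 * (pick_inactive_bank() is a hypothetical helper, not part of the patch):
 * the active coefficient bank is read back through the CM debug bus, the
 * other bank is programmed, and the control register is then switched to the
 * freshly written bank so the change takes effect on a frame boundary.
 */
static enum dcn20_input_csc_select pick_inactive_bank(uint32_t cur_select)
{
	/* Prefer bank A unless the readback says A is the one in use. */
	if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
		return DCN2_ICSC_SELECT_ICSC_A;
	return DCN2_ICSC_SELECT_ICSC_B;
}
/*
 * Caller pattern:
 *	select = pick_inactive_bank(cur_select);
 *	...program the CM_ICSC_* (or CM_ICSC_B_*) coefficients for 'select'...
 *	REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, select);
 */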
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); + + if (cur_select != DCN2_ICSC_SELECT_ICSC_A) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_ICSC_B; + + icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == DCN2_ICSC_SELECT_ICSC_A) { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &icsc_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + static void dpp20_power_on_blnd_lut( struct dpp *dpp_base, bool power_on) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 63eb377ed9c0..6bdfee20b6a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "reg_helper.h" #include "dcn20_dsc.h" #include "dsc/dscc_types.h" @@ -207,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str struct dsc_reg_values dsc_reg_vals; struct dsc_optc_config dsc_optc_cfg; + memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); + memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); + DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); dsc_config_log(dsc, dsc_cfg); DC_LOG_DSC("DSC Picture Parameter Set (PPS):"); @@ -222,9 +224,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; + + DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); - /* TODO Check if DSC alreay in use? 
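/*
 * The lines added below resolve this TODO: dsc2_enable() now reads back
 * DSC_TOP_CONTROL.DSC_CLOCK_EN and the DSCRM forwarding config, and asserts
 * if this DSC instance is already feeding a different OPP pipe.  A minimal
 * sketch of the condition (hypothetical helper, not part of the patch):
 */
static bool dsc2_already_in_use(int dsc_clock_en, int dsc_fw_config,
				int enabled_opp_pipe, int requested_opp_pipe)
{
	/* "In use" means the clock is running or forwarding is enabled,
	 * and the currently bound OPP pipe is not the one being requested.
	 */
	return (dsc_clock_en || dsc_fw_config) &&
	       enabled_opp_pipe != requested_opp_pipe;
}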
*/ - DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe); + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSC_TOP_CONTROL, DSC_CLOCK_EN, 1); @@ -238,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) static void dsc2_disable(struct display_stream_compressor *dsc) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; - DC_LOG_DSC("disable DSC"); + DC_LOG_DSC("disable DSC %d", dsc->inst); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if (!dsc_clock_en || !dsc_fw_config) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, 0); @@ -715,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 4e2fb38390a4..9855a7ed0387 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -21,7 +21,6 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DCN20_DSC_H__ #define __DCN20_DSC_H__ @@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc, #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 8b8438566101..9235f7d29454 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; break; + default: + ASSERT(false); + break; } output->capable = true; output->const_color_support = true; @@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub2_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 69e2aae42394..84d7ac5dd206 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -30,6 +30,8 @@ #include "reg_helper.h" #include "basics/conversion.h" +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp2->hubp_regs->reg @@ -483,7 +485,6 @@ void hubp2_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -504,7 +505,6 @@ void hubp2_program_pixel_format( 
REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; @@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp) } +void hubp2_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); + + /* Requestor Regs */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != 
dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); + if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); + if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); + if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size) + DC_LOG_DEBUG("DML Validation | 
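/*
 * Hypothetical helper macro (not part of the patch, which spells every check
 * out explicitly): the whole of hubp2_validate_dml_output() follows one
 * pattern -- read a field back from the hardware, compare it with the value
 * DML computed, and log a line on mismatch.  Assuming the printf-style
 * DC_LOG_DEBUG() used throughout this function, the pattern condenses to:
 */
#define DML_CHECK(reg_name, field, actual, expected)				\
	do {									\
		if ((actual) != (expected))					\
			DC_LOG_DEBUG("DML Validation | " reg_name ":" #field	\
				" - Expected: %u Actual: %u\n",			\
				(expected), (actual));				\
	} while (0)
/*
 * Usage, equivalent to the explicit checks above and below (reg_name must be
 * a string literal):
 *	DML_CHECK("DCHUBP_REQ_SIZE_CONFIG", CHUNK_SIZE,
 *		  rq_regs.rq_regs_l.chunk_size,
 *		  dml_rq_regs->rq_regs_l.chunk_size);
 * Besides being shorter, this keeps the register/field name in the log
 * message tied to the value actually being compared, which is easy to get
 * wrong when the lines are copied by hand.
 */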
DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size); + if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); + if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + + if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); + if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); + if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n", + dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); + if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) + DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); + if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); + if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); + if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) + DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n", + dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, 
&dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if 
(dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + 
REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | 
DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); +} + static struct hubp_funcs dcn20_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp1_init, + .validate_dml_output = hubp2_validate_dml_output, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index d5c8615af45e..8c04a3606a54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -148,7 +148,6 @@ uint32_t VMID_SETTINGS_0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ @@ -157,7 +156,6 @@ uint32_t FLIP_PARAMETERS_6;\ uint32_t VBLANK_PARAMETERS_5;\ uint32_t VBLANK_PARAMETERS_6 -#endif #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ @@ -184,7 +182,6 @@ type SURFACE_TRIPLE_BUFFER_ENABLE;\ type VMID -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\ type REFCYC_PER_VM_GROUP_FLIP;\ @@ -194,31 +191,18 @@ type REFCYC_PER_PTE_GROUP_FLIP_C; \ type REFCYC_PER_META_CHUNK_FLIP_C; \ type VM_GROUP_SIZE -#endif struct dcn_hubp2_registers { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_COMMON_VARIABLE_LIST; -#else - DCN2_HUBP_REG_COMMON_VARIABLE_LIST; -#endif }; struct dcn_hubp2_shift { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#endif }; struct dcn_hubp2_mask { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#endif }; struct dcn20_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index ac8c18fadefc..cfbbaffa8654 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -25,17 +25,15 @@ #include <linux/delay.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "dm_helpers.h" #include "core_types.h" #include "resource.h" -#include "dcn20/dcn20_resource.h" -#include "dce110/dce110_hw_sequencer.h" -#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_resource.h" #include "dcn20_hwseq.h" #include "dce/dce_hwseq.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dcn20/dcn20_dsc.h" -#endif +#include "dcn20_dsc.h" +#include "dcn20_optc.h" #include "abm.h" #include "clk_mgr.h" #include "dmcu.h" @@ -45,10 +43,9 @@ #include "ipp.h" #include "mpc.h" #include "mcif_wb.h" +#include "dchubbub.h" #include "reg_helper.h" #include "dcn10/dcn10_cm_common.h" -#include "dcn10/dcn10_hubbub.h" -#include "dcn10/dcn10_optc.h" #include "dc_link_dp.h" #include "vm_helper.h" #include "dccg.h" @@ -64,14 
+61,132 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn20_enable_power_gating_plane( +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) + * This is only used to lock pipes in pipe splitting case with immediate flip + * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, + * so we get tearing with freesync since we cannot flip multiple pipes + * atomically. + * We use GSL for this: + * - immediate flip: find first available GSL group if not already assigned + * program gsl with that group, set current OTG as master + * and always us 0x4 = AND of flip_ready from all pipes + * - vsync flip: disable GSL if used + * + * Groups in stream_res are stored as +1 from HW registers, i.e. + * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 + * Using a magic value like -1 would require tracking all inits/resets + */ +static void dcn20_setup_gsl_group_as_lock( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable) +{ + struct gsl_params gsl; + int group_idx; + + memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + /* at this point we want to program whether it's to enable or disable */ + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + pipe_ctx->stream_res.tg->funcs->set_gsl( + pipe_ctx->stream_res.tg, + &gsl); + + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 
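/*
 * (The source-select value completed just below, 4, is the "AND of flip_ready
 * from all pipes" mode described in the NOTE above; 0 is passed when the lock
 * is being released.)
 *
 * Minimal sketch of the bookkeeping convention from that NOTE (hypothetical
 * helper, not part of the patch): stream_res.gsl_group stores the hardware
 * group number plus one, so 0 can double as "no group assigned", and
 * find_free_gsl_group() returns 1..3 for hardware groups gsl_0..gsl_2, or 0
 * when all three are taken.
 */
static inline int gsl_group_to_hw_index(int gsl_group)
{
	/* Only meaningful when gsl_group > 0 (i.e. a group is assigned). */
	return gsl_group - 1;
}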
4 : 0); + } else + BREAK_TO_DEBUGGER(); +} + +void dcn20_set_flip_control_gsl( + struct pipe_ctx *pipe_ctx, + bool flip_immediate) +{ + if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( + pipe_ctx->plane_res.hubp, flip_immediate); + +} + +void dcn20_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { - bool force_on = 1; /* disable power gating */ + bool force_on = true; /* disable power gating */ if (enable) - force_on = 0; + force_on = false; /* DCHUBP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws) /* This value is dependent on the hardware pipeline delay so set once per SOC */ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c); } -void dcn20_display_init(struct dc *dc) -{ - struct dce_hwseq *hws = dc->hwseq; - - /* RBBMIF - * disable RBBMIF timeout detection for all clients - * Ensure RBBMIF does not drop register accesses due to the per-client timeout - */ - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - - /* DCCG */ - dcn20_dccg_init(hws); - - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0); - - /* DCHUB/MMHUBBUB - * set global timer refclk divider - * 100Mhz refclk -> 2 - * 27Mhz refclk -> 1 - * 48Mhz refclk -> 1 - */ - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - - /* OPTC - * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc - */ - - /* AZ - * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, - * if not, it should be programmed according to the ref clock - */ - REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); - /* Enable controller clock gating */ - REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); -} void dcn20_disable_vga( struct dce_hwseq *hws) @@ -178,15 +255,15 @@ void dcn20_disable_vga( REG_WRITE(D6VGA_CONTROL, 0); } -void dcn20_program_tripleBuffer( +void dcn20_program_triple_buffer( const struct dc *dc, struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer) + bool enable_triple_buffer) { if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( pipe_ctx->plane_res.hubp, - enableTripleBuffer); + enable_triple_buffer); } } @@ -195,6 +272,7 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg) { + struct dce_hwseq *hws = dc->hwseq; enum dc_color_space color_space; struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; @@ -225,6 +303,7 @@ void dcn20_init_blank( opp->funcs->opp_set_disp_pattern_generator( opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, @@ -234,17 +313,17 @@ void dcn20_init_blank( bottom_opp->funcs->opp_set_disp_pattern_generator( bottom_opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, otg_active_height); } - dcn20_hwss_wait_for_blank_complete(opp); + hws->funcs.wait_for_blank_complete(opp); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static void dcn20_dsc_pg_control( +void dcn20_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) @@ -320,9 +399,8 @@ static void 
dcn20_dsc_pg_control( if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } -#endif -static void dcn20_dpp_pg_control( +void dcn20_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ -396,7 +474,7 @@ static void dcn20_dpp_pg_control( } -static void dcn20_hubp_pg_control( +void dcn20_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control( /* disable HW used by plane. * note: cannot disable until disconnect is complete */ -static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing( struct dc_state *context, struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; @@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->stream_res.opp, true); - dc->hwss.blank_pixel_data(dc, pipe_ctx, true); + hws->funcs.blank_pixel_data(dc, pipe_ctx, true); /* VTG is within DCHUB command block. DCFCLK is always on */ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { @@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp); + hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -606,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing( // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) event_triggers = 0x80; + /* Event triggers and num frames initialized for DRR, but can be + * later updated for PSR use. Note DRR trigger events are generated + * regardless of whether num frames met. 
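/*
 * Sketch of the call made just below with its magic numbers named (the
 * symbolic names are hypothetical; the meanings come from the comments in
 * this function): 0x80 selects the surface-update trigger event used for DRR,
 * and 2 is the initial static-screen frame count that a later PSR
 * configuration may override.
 *
 *	enum {
 *		DRR_EVENT_TRIGGER_SURFACE_UPDATE = 0x80,
 *		STATIC_SCREEN_FRAME_COUNT_INIT   = 2,
 *	};
 *
 *	tg->funcs->set_static_screen_control(tg,
 *		DRR_EVENT_TRIGGER_SURFACE_UPDATE,
 *		STATIC_SCREEN_FRAME_COUNT_INIT);
 */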
+ */ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); + pipe_ctx->stream_res.tg, event_triggers, 2); /* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ @@ -649,7 +733,7 @@ void dcn20_program_output_csc(struct dc *dc, } } -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -736,20 +820,14 @@ bool dcn20_set_shaper_3dlut( else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); - if (plane_state->lut3d_func && - plane_state->lut3d_func->state.bits.initialized == 1 && - plane_state->lut3d_func->hdr_multiplier != 0) - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, - plane_state->lut3d_func->hdr_multiplier); - else - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000); - return result; } -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn20_set_input_transfer_func(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { + struct dce_hwseq *hws = dc->hwseq; struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; bool result = true; @@ -758,8 +836,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, if (dpp_base == NULL || plane_state == NULL) return false; - dcn20_set_shaper_3dlut(pipe_ctx, plane_state); - dcn20_set_blend_lut(pipe_ctx, plane_state); + hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); + hws->funcs.set_blend_lut(pipe_ctx, plane_state); if (plane_state->in_transfer_func) tf = plane_state->in_transfer_func; @@ -804,6 +882,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; @@ -824,7 +907,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } -static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -855,12 +938,16 @@ void dcn20_blank_pixel_data( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space = stream->output_color_space; enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + if (stream->link->test_pattern_enabled) + return; + /* get opp dpg blank color */ color_space_to_black_color(dc, color_space, &black_color); @@ -873,8 +960,10 @@ void dcn20_blank_pixel_data( if (stream_res->abm) stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); - if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) + if 
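/*
 * Note on the TRANSFER_FUNCTION_PQ case added in
 * dcn20_set_input_transfer_func() above: IPP_DEGAMMA_MODE_USER_PWL maps to
 * CM_DGAM_LUT_MODE = 3 in dpp2_set_degamma() (also added by this patch), so
 * PQ de-gamma is handled as a user-programmed PWL -- translated with
 * cm_helper_translate_curve_to_degamma_hw_format() and written with
 * dpp_program_degamma_pwl() -- rather than by one of the fixed hardware
 * curves such as the xvYCC case visible in that function.
 */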
(dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } } else { test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } @@ -882,6 +971,7 @@ void dcn20_blank_pixel_data( stream_res->opp->funcs->opp_set_disp_pattern_generator( stream_res->opp, test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, @@ -892,6 +982,7 @@ void dcn20_blank_pixel_data( odm_pipe->stream_res.opp, dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, @@ -1217,9 +1308,11 @@ static void dcn20_update_dchubp_dpp( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; + bool viewport_changed = false; if (pipe_ctx->update_flags.bits.dppclk) dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1261,7 +1354,7 @@ static void dcn20_update_dchubp_dpp( if (dpp->funcs->dpp_program_bias_and_scale) { //TODO :for CNVC set scale and bias registers if necessary - dcn10_build_prescale_params(&bns_params, plane_state); + build_prescale_params(&bns_params, plane_state); dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } } @@ -1269,19 +1362,19 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.mpcc || plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.per_pixel_alpha_change) { - /* Need mpcc to be idle if changing opp */ - if (pipe_ctx->update_flags.bits.opp_changed) { - struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - int mpcc_inst; - - for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { - if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) - continue; + // MPCC inst is equal to pipe index in practice + int mpcc_inst = hubp->inst; + int opp_inst; + int opp_count = dc->res_pool->pipe_count; + + for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { + if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); - old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; + break; } } - dc->hwss.update_mpcc(dc, pipe_ctx); + hws->funcs.update_mpcc(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.scaler || @@ -1298,14 +1391,18 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || - (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { + hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c); + viewport_changed = true; + } /* Any updates are handled in dc interface, just need to apply existing for plane enable */ - if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport) && 
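/*
 * Minimal sketch (hypothetical helper mirroring the loop added above in
 * dcn20_update_dchubp_dpp()): before re-attaching this pipe's MPCC, every OPP
 * is scanned for a pending disconnect of that MPCC instance and the MPC is
 * waited on until idle, instead of only handling the "OPP changed" case as
 * the removed code did.
 */
static void wait_for_pending_mpcc_disconnect(struct dc *dc, int mpcc_inst)
{
	int opp_inst;

	for (opp_inst = 0; opp_inst < dc->res_pool->pipe_count; opp_inst++) {
		if (!dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])
			continue;
		dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
		dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
		break;
	}
}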
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { dc->hwss.set_cursor_position(pipe_ctx); dc->hwss.set_cursor_attribute(pipe_ctx); @@ -1355,8 +1452,13 @@ static void dcn20_update_dchubp_dpp( hubp->power_gated = false; } + if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed) + hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address); + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) - dc->hwss.update_plane_addr(dc, pipe_ctx); + hws->funcs.update_plane_addr(dc, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); @@ -1368,10 +1470,11 @@ static void dcn20_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; /* Only need to unblank on top pipe */ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) - dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); if (pipe_ctx->update_flags.bits.global_sync) { pipe_ctx->stream_res.tg->funcs->program_global_sync( @@ -1384,12 +1487,12 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.odm) - dc->hwss.update_odm(dc, context, pipe_ctx); + hws->funcs.update_odm(dc, context, pipe_ctx); if (pipe_ctx->update_flags.bits.enable) dcn20_enable_plane(dc, pipe_ctx, context); @@ -1398,20 +1501,20 @@ static void dcn20_program_pipe( dcn20_update_dchubp_dpp(dc, pipe_ctx, context); if (pipe_ctx->update_flags.bits.enable - || pipe_ctx->plane_state->update_flags.bits.sdr_white_level) - set_hdr_multiplier(pipe_ctx); + || pipe_ctx->plane_state->update_flags.bits.hdr_mult) + hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for powering on, internal memcmp to avoid * updating on slave planes */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we * should reprogram the fmt. 
This deals with cases where @@ -1445,12 +1548,13 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe) return false; } -static void dcn20_program_front_end_for_ctx( +void dcn20_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) { const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; int i; + struct dce_hwseq *hws = dc->hwseq; bool pipe_locked[MAX_PIPES] = {false}; DC_LOGGER_INIT(dc->ctx->logger); @@ -1482,13 +1586,13 @@ static void dcn20_program_front_end_for_ctx( && !context->res_ctx.pipe_ctx[i].top_pipe && !context->res_ctx.pipe_ctx[i].prev_odm_pipe && context->res_ctx.pipe_ctx[i].stream) - dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { - dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } @@ -1508,8 +1612,8 @@ static void dcn20_program_front_end_for_ctx( pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) - && dc->hwss.program_all_writeback_pipes_in_tree) - dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + && hws->funcs.program_all_writeback_pipes_in_tree) + hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); } } @@ -1541,9 +1645,9 @@ static void dcn20_program_front_end_for_ctx( struct hubp *hubp = pipe->plane_res.hubp; int j = 0; - for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 && hubp->funcs->hubp_is_flip_pending(hubp); j++) - msleep(1); + mdelay(1); } } @@ -1594,6 +1698,7 @@ bool dcn20_update_bandwidth( struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) @@ -1623,10 +1728,10 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); if (pipe_ctx->prev_odm_pipe == NULL) - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1640,9 +1745,8 @@ bool dcn20_update_bandwidth( return true; } -static void dcn20_enable_writeback( +void dcn20_enable_writeback( struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context) { @@ -1656,8 +1760,7 @@ static void dcn20_enable_writeback( mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; /* set the OPTC source mux */ - ASSERT(stream_status->primary_otg_inst < MAX_PIPES); - optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst]; + optc = dc->res_pool->timing_generators[dwb->otg_inst]; optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); @@ -1684,7 +1787,7 @@ void 
dcn20_disable_writeback( mcif_wb->funcs->disable_mcif(mcif_wb); } -bool dcn20_hwss_wait_for_blank_complete( +bool dcn20_wait_for_blank_complete( struct output_pixel_processor *opp) { int counter; @@ -1713,9 +1816,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) return hubp->funcs->dmdata_status_done(hubp); } -static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1727,12 +1829,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } -static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1744,7 +1844,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) @@ -1767,12 +1866,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) hubp->funcs->dmdata_set_attributes(hubp, &attr); } -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx) -{ - dce110_disable_stream(pipe_ctx); -} - -static void dcn20_init_vm_ctx( +void dcn20_init_vm_ctx( struct dce_hwseq *hws, struct dc *dc, struct dc_virtual_addr_space_config *va_config, @@ -1794,7 +1888,7 @@ static void dcn20_init_vm_ctx( dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); } -static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; @@ -1838,8 +1932,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - -static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1873,6 +1966,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; struct pipe_ctx *odm_pipe; params.opp_cnt = 1; @@ -1885,7 +1979,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) { - if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) + if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) params.timing.pix_clk_100hz /= 2; pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); @@ -1893,14 +1987,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; - int 
start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) start_line = 0; @@ -1915,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe( struct dc_state *context) { int i; + struct dc_link *link; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; @@ -1922,8 +2017,14 @@ static void dcn20_reset_back_end_for_pipe( } if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable */ - if (!pipe_ctx->stream->dpms_off) + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) core_link_disable_stream(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -1943,11 +2044,9 @@ static void dcn20_reset_back_end_for_pipe( } } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else if (pipe_ctx->stream_res.dsc) { dp_set_dsc_enable(pipe_ctx, false); } -#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable @@ -1978,11 +2077,12 @@ static void dcn20_reset_back_end_for_pipe( pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } -static void dcn20_reset_hw_ctx_wrap( +void dcn20_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -2001,8 +2101,8 @@ static void dcn20_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2031,8 +2131,9 @@ void dcn20_get_mpctree_visual_confirm_color( *color = pipe_colors[top_pipe->pipe_idx]; } -static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; @@ -2043,10 +2144,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) // input to MPCC is always RGB, by default leave black_color at 0 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { dcn20_get_mpctree_visual_confirm_color( @@ -2083,12 +2184,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; - /* If there is no full update, don't need to touch MPC tree*/ - if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - return; - } - /* check if this MPCC is already being used */ 
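A minimal sketch, with hypothetical types, of the disable decision added in dcn20_reset_back_end_for_pipe() above: the back end is torn down when DPMS still reports the stream as on, or when the link is in fact active even though dpms_off is stale (the S4-resume case where VBIOS already lit the eDP panel). This is an illustration of the condition only, not the driver's real structures.

#include <stdbool.h>

struct example_stream { bool dpms_off; };
struct example_link   { bool link_active; };

static bool example_should_disable_stream(const struct example_stream *stream,
					  const struct example_link *link)
{
	/* neither indicator alone is trustworthy right after resume */
	return !stream->dpms_off || link->link_active;
}
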
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); /* remove MPCC if being used */ @@ -2113,125 +2208,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->mpcc_id = mpcc_id; } -static int find_free_gsl_group(const struct dc *dc) -{ - if (dc->res_pool->gsl_groups.gsl_0 == 0) - return 1; - if (dc->res_pool->gsl_groups.gsl_1 == 0) - return 2; - if (dc->res_pool->gsl_groups.gsl_2 == 0) - return 3; - - return 0; -} - -/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) - * This is only used to lock pipes in pipe splitting case with immediate flip - * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, - * so we get tearing with freesync since we cannot flip multiple pipes - * atomically. - * We use GSL for this: - * - immediate flip: find first available GSL group if not already assigned - * program gsl with that group, set current OTG as master - * and always us 0x4 = AND of flip_ready from all pipes - * - vsync flip: disable GSL if used - * - * Groups in stream_res are stored as +1 from HW registers, i.e. - * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 - * Using a magic value like -1 would require tracking all inits/resets - */ -void dcn20_setup_gsl_group_as_lock( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable) -{ - struct gsl_params gsl; - int group_idx; - - memset(&gsl, 0, sizeof(struct gsl_params)); - - if (enable) { - /* return if group already assigned since GSL was set up - * for vsync flip, we would unassign so it can't be "left over" - */ - if (pipe_ctx->stream_res.gsl_group > 0) - return; - - group_idx = find_free_gsl_group(dc); - ASSERT(group_idx != 0); - pipe_ctx->stream_res.gsl_group = group_idx; - - /* set gsl group reg field and mark resource used */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 1; - dc->res_pool->gsl_groups.gsl_0 = 1; - break; - case 2: - gsl.gsl1_en = 1; - dc->res_pool->gsl_groups.gsl_1 = 1; - break; - case 3: - gsl.gsl2_en = 1; - dc->res_pool->gsl_groups.gsl_2 = 1; - break; - default: - BREAK_TO_DEBUGGER(); - return; // invalid case - } - gsl.gsl_master_en = 1; - } else { - group_idx = pipe_ctx->stream_res.gsl_group; - if (group_idx == 0) - return; // if not in use, just return - - pipe_ctx->stream_res.gsl_group = 0; - - /* unset gsl group reg field and mark resource free */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 0; - dc->res_pool->gsl_groups.gsl_0 = 0; - break; - case 2: - gsl.gsl1_en = 0; - dc->res_pool->gsl_groups.gsl_1 = 0; - break; - case 3: - gsl.gsl2_en = 0; - dc->res_pool->gsl_groups.gsl_2 = 0; - break; - default: - BREAK_TO_DEBUGGER(); - return; - } - gsl.gsl_master_en = 0; - } - - /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { - pipe_ctx->stream_res.tg->funcs->set_gsl( - pipe_ctx->stream_res.tg, - &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); - } else - BREAK_TO_DEBUGGER(); -} - -static void dcn20_set_flip_control_gsl( - struct pipe_ctx *pipe_ctx, - bool flip_immediate) -{ - if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) - pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( - pipe_ctx->plane_res.hubp, flip_immediate); - -} - -static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; @@ -2279,7 +2256,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) } } -static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2305,7 +2282,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) hubp->inst, mode); } -static void dcn20_fpga_init_hw(struct dc *dc) +void dcn20_fpga_init_hw(struct dc *dc) { int i, j; struct dce_hwseq *hws = dc->hwseq; @@ -2320,13 +2297,13 @@ static void dcn20_fpga_init_hw(struct dc *dc) res_pool->dccg->funcs->dccg_init(res_pool->dccg); //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); // Specific to FPGA dccg and registers REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - dcn20_dccg_init(hws); + hws->funcs.dccg_init(hws); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); @@ -2390,7 +2367,7 @@ static void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hwss1_plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2419,57 +2396,3 @@ static void dcn20_fpga_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } } - -void dcn20_hw_sequencer_construct(struct dc *dc) -{ - dcn10_hw_sequencer_construct(dc); - dc->hwss.unblank_stream = dcn20_unblank_stream; - dc->hwss.update_plane_addr = dcn20_update_plane_addr; - dc->hwss.enable_stream_timing = dcn20_enable_stream_timing; - dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; - dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; - dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = NULL; - dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; - dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; - dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; - dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; - dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth; - dc->hwss.update_bandwidth = dcn20_update_bandwidth; - dc->hwss.enable_writeback = dcn20_enable_writeback; - dc->hwss.disable_writeback = dcn20_disable_writeback; - dc->hwss.program_output_csc = dcn20_program_output_csc; - dc->hwss.update_odm = dcn20_update_odm; - dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; - dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; - dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; - dc->hwss.enable_stream = dcn20_enable_stream; - dc->hwss.disable_stream = dcn20_disable_stream; - dc->hwss.init_sys_ctx = 
dcn20_init_sys_ctx; - dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; - dc->hwss.disable_stream_gating = dcn20_disable_stream_gating; - dc->hwss.enable_stream_gating = dcn20_enable_stream_gating; - dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt; - dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap; - dc->hwss.update_mpcc = dcn20_update_mpcc; - dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl; - dc->hwss.init_blank = dcn20_init_blank; - dc->hwss.disable_plane = dcn20_disable_plane; - dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable; - dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; - dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; - dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; -#else - dc->hwss.dsc_pg_control = NULL; -#endif - dc->hwss.disable_vga = dcn20_disable_vga; - - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwss.init_pipes = NULL; - } - - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 3098f1049ed7..02c9be5ebd47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -26,90 +26,112 @@ #ifndef __DC_HWSS_DCN20_H__ #define __DC_HWSS_DCN20_H__ -struct dc; +#include "hw_sequencer_private.h" -void dcn20_hw_sequencer_construct(struct dc *dc); - -enum dc_status dcn20_enable_stream_timing( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - -void dcn20_blank_pixel_data( +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_program_front_end_for_ctx( struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank); - + struct dc_state *context); +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); void dcn20_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, int opp_id); - +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); +void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_pipe_control_lock_global( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); void dcn20_prepare_bandwidth( struct dc *dc, struct dc_state *context); - void dcn20_optimize_bandwidth( struct dc *dc, struct dc_state *context); - bool dcn20_update_bandwidth( struct dc *dc, struct dc_state *context); - +void dcn20_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +enum dc_status dcn20_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_stream_gating(struct dc *dc, 
struct pipe_ctx *pipe_ctx); +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_init_blank( + struct dc *dc, + struct timing_generator *tg); +void dcn20_disable_vga( + struct dce_hwseq *hws); +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_power_gating_plane( + struct dce_hwseq *hws, + bool enable); +void dcn20_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn20_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn20_program_triple_buffer( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable_triple_buffer); +void dcn20_enable_writeback( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context); void dcn20_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst); - -bool dcn20_hwss_wait_for_blank_complete( - struct output_pixel_processor *opp); - -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx); - +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx); void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); - -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx); - -void dcn20_program_tripleBuffer( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx); - -void dcn20_pipe_control_lock_global( +void dcn20_init_vm_ctx( + struct dce_hwseq *hws, struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_setup_gsl_group_as_lock(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable); -void dcn20_dccg_init(struct dce_hwseq *hws); -void dcn20_init_blank( - struct dc *dc, - struct timing_generator *tg); -void dcn20_display_init(struct dc *dc); -void dcn20_pipe_control_lock( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn20_enable_plane( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); -bool dcn20_set_blend_lut( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -bool dcn20_set_shaper_3dlut( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -void dcn20_get_mpctree_visual_confirm_color( + struct dc_virtual_addr_space_config *va_config, + int vmid); +void dcn20_set_flip_control_gsl( struct pipe_ctx *pipe_ctx, - struct tg_color *color); + bool flip_immediate); +void dcn20_dsc_pg_control( + struct dce_hwseq *hws, + unsigned int dsc_inst, + bool power_on); +void dcn20_fpga_init_hw(struct dc *dc); +bool dcn20_wait_for_blank_complete( + struct output_pixel_processor *opp); +void dcn20_dccg_init(struct dce_hwseq *hws); +int dcn20_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + #endif /* __DC_HWSS_DCN20_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c new file mode 100644 index 000000000000..d51e02fdab4d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_hwseq.h" + +static const struct hw_sequencer_funcs dcn20_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + 
.dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn20_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .dsc_pg_control = NULL, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn20_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn20_funcs; + dc->hwseq->funcs = dcn20_private_funcs; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->hwss.init_hw = dcn20_fpga_init_hw; + dc->hwseq->funcs.init_pipes = NULL; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h new file mode 100644 index 000000000000..12277797cd71 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
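The new dcn20_init.c above replaces the long list of per-hook assignments with two const tables that are copied wholesale and then selectively overridden. The sketch below shows that construction pattern with made-up ops types; it is not the real hw_sequencer_funcs/hwseq_private_funcs layout.

#include <stdbool.h>
#include <stddef.h>

struct example_ops {
	void (*init_hw)(void);
	void (*init_pipes)(void);
};

static void example_asic_init_hw(void)    { /* full ASIC init */ }
static void example_asic_init_pipes(void) { /* pipe init */ }
static void example_fpga_init_hw(void)    { /* reduced FPGA init */ }

static const struct example_ops example_default_ops = {
	.init_hw    = example_asic_init_hw,
	.init_pipes = example_asic_init_pipes,
};

struct example_dc { struct example_ops ops; bool is_fpga; };

static void example_sequencer_construct(struct example_dc *dc)
{
	dc->ops = example_default_ops;		/* struct copy of the const table */

	if (dc->is_fpga) {			/* environment-specific overrides */
		dc->ops.init_hw    = example_fpga_init_hw;
		dc->ops.init_pipes = NULL;	/* callers must NULL-check */
	}
}
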
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN20_INIT_H__ +#define __DC_DCN20_INIT_H__ + +struct dc; + +void dcn20_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index e476f27aa3a9..e4ac73035c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = { void enc2_fec_set_enable(struct link_encoder *enc, bool enable) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LOG_DSC("%s FEC at link encoder inst %d", enable ? "Enabling" : "Disabling", enc->id.enum_id); -#endif REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable); } @@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc) return (active != 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* this function reads dsc related register fields to be logged later in dcn10_log_hw_state * into a dcn_dsc_state struct. */ @@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s) REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en); REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow); REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status); + REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete); } -#endif static bool update_cfg_data( struct dcn10_link_encoder *enc10, @@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc) } static const struct link_encoder_funcs dcn20_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .read_state = link_enc2_read_state, -#endif .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, .hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 0c98a0bbbd14..8cab8107fd94 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -33,7 +33,142 @@ SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id) #define UNIPHY_MASK_SH_LIST(mask_sh)\ - LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh) + LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh) + +#define DPCS_MASK_SH_LIST(mask_sh)\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\ + 
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\ + 
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\ + LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ + 
LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh) + +#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\ + DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\ @@ -63,6 +198,49 @@ SRI(CLOCK_ENABLE, SYMCLK, id), \ SRI(CHANNEL_XBAR_CNTL, UNIPHY, id) +#define DPCS_DCN2_CMN_REG_LIST(id) \ + SRI(DIG_LANE_ENABLE, DIG, id), \ + SRI(TMDS_CTL_BITS, DIG, id), \ + SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \ + SRI(RDPCSTX_CNTL, RDPCSTX, id), \ + SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \ + SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \ + SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \ + SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \ + SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ + SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ + SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + + +#define DPCS_DCN2_REG_LIST(id) \ + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ + SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) + +#define LE_DCN2_REG_LIST(id) \ + LE_DCN10_REG_LIST(id), \ + SR(DCIO_SOFT_RESET) + struct mpll_cfg { uint32_t mpllb_ana_v2i; uint32_t 
mpllb_ana_freq_vco; @@ -158,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready); bool enc2_fec_is_active(struct link_encoder *enc); void enc2_hw_init(struct link_encoder *enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s); -#endif void dcn20_link_encoder_enable_dp_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5a188b2bc033..de9c857ab3e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -33,6 +33,9 @@ #define REG(reg)\ mpc20->mpc_regs->reg +#define IND_REG(index) \ + (index) + #define CTX \ mpc20->base.ctx @@ -132,19 +135,33 @@ void mpc2_set_output_csc( const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode) { + uint32_t cur_mode; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); struct color_matrices_reg ocsc_regs; - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); return; + } if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } + /* determine which CSC coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -157,10 +174,13 @@ void mpc2_set_output_csc( ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); } + cm_helper_program_color_matrices( mpc20->base.ctx, regval, &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); } void mpc2_set_ocsc_default( @@ -169,14 +189,16 @@ void mpc2_set_ocsc_default( enum dc_color_space color_space, enum mpc_output_csc_mode ocsc_mode) { + uint32_t cur_mode; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); uint32_t arr_size; struct color_matrices_reg ocsc_regs; const uint16_t *regval = NULL; - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); return; + } regval = find_color_matrix(color_space, &arr_size); @@ -185,6 +207,19 @@ void mpc2_set_ocsc_default( return; } + /* determine which CSC coefficients (A or B) we are using + * currently. 
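The read-then-program sequence in mpc2_set_output_csc() above (and repeated just below for the default matrices) is an A/B double buffer: read back which coefficient bank the hardware is currently scanning out, program the other bank, and only then flip the mode register so the new matrix latches at a frame boundary rather than mid-frame. A small sketch with invented names, assuming register helpers exist:

enum example_csc_bank { EXAMPLE_CSC_BANK_A = 1, EXAMPLE_CSC_BANK_B = 2 };

static enum example_csc_bank
example_pick_inactive_bank(enum example_csc_bank active)
{
	return active == EXAMPLE_CSC_BANK_A ? EXAMPLE_CSC_BANK_B
					    : EXAMPLE_CSC_BANK_A;
}

/* usage (hypothetical helpers):
 *	bank = example_pick_inactive_bank(read_active_bank());
 *	write_coefficients(bank, matrix);
 *	write_mode(bank);	// takes effect at the next frame start
 */
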
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -203,6 +238,8 @@ void mpc2_set_ocsc_default( mpc20->base.ctx, regval, &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); } static void mpc2_ogam_get_reg_field( @@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl( uint32_t i; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + PERF_TRACE(); + REG_SEQ_START(); + for (i = 0 ; i < num; i++) { REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); @@ -463,6 +503,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } + + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h index 9f53192da2dc..c78fd5123497 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h @@ -80,6 +80,10 @@ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst) +#define MPC_DBG_REG_LIST_DCN2_0() \ + SR(MPC_OCSC_TEST_DEBUG_DATA),\ + SR(MPC_OCSC_TEST_DEBUG_INDEX) + #define MPC_REG_VARIABLE_LIST_DCN2_0 \ MPC_COMMON_REG_VARIABLE_LIST \ uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \ @@ -118,6 +122,8 @@ uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\ uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\ uint32_t MPCC_OGAM_MODE[MAX_MPCC];\ + uint32_t MPC_OCSC_TEST_DEBUG_DATA;\ + uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\ uint32_t CSC_MODE[MAX_OPP]; \ uint32_t CSC_C11_C12_A[MAX_OPP]; \ uint32_t CSC_C33_C34_A[MAX_OPP]; \ @@ -134,6 +140,7 @@ SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\ SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ @@ -174,6 +181,19 @@ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) +/* + * DCN2 MPC_OCSC debug status register: + * + * Status index including current OCSC Mode is 1 + * OCSC Mode: [1..0] + */ +#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1 + +#define MPC_DEBUG_REG_LIST_SH_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0 + +#define MPC_DEBUG_REG_LIST_MASK_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3 #define MPC_REG_FIELD_LIST_DCN2_0(type) \ MPC_REG_FIELD_LIST(type)\ @@ -182,6 +202,8 @@ type MPCC_TOP_GAIN;\ type MPCC_BOT_GAIN_INSIDE;\ type MPCC_BOT_GAIN_OUTSIDE;\ + type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\ + type MPC_OCSC_TEST_DEBUG_INDEX;\ type MPC_OCSC_MODE;\ type MPC_OCSC_C11_A;\ type MPC_OCSC_C12_A;\ diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 40164ed015ea..023cc71fad0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -41,6 +41,7 @@ void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, @@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator( TEST_PATTERN_DYN_RANGE_CEA : TEST_PATTERN_DYN_RANGE_VESA); + switch (color_space) { + case CONTROLLER_DP_COLOR_SPACE_YCBCR601: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; + break; + case CONTROLLER_DP_COLOR_SPACE_YCBCR709: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; + break; + case CONTROLLER_DP_COLOR_SPACE_RGB: + default: + mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; + break; + } + REG_UPDATE_6(DPG_CONTROL, DPG_EN, 1, - DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB, + DPG_MODE, mode, DPG_DYNAMIC_RANGE, dyn_range, DPG_BIT_DEPTH, bit_depth, DPG_VRES, 6, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index abd8de9a78f8..4093bec172c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20, void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 3b613fb93ef8..d875b0c38fde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); + REG_SEQ_START(); + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + return true; } @@ -167,7 +172,6 @@ void optc2_set_gsl_source_select( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, int x_position, @@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc, REG_UPDATE(OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } -#endif -/** - * PTI i think is already done somewhere else for 2ka - * (opp?, please double check. 
- * OPTC side only has 1 register to set for PTI_ENABLE) - */ +/*TEMP: Need to figure out inheritance model here.*/ +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) @@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc, OPTC_SEG1_SRC_SEL, 0xf); REG_WRITE(OTG_H_TIMING_CNTL, 0); - h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing); + h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing); REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, h_div_2); REG_SET(OPTC_MEMORY_CONFIG, 0, @@ -233,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) / opp_cnt; - int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf; + uint32_t memory_mask; uint32_t data_fmt = 0; + ASSERT(opp_cnt == 2); + /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start @@ -246,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c * MASTER_UPDATE_LOCK_DB_X, 160, * MASTER_UPDATE_LOCK_DB_Y, 240); */ + + /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, + * however, for ODM combine we can simplify by always using 4. + * To make sure there's no overlap, each instance "reserves" 2 memories and + * they are uniquely combined here. 
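As a concrete illustration of the OPTC memory reservation described above (the values here are made up for the example): with opp_id[0] = 1 and opp_id[1] = 2,

	memory_mask = (0x3 << (1 * 2)) | (0x3 << (2 * 2));	/* 0x0C | 0x30 = 0x3C */

i.e. OPP instance 1 claims memories 2-3 and instance 2 claims memories 4-5, so two ODM-combined pipes can never select overlapping memory instances.
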
+ */ + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + if (REG(OPTC_MEMORY_CONFIG)) REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask << (optc->inst * 4)); + OPTC_MEM_SEL, memory_mask); if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) data_fmt = 1; @@ -257,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); - ASSERT(opp_cnt == 2); REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 1, OPTC_SEG0_SRC_SEL, opp_id[0], @@ -379,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, - MANUAL_FLOW_CONTROL, 1); - - REG_SET(OTG_GLOBAL_CONTROL2, 0, - MANUAL_FLOW_CONTROL_SEL, optc->inst); - REG_SET_8(OTG_TRIGA_CNTL, 0, - OTG_TRIGA_SOURCE_SELECT, 22, + OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, @@ -448,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, .configure_crc = optc1_configure_crc, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .set_dsc_config = optc2_set_dsc_config, -#endif .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 32a58431fd09..239cc40ae474 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void optc2_set_dsc_config(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); @@ -108,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc); void optc2_triplebuffer_unlock(struct timing_generator *optc); void optc2_lock_doublebuffer_disable(struct timing_generator *optc); void optc2_lock_doublebuffer_enable(struct timing_generator *optc); +void optc2_setup_manual_trigger(struct timing_generator *optc); void optc2_program_manual_trigger(struct timing_generator *optc); - +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 23ff2f1c75b5..85f90f3e24cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1,5 +1,6 @@ /* * Copyright 2016 Advanced Micro Devices, Inc. 
+ * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -28,6 +29,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn20_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" @@ -45,9 +48,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn20_opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20_dsc.h" -#endif #include "dcn20_link_encoder.h" #include "dcn20_stream_encoder.h" @@ -59,11 +60,14 @@ #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" +#include "dc_link_ddc.h" #include "navi10_ip_offset.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" +#include "dpcs/dpcs_2_0_0_offset.h" +#include "dpcs/dpcs_2_0_0_sh_mask.h" #include "nbio/nbio_2_3_offset.h" @@ -82,8 +86,6 @@ #include "amdgpu_socbb.h" -/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */ -#define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { @@ -94,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 0, .pte_group_size_bytes = 2048, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 84, @@ -553,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { [id] = {\ LE_DCN10_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } @@ -566,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = { }; static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN2_MASK_SH_LIST(_MASK) }; #define ipp_regs(id)\ @@ -637,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = { #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { @@ -650,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = { static const struct dcn2_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN20(__SHIFT), - TF_DEBUG_REG_LIST_SH_DCN10 + TF_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn2_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN20(_MASK), - TF_DEBUG_REG_LIST_MASK_DCN10 + TF_DEBUG_REG_LIST_MASK_DCN20 }; #define dwbc_regs_dcn2(id)\ @@ -705,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = { MPC_OUT_MUX_REG_LIST_DCN2_0(3), MPC_OUT_MUX_REG_LIST_DCN2_0(4), MPC_OUT_MUX_REG_LIST_DCN2_0(5), + MPC_DBG_REG_LIST_DCN2_0() }; static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 }; #define tg_regs(id)\ @@ -838,7 +843,6 @@ static int map_transmitter_id_to_phy_instance( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -860,7 
+864,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN2() @@ -884,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = { .num_dwb = 1, .num_ddc = 6, .num_vmid = 16, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#endif }; static const struct dc_plane_cap plane_cap = { @@ -923,9 +924,7 @@ static const struct resource_caps res_cap_nv14 = { .num_dwb = 1, .num_ddc = 5, .num_vmid = 16, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 5, -#endif }; static const struct dc_debug_options debug_defaults_drv = { @@ -1284,7 +1283,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1307,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc) *dsc = NULL; } -#endif -static void destruct(struct dcn20_resource_pool *pool) +static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) { unsigned int i; @@ -1320,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -1418,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool) if (pool->base.pp_smu != NULL) dcn20_pp_smu_destroy(&pool->base.pp_smu); + if (pool->base.oem_device != NULL) + dal_ddc_service_destroy(&pool->base.oem_device); } struct hubp *dcn20_hubp_create( @@ -1468,7 +1465,7 @@ static void get_pixel_clock_parameters( if (opp_cnt == 4) pixel_clk_params->requested_pix_clk_100hz /= 4; - else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) + else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) @@ -1534,7 +1531,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state return status; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1575,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx, } } -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static enum dc_status add_dsc_to_stream_resource(struct dc *dc, +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) { @@ -1594,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; + if (pipe_ctx->stream_res.dsc) + continue; + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { - dm_output_to_console("No DSCs available\n"); result = DC_NO_DSC_RESOURCE; } @@ -1630,7 +1626,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, else return DC_OK; } -#endif enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) @@ -1642,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, if (result == DC_OK) result = 
resource_map_phy_clock_resources(dc, new_ctx, dc_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Get a DSC if required and available */ if (result == DC_OK && dc_stream->timing.flags.DSC) - result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); -#endif + result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream); if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); @@ -1659,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ { enum dc_status result = DC_OK; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); -#endif return result; } @@ -1744,9 +1735,7 @@ bool dcn20_split_stream_for_odm( next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT next_odm_pipe->stream_res.dsc = NULL; -#endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; @@ -1792,14 +1781,12 @@ bool dcn20_split_stream_for_odm( sd->recout.x = 0; } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; } -#endif return true; } @@ -1823,9 +1810,7 @@ void dcn20_split_stream_for_mpc( secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT secondary_pipe->stream_res.dsc = NULL; -#endif if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { ASSERT(!secondary_pipe->bottom_pipe); secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; @@ -1876,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context( } +static int get_num_odm_heads(struct pipe_ctx *pipe) +{ + int odm_head_count = 0; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + while (next_pipe) { + odm_head_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = pipe->prev_odm_pipe; + while (pipe) { + odm_head_count++; + pipe = pipe->prev_odm_pipe; + } + return odm_head_count ? 
odm_head_count + 1 : 0; +} + int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { int pipe_cnt, i; bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1900,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context( for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + unsigned int v_total; + unsigned int front_porch; int output_bpc; if (!res_ctx->pipe_ctx[i].stream) continue; + + v_total = timing->v_total; + front_porch = timing->v_front_porch; /* todo: pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; pipes[pipe_cnt].pipe.src.dcc = 0; pipes[pipe_cnt].pipe.src.vm = 0;*/ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; /* todo: rotation?*/ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; -#endif if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; /* 1/2 vblank */ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = - (timing->v_total - timing->v_addressable + (v_total - timing->v_addressable - timing->v_border_top - timing->v_border_bottom) / 2; /* 36 bytes dp, 32 hdmi */ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = @@ -1932,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context( - timing->h_addressable - timing->h_border_left - timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch; + pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; - pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal = v_total; pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; @@ -1949,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dp_lanes = 4; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe - || res_ctx->pipe_ctx[i].next_odm_pipe; + switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled; + } pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == res_ctx->pipe_ctx[i].plane_state) @@ -2001,14 +2013,12 @@ int dcn20_populate_dml_pipes_from_context( case COLOR_DEPTH_161616: output_bpc = 16; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case COLOR_DEPTH_999: output_bpc = 9; break; case COLOR_DEPTH_111111: output_bpc = 11; break; -#endif default: 
output_bpc = 8; break; @@ -2036,10 +2046,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; -#endif /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; @@ -2063,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; + pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; + pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ @@ -2077,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; pipes[pipe_cnt].pipe.src.is_hsplit = 0; pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total; - pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; + pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; @@ -2096,6 +2108,10 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; + pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; + pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width; + pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; @@ -2261,7 +2277,6 @@ void dcn20_set_mcif_arb_params( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2295,7 +2310,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } return true; } -#endif struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, @@ -2398,10 +2412,8 @@ void dcn20_merge_pipes_for_validate( odm_pipe->bottom_pipe = NULL; odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (odm_pipe->stream_res.dsc) release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); -#endif /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2513,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags( split[i] 
= true; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { split[i] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1; } context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; @@ -2544,7 +2556,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes); *pipe_cnt_out = pipe_cnt; @@ -2621,14 +2633,12 @@ bool dcn20_fast_validate_bw( ASSERT(0); } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; } -#endif *vlevel_out = vlevel; @@ -2692,10 +2702,10 @@ static void dcn20_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + context, pipes); } *out_pipe_cnt = pipe_cnt; @@ -2715,11 +2725,9 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2731,10 +2739,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2746,10 +2752,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2759,10 +2763,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif } void dcn20_calculate_dlg_params( @@ -2928,11 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; - double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + double p_state_latency_us; - if (fast_validate) - return dcn20_validate_bandwidth_internal(dc, context, true); + DC_FP_START(); + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = + dc->debug.disable_dram_clock_change_vactive_support; + if (fast_validate) { + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); + + DC_FP_END(); + return voltage_supported; + } // Best case, we support full UCLK switch latency voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); @@ -2940,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || (voltage_supported && full_pstate_supported)) { - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported; goto restore_dml_state; } @@ -2961,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, restore_dml_state: context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + DC_FP_END(); return voltage_supported; } @@ -3005,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool) { struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); - destruct(dcn20_pool); + dcn20_resource_destruct(dcn20_pool); kfree(dcn20_pool); *pool = NULL; } @@ -3252,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { - kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns && dc->bb_overrides.sr_exit_time_ns) { bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; @@ -3276,7 +3286,6 @@ void 
dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st bb->dram_clock_change_latency_us = dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } - kernel_fpu_end(); } static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( @@ -3318,12 +3327,13 @@ static bool init_soc_bounding_box(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - if (!bb && !SOC_BOUNDING_BOX_VALID) { + /* TODO: upstream NV12 bounding box when its launched */ + if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); return false; } - if (bb && !SOC_BOUNDING_BOX_VALID) { + if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { int i; dcn2_0_nv12_soc.sr_exit_time_us = @@ -3465,7 +3475,7 @@ static bool init_soc_bounding_box(struct dc *dc, return true; } -static bool construct( +static bool dcn20_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn20_resource_pool *pool) @@ -3473,6 +3483,7 @@ static bool construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = @@ -3480,6 +3491,8 @@ static bool construct( enum dml_project dml_project_version = get_dml_project_version(ctx->asic_id.hw_internal_rev); + DC_FP_START(); + ctx->dc_bios->regs = &bios_regs; pool->base.funcs = &dcn20_res_pool_funcs; @@ -3732,7 +3745,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn20_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -3741,7 +3753,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); @@ -3768,11 +3779,24 @@ static bool construct( dc->cap_funcs = cap_funcs; + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + + DC_FP_END(); return true; create_fail: - destruct(pool); + DC_FP_END(); + dcn20_resource_destruct(pool); return false; } @@ -3787,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index fef473d68a4a..f5893840b79b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, @@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags( struct dc_state *context, int 
vlevel, bool *split); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); -#endif void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -159,6 +157,7 @@ void dcn20_calculate_dlg_params( enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index fcb3877b4fcb..9b70a1e7b962 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC7_LINE, 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Update GSP7 SDP 128 byte long */ static void enc2_update_gsp7_128_info_packet( @@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s) REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); } } -#endif /* Set Dynamic Metadata-configuration. * enable_dme: TRUE: enables Dynamic Metadata Enfine, FALSE: disables DME @@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } @@ -541,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting); + enc1_stream_encoder_dp_set_stream_attribute(enc, + crtc_timing, + output_color_space, + use_vsc_sdp_for_colorimetry, + enable_sdp_splitting); REG_UPDATE(DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, enable_sdp_splitting); @@ -568,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc2_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets = enc2_stream_encoder_update_dp_info_packets, + .send_immediate_sdp_message = + enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, .dp_blank = @@ -590,11 +593,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, -#endif .set_dynamic_metadata = enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, }; diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 3f94a9f13c4a..d2a805bd4573 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc2_stream_encoder_dp_unblank( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 5b8c17564bc1..07684d3e375a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -2,9 +2,16 @@ # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o +DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ + dcn21_hwseq.o dcn21_link_encoder.o +ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -12,6 +19,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -20,6 +28,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 else CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 endif +endif AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 2f5a5867e674..cf09b9335728 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -29,6 +29,10 @@ #include "dm_services.h" #include "reg_helper.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp21->hubp_regs->reg @@ -164,6 +168,158 @@ static void hubp21_setup( } +void hubp21_set_viewport( + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, + PRI_VIEWPORT_WIDTH, viewport->width, + PRI_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0, + PRI_VIEWPORT_X_START, viewport->x, + PRI_VIEWPORT_Y_START, viewport->y); + + /*for stereo*/ + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0, + SEC_VIEWPORT_WIDTH, viewport->width, + SEC_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0, + SEC_VIEWPORT_X_START, viewport->x, + SEC_VIEWPORT_Y_START, viewport->y); + + /* DC supports NV12 only at the moment */ + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, + PRI_VIEWPORT_WIDTH_C, viewport_c->width, + PRI_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, + PRI_VIEWPORT_X_START_C, viewport_c->x, + PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); +} + +static void 
hubp21_apply_PLAT_54186_wa( + struct hubp *hubp, + const struct dc_plane_address *address) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + unsigned int chroma_bpe = 2; + unsigned int luma_addr_high_part = 0; + unsigned int row_height = 0; + unsigned int chroma_pitch = 0; + unsigned int viewport_c_height = 0; + unsigned int viewport_c_width = 0; + unsigned int patched_viewport_height = 0; + unsigned int patched_viewport_width = 0; + unsigned int rotation_angle = 0; + unsigned int pix_format = 0; + unsigned int h_mirror_en = 0; + unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */ + + + if (!debug->nv12_iflip_vm_wa) + return; + + REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, + PTE_ROW_HEIGHT_LINEAR_C, &row_height); + + REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, + PRI_VIEWPORT_WIDTH_C, &viewport_c_width, + PRI_VIEWPORT_HEIGHT_C, &viewport_c_height); + + REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, + PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part); + + REG_GET(DCSURF_SURFACE_PITCH_C, + PITCH_C, &chroma_pitch); + + chroma_pitch += 1; + + REG_GET_3(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, &pix_format, + ROTATION_ANGLE, &rotation_angle, + H_MIRROR_EN, &h_mirror_en); + + /* reset persistent cached data */ + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + /* apply wa only for NV12 surface with scatter gather enabled with viewport > 512 along + * the vertical direction*/ + if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || + address->video_progressive.luma_addr.high_part == 0xf4) + return; + + if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180) + && viewport_c_height <= 512) + return; + + if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270) + && viewport_c_width <= 512) + return; + + switch (rotation_angle) { + case ROTATION_ANGLE_0: /* 0 degree rotation */ + row_height = 128; + patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; + patched_viewport_width = viewport_c_width; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + break; + case ROTATION_ANGLE_180: /* 180 degree rotation */ + row_height = 128; + patched_viewport_height = viewport_c_height + row_height; + patched_viewport_width = viewport_c_width; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; + break; + case ROTATION_ANGLE_90: /* 90 degree rotation */ + row_height = 256; + if (h_mirror_en) { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + } else { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; + } + break; + case ROTATION_ANGLE_270: /* 270 degree rotation */ + row_height = 256; + if (h_mirror_en) { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; + } else { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + } + break; + default: + ASSERT(0); + break; + } + + /* catch cases where viewport keep growing */ + ASSERT(patched_viewport_height && patched_viewport_height < 5000); + ASSERT(patched_viewport_width && patched_viewport_width < 5000); + + 
REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, + PRI_VIEWPORT_WIDTH_C, patched_viewport_width, + PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); +} + void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, struct vm_system_aperture_param *apt) { @@ -191,6 +347,562 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, SYSTEM_ACCESS_MODE, 0x3); } +void hubp21_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); + + /* Requester - Per hubp */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + 
dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:VM_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); + if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); + if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); + if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); + if 
(rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + + if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); + if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); + if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n", + dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); + if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) + DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); + if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); + if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); + if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) + DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n", + dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if 
(REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | 
NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | 
DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + 
dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); + + /* Host VM deadline regs */ + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank); + REG_GET(VBLANK_PARAMETERS_6, + REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank); + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip); + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip); + REG_GET(FLIP_PARAMETERS_5, + REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c); + REG_GET(FLIP_PARAMETERS_6, + REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c); + REG_GET(FLIP_PARAMETERS_2, + REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l); + + if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank); + if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank); + if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip); + if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip); + if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l); +} + +static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + + REG_UPDATE_3(DCSURF_FLIP_CONTROL, + SURFACE_FLIP_TYPE, flip_regs->immediate, + SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo, + SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo); + + REG_UPDATE(VMID_SETTINGS_0, + VMID, flip_regs->vmid); + + REG_UPDATE_8(DCSURF_SURFACE_CONTROL, + PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface, + PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface, + PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface, + PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface, + SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface, + SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface, + SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface, + 
SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0, + PRIMARY_META_SURFACE_ADDRESS_HIGH_C, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0, + PRIMARY_META_SURFACE_ADDRESS_C, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, + PRIMARY_META_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, + PRIMARY_META_SURFACE_ADDRESS, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS); + + REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0, + SECONDARY_META_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0, + SECONDARY_META_SURFACE_ADDRESS, + flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS); + + + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0, + SECONDARY_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0, + SECONDARY_SURFACE_ADDRESS, + flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS); + + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0, + PRIMARY_SURFACE_ADDRESS_HIGH_C, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0, + PRIMARY_SURFACE_ADDRESS_C, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, + PRIMARY_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, + PRIMARY_SURFACE_ADDRESS, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS); +} + +void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ + struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 }; + + PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; + PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo; + PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst; + PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate; + PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface; + PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; + + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header); + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_cmd_execute(dmcub); + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_wait_idle(dmcub); + PERF_TRACE(); // TODO: remove after performance is stable. 
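+	/* Note: the flip workaround itself is carried out by the DMCUB
+	 * firmware. The command assembled above is queued into the DMUB
+	 * ring buffer, execution is kicked off, and dc_dmub_srv_wait_idle()
+	 * waits for the firmware to go idle before returning.
+	 */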
+} + +bool hubp21_program_surface_flip_and_addr( + struct hubp *hubp, + const struct dc_plane_address *address, + bool flip_immediate) +{ + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct surface_flip_registers flip_regs = { 0 }; + + flip_regs.vmid = address->vmid; + + switch (address->type) { + case PLN_ADDR_TYPE_GRAPHICS: + if (address->grph.addr.quad_part == 0) { + BREAK_TO_DEBUGGER(); + break; + } + + if (address->grph.meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->grph.meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->grph.meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->grph.addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->grph.addr.high_part; + break; + case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: + if (address->video_progressive.luma_addr.quad_part == 0 + || address->video_progressive.chroma_addr.quad_part == 0) + break; + + if (address->video_progressive.luma_meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->video_progressive.luma_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->video_progressive.luma_meta_addr.high_part; + + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C = + address->video_progressive.chroma_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C = + address->video_progressive.chroma_meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->video_progressive.luma_addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->video_progressive.luma_addr.high_part; + + if (debug->nv12_iflip_vm_wa) { + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = + address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset; + } else + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = + address->video_progressive.chroma_addr.low_part; + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = + address->video_progressive.chroma_addr.high_part; + + break; + case PLN_ADDR_TYPE_GRPH_STEREO: + if (address->grph_stereo.left_addr.quad_part == 0) + break; + if (address->grph_stereo.right_addr.quad_part == 0) + break; + + flip_regs.grph_stereo = true; + + if (address->grph_stereo.right_meta_addr.quad_part != 0) { + flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS = + address->grph_stereo.right_meta_addr.low_part; + flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH = + address->grph_stereo.right_meta_addr.high_part; + } + + if (address->grph_stereo.left_meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->grph_stereo.left_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->grph_stereo.left_meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->grph_stereo.left_addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->grph_stereo.left_addr.high_part; + + flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS = + address->grph_stereo.right_addr.low_part; + flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH = + address->grph_stereo.right_addr.high_part; + + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + flip_regs.tmz_surface = address->tmz_surface; + flip_regs.immediate = flip_immediate; + + if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + dmcub_PLAT_54186_wa(hubp, &flip_regs); 
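+	/* Otherwise fall back to programming the flip registers directly
+	 * through the HUBP.
+	 */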
+ else + program_surface_flip_and_addr(hubp, &flip_regs); + + hubp->request_address = *address; + + return true; +} + void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta @@ -203,7 +915,7 @@ void hubp21_init(struct hubp *hubp) static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, - .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, + .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr, .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, @@ -211,7 +923,8 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings, .set_blank = hubp1_set_blank, .dcc_control = hubp1_dcc_control, - .mem_program_viewport = min_set_viewport, + .mem_program_viewport = hubp21_set_viewport, + .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa, .set_cursor_attributes = hubp2_cursor_set_attributes, .set_cursor_position = hubp1_cursor_set_position, .hubp_clk_cntl = hubp1_clk_cntl, @@ -223,6 +936,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_clear_underflow = hubp1_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp21_init, + .validate_dml_output = hubp21_validate_dml_output, }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h index aeda719a2a13..9873b6cbc5ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h @@ -108,6 +108,7 @@ struct dcn21_hubp { const struct dcn_hubp2_registers *hubp_regs; const struct dcn_hubp2_shift *hubp_shift; const struct dcn_hubp2_mask *hubp_mask; + int PLAT_54186_wa_chroma_addr_offset; }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index b25215cadf85..081ad8e43d58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "resource.h" #include "dce/dce_hwseq.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21_hwseq.h" #include "vmid.h" #include "reg_helper.h" #include "hw/clk_mgr.h" @@ -61,7 +61,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c } -static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; @@ -82,7 +82,7 @@ static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph // work around for Renoir s0i3, if register is programmed, bypass golden init. 
-static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +bool dcn21_s0i3_golden_init_wa(struct dc *dc) { struct dce_hwseq *hws = dc->hwseq; uint32_t value = 0; @@ -112,11 +112,3 @@ void dcn21_optimize_pwr_state( true); } -void dcn21_hw_sequencer_construct(struct dc *dc) -{ - dcn20_hw_sequencer_construct(dc); - dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; - dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; - dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; - dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index be67b62e6fb1..182736096123 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -26,8 +26,22 @@ #ifndef __DC_HWSS_DCN21_H__ #define __DC_HWSS_DCN21_H__ +#include "hw_sequencer_private.h" + struct dc; -void dcn21_hw_sequencer_construct(struct dc *dc); +int dcn21_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + +bool dcn21_s0i3_golden_init_wa(struct dc *dc); + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context); + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context); #endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c new file mode 100644 index 000000000000..4861aa5c59ae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -0,0 +1,142 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21_hwseq.h" + +static const struct hw_sequencer_funcs dcn21_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn21_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, +}; + +static const struct hwseq_private_funcs dcn21_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + 
.set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .dsc_pg_control = NULL, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn21_funcs; + dc->hwseq->funcs = dcn21_private_funcs; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->hwss.init_hw = dcn20_fpga_init_hw; + dc->hwseq->funcs.init_pipes = NULL; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h new file mode 100644 index 000000000000..3ed24292648a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN21_INIT_H__ +#define __DC_DCN21_INIT_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index e8a504ca5890..e45683ac871a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -323,9 +323,7 @@ void dcn21_link_encoder_disable_output( static const struct link_encoder_funcs dcn21_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .read_state = link_enc2_read_state, -#endif .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, .hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h index 1d7a1a51f13d..033d5d76f195 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -33,6 +33,45 @@ struct dcn21_link_encoder { struct dpcssys_phy_seq_cfg phy_seq_cfg; }; +#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\ + DPCS_DCN2_MASK_SH_LIST(mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) + +#define DPCS_DCN21_REG_LIST(id) \ + DPCS_DCN2_REG_LIST(id),\ + 
SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\ + SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) + #define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index b29b2c99a564..0d506d30d6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1,5 +1,6 @@ /* * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -28,6 +29,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn21_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" @@ -60,6 +63,8 @@ #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" +#include "dpcs/dpcs_2_1_0_offset.h" +#include "dpcs/dpcs_2_1_0_sh_mask.h" #include "renoir_ip_offset.h" #include "dcn/dcn_2_1_0_offset.h" @@ -78,6 +83,7 @@ #include "dcn21_resource.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "../dce/dmub_psr.h" #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) @@ -90,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 44, @@ -352,7 +354,7 @@ static const struct bios_registers bios_regs = { }; static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCN10_REG_LIST() + DMCU_DCN20_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { @@ -375,20 +377,6 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN20(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DMUB -static const struct dcn21_dmcub_registers dmcub_regs = { - DMCUB_REG_LIST_DCN() -}; - -static const struct dcn21_dmcub_shift dmcub_shift = { - DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT) -}; - -static const struct dcn21_dmcub_mask dmcub_mask = { - DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK) -}; -#endif - #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ @@ -478,15 +466,18 @@ static const struct dcn20_mpc_registers mpc_regs = { MPC_OUT_MUX_REG_LIST_DCN2_0(0), MPC_OUT_MUX_REG_LIST_DCN2_0(1), MPC_OUT_MUX_REG_LIST_DCN2_0(2), - MPC_OUT_MUX_REG_LIST_DCN2_0(3) + MPC_OUT_MUX_REG_LIST_DCN2_0(3), + MPC_DBG_REG_LIST_DCN2_0() }; static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 }; #define hubp_regs(id)\ @@ -554,7 +545,6 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -576,7 +566,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif #define ipp_regs(id)\ [id] = {\ @@ -623,6 +612,7 @@ static const struct 
dce110_aux_registers aux_engine_regs[] = { #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { @@ -633,11 +623,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN20 }; #define stream_enc_regs(id)\ @@ -672,7 +664,7 @@ static const struct dcn10_stream_encoder_mask se_mask = { static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) @@ -773,9 +765,7 @@ static const struct resource_caps res_cap_rn = { .num_dwb = 1, .num_ddc = 5, .num_vmid = 1, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, -#endif }; #ifdef DIAGS_BUILD @@ -800,9 +790,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { .num_pll = 4, .num_dwb = 1, .num_ddc = 4, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 2, -#endif }; #endif @@ -842,11 +830,13 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 3840, + .max_downscale_src_width = 4096, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, .disable_48mhz_pwrdwn = false, + .nv12_iflip_vm_wa = true, + .usbc_combo_phy_reset_wa = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -869,7 +859,7 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_TOTAL_DCN21 }; -static void destruct(struct dcn21_resource_pool *pool) +static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) { unsigned int i; @@ -880,12 +870,10 @@ static void destruct(struct dcn21_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -972,11 +960,6 @@ static void destruct(struct dcn21_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); -#ifdef CONFIG_DRM_AMD_DC_DMUB - if (pool->base.dmcub != NULL) - dcn21_dmcub_destroy(&pool->base.dmcub); -#endif - if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); @@ -1010,11 +993,9 @@ static void calculate_wm_set_for_vlevel( wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; -#endif dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } @@ -1023,7 +1004,8 @@ static void 
patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s { int i; - kernel_fpu_begin(); + DC_FP_START(); + if (dc->bb_overrides.sr_exit_time_ns) { for (i = 0; i < WM_SET_COUNT; i++) { dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = @@ -1049,7 +1031,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s } } - kernel_fpu_end(); + DC_FP_END(); } void dcn21_calculate_wm( @@ -1099,10 +1081,10 @@ void dcn21_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + context, pipes); } *out_pipe_cnt = pipe_cnt; @@ -1192,7 +1174,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool) { struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); - destruct(dcn21_pool); + dcn21_resource_destruct(dcn21_pool); kfree(dcn21_pool); *pool = NULL; } @@ -1331,7 +1313,6 @@ static void read_dce_straps( } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn21_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1347,16 +1328,9 @@ struct display_stream_compressor *dcn21_dsc_create( dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } -#endif static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - /* - TODO: Fix this function to calcualte correct values. - There are known issues with this function currently - that will need to be investigated. Use hardcoded known good values for now. - - struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1371,11 +1345,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; + dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; } - dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1]; dcn2_1_soc.num_states = i; - */ + + // diags does not retrieve proper values from SMU, do not update DML instance for diags + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); } /* Temporary Place holder until we can get them from fuse */ @@ -1529,8 +1506,9 @@ static const struct encoder_feature_support link_enc_feature = { #define link_regs(id, phyid)\ [id] = {\ - LE_DCN10_REG_LIST(id), \ + LE_DCN2_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN21_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } @@ -1569,11 +1547,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { }; static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN21_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN21_MASK_SH_LIST(_MASK) }; static int 
map_transmitter_id_to_phy_instance( @@ -1639,10 +1619,11 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) } static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { - uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes); int i; + struct resource_context *res_ctx = &context->res_ctx; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1671,7 +1652,7 @@ static struct resource_funcs dcn21_res_pool_funcs = { .update_bw_bounding_box = update_bw_bounding_box }; -static bool construct( +static bool dcn21_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn21_resource_pool *pool) @@ -1711,6 +1692,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1760,7 +1742,7 @@ static bool construct( goto create_fail; } - pool->base.dmcu = dcn20_dmcu_create(ctx, + pool->base.dmcu = dcn21_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); @@ -1770,6 +1752,10 @@ static bool construct( goto create_fail; } + // Leave as NULL to not affect current dmcu psr programming sequence + // Will be uncommented when functionality is confirmed to be working + pool->base.psr = NULL; + pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, @@ -1780,18 +1766,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DMUB - pool->base.dmcub = dcn21_dmcub_create(ctx, - &dmcub_regs, - &dmcub_shift, - &dmcub_mask); - if (pool->base.dmcub == NULL) { - dm_error("DC: failed to create dmcub!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } -#endif - pool->base.pp_smu = dcn21_pp_smu_create(ctx); num_pipes = dcn2_1_ip.max_num_dpp; @@ -1818,41 +1792,41 @@ static bool construct( if ((pipe_fuses & (1 << i)) != 0) continue; - pool->base.hubps[i] = dcn21_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { + pool->base.hubps[j] = dcn21_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto create_fail; } - pool->base.ipps[i] = dcn21_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { + pool->base.ipps[j] = dcn21_ipp_create(ctx, i); + if (pool->base.ipps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto create_fail; } - pool->base.dpps[i] = dcn21_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { + pool->base.dpps[j] = dcn21_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { + pool->base.opps[j] = dcn21_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } - pool->base.timing_generators[i] = dcn21_timing_generator_create( + pool->base.timing_generators[j] = dcn21_timing_generator_create( ctx, i); - if (pool->base.timing_generators[i] == NULL) { + if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; @@ -1896,7 
+1870,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn21_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -1905,7 +1878,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); @@ -1936,7 +1908,7 @@ static bool construct( create_fail: - destruct(pool); + dcn21_resource_destruct(pool); return false; } @@ -1951,7 +1923,7 @@ struct resource_pool *dcn21_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 94b75e942607..8bde1d688f2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c( const struct dc_link *link, struct i2c_command *cmd); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable ); -#endif bool dm_helpers_is_dp_sink_present( struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index ef7df9ef6d7e..ae608c329366 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -41,12 +41,8 @@ enum pp_smu_ver { */ PP_SMU_UNSUPPORTED, PP_SMU_VER_RV, -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 PP_SMU_VER_NV, -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) PP_SMU_VER_RN, -#endif PP_SMU_VER_MAX }; @@ -143,7 +139,6 @@ struct pp_smu_funcs_rv { void (*set_pme_wa_enable)(struct pp_smu *pp); }; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 /* Used by pp_smu_funcs_nv.set_voltage_by_freq * */ @@ -247,7 +242,6 @@ struct pp_smu_funcs_nv { enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp, BOOLEAN pstate_handshake_supported); }; -#endif #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 @@ -291,12 +285,8 @@ struct pp_smu_funcs { struct pp_smu ctx; union { struct pp_smu_funcs_rv rv_funcs; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 struct pp_smu_funcs_nv nv_funcs; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct pp_smu_funcs_rn rn_funcs; -#endif }; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 1a0429744630..968ff1fef486 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,9 @@ #undef DEPRECATED +struct dmub_srv; +struct dc_dmub_srv; + irq_handler_idx dm_register_interrupt( struct dc_context *ctx, struct dc_interrupt_params *int_params, @@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub); +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv); + +void reg_sequence_start_gather(const struct dc_context *ctx); +void reg_sequence_start_execute(const struct dc_context *ctx); +void reg_sequence_wait_done(const struct dc_context *ctx); + #define FD(reg_field) reg_field ## __SHIFT, \ reg_field ## _MASK diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h 
b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index a3d1be20dd9d..b52ba6ffabe1 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -220,6 +220,7 @@ struct dm_bl_data_point { }; /* Total size of the structure should not exceed 256 bytes */ +#define BL_DATA_POINTS 99 struct dm_acpi_atif_backlight_caps { uint16_t size; /* Bytes 0-1 (2 bytes) */ uint16_t flags; /* Byted 2-3 (2 bytes) */ @@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps { uint8_t min_input_signal; /* Byte 7 */ uint8_t max_input_signal; /* Byte 8 */ uint8_t num_data_points; /* Byte 9 */ - struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ + struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/ }; enum dm_acpi_display_type { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 8df251626e22..7ee8b8460a9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -1,5 +1,6 @@ # # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@ # It provides the general basic services required by other DAL # subcomponents. +ifdef CONFIG_X86 dml_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dml_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,17 +48,16 @@ dml_ccflags += -mpreferred-stack-boundary=4 else dml_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif @@ -61,11 +68,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ dml_common_defs.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6c6c486b774a..45f028986a8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define 
DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -937,7 +938,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1335,11 +1336,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer else mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2577,7 +2578,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { @@ -2847,12 +2849,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } @@ -3347,7 +3349,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3445,10 +3447,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->FabricAndDRAMBandwidthPerState[i] * 1000) * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; - locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; + locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState; if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3459,7 +3461,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3471,7 +3473,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3482,7 +3484,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3520,12 +3522,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] - + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3893,16 +3895,22 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + 
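+			/* ODM combine now defaults to off; it is enabled below only when
+			 * the single-pipe DISPCLK requirement exceeds the rounded-down DFS
+			 * limit, or when a 4:2:0 output is wider than
+			 * DCN20_MAX_420_IMAGE_WIDTH.
+			 */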
locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -3991,16 +3999,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4182,8 +4190,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4206,7 +4213,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4248,7 +4255,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if (locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4291,7 
+4298,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); else locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4344,28 +4351,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * - locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesChroma), locals->SwathHeightCPerState[i][j][k]); if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]); } else { locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]), locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) - locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * - dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); + dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k])); } } } @@ -4405,14 +4412,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; - mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.PixelClock[k] / 16.0); if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] 
= dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4422,9 +4429,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4435,9 +4442,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } else { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4447,9 +4454,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4459,9 +4466,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.RequiredDPPCLK[i][j][k]); } if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4472,9 +4479,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4510,7 +4517,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], &mode_lib->vba.dpte_row_height[k], &mode_lib->vba.meta_row_height[k]); - mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4549,7 +4556,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); - mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2.0, mode_lib->vba.VTAPsChroma[k], @@ -4563,14 +4570,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma 
+ mode_lib->vba.PTEBufferSizeInRequestsChroma; } - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4597,14 +4604,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] * mode_lib->vba.MetaChunkSize) * 1024.0 - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; if (mode_lib->vba.GPUVMEnable == true) { mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PTEGroupSize - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; } - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4654,7 +4661,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); } @@ -4699,7 +4706,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], @@ -4717,7 +4724,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, - mode_lib->vba.MaximumVStartup[k], + mode_lib->vba.MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], @@ -4727,15 +4734,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], - mode_lib->vba.PrefetchLinesY[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], + mode_lib->vba.PrefetchLinesY[0][0][k], mode_lib->vba.SwathWidthYPerState[i][j][k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.PrefillY[k], mode_lib->vba.MaxNumSwY[k], - mode_lib->vba.PrefetchLinesC[k], + mode_lib->vba.PrefetchLinesC[0][0][k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.PrefillC[k], mode_lib->vba.MaxNumSwC[k], @@ -4766,19 +4773,19 @@ void 
dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->prefetch_vm_bw_valid = true; locals->prefetch_row_bw_valid = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) + if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0) locals->prefetch_vm_bw[k] = 0; else if (locals->LinesForMetaPTE[k] > 0) - locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] + locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k] / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_vm_bw[k] = 0; locals->prefetch_vm_bw_valid = false; } - if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) + if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0) locals->prefetch_row_bw[k] = 0; else if (locals->LinesForMetaAndDPTERow[k] > 0) - locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) + locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]) / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_row_bw[k] = 0; @@ -4797,13 +4804,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) { locals->PrefetchSupported[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4828,7 +4835,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (mode_lib->vba.PrefetchSupported[i][j] == true && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = - mode_lib->vba.ReturnBWPerState[i]; + mode_lib->vba.ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4842,9 +4849,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.ImmediateFlipBytes[k] = - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] - + mode_lib->vba.MetaRowBytes[k] - + mode_lib->vba.DPTEBytesPerRow[k]; + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] + + mode_lib->vba.MetaRowBytes[0][0][k] + + mode_lib->vba.DPTEBytesPerRow[0][0][k]; } } mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4872,9 +4879,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], + 
mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], @@ -4899,7 +4906,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > mode_lib->vba.ReturnBWPerState[i]) { + > mode_lib->vba.ReturnBWPerState[i][0]) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4918,13 +4925,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k]; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * + mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; - if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; + if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true; else - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false; } /*PTE Buffer Size Check*/ @@ -5012,7 +5019,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5022,7 +5029,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_DSC_CLK_REQUIRED; } else if (locals->UrgentLatencySupport[i][j] != true) { status = DML_FAIL_URGENT_LATENCY; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5042,7 +5049,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_PITCH_SUPPORT; } else if (locals->PrefetchSupported[i][j] != true) { status = DML_FAIL_PREFETCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->VRatioInPrefetchSupported[i][j] != true) { status = DML_FAIL_V_RATIO_PREFETCH; @@ -5088,7 +5095,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = 
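Note: most of these dml20 hunks widen formerly one-dimensional per-state locals (ReturnBWPerState[i], ProjectedDCFCLKDeepSleep, PrefetchLinesY[k], MetaRowBytes[k], MaximumVStartup[k], ...) by one or two extra indices that are always 0 on this path, which appears to align the dcn20 arrays with the shapes newer DML code expects; only column 0 is populated and read here. A minimal sketch of the resulting access pattern, using hypothetical types and sizes rather than the real display_mode_lib definitions:

	#include <stdio.h>

	#define NUM_STATES 9   /* hypothetical voltage-state count */
	#define MAX_PLANES 6   /* hypothetical plane count */

	/* Stand-in for the per-state locals: the trailing index selects a sub-state
	 * column, and the dcn20 paths in this patch always use column 0. */
	struct per_state_locals {
		double ReturnBWPerState[NUM_STATES + 1][2];
		double PrefetchLinesY[2][2][MAX_PLANES];
	};

	static double return_bw_for_state(const struct per_state_locals *l, int i)
	{
		return l->ReturnBWPerState[i][0];   /* previously l->ReturnBWPerState[i] */
	}

	int main(void)
	{
		struct per_state_locals l = { .ReturnBWPerState[3][0] = 512.0 };

		printf("state 3 return BW: %.1f\n", return_bw_for_state(&l, 3));
		return 0;
	}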
mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 3c70dd577292..485a9c62ec58 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -39,6 +39,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -997,7 +998,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1395,11 +1396,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP else mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2611,9 +2612,13 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; + } else if (mode_lib->vba.DummyPStateCheck && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank; @@ -2881,12 +2886,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = 
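Note: the v2 watermark code above now reports dm_dram_clock_change_vactive on two paths: when the platform opts in via DRAMClockChangeSupportsVActive and the active-time margin exceeds 60 us (this path also keeps the +25 watermark bump), or when a dummy p-state check is running and any positive margin remains; otherwise the existing vblank/unsupported decision applies. A condensed sketch of that decision with scalar parameters in place of the real mode_lib state:

	#include <stdbool.h>
	#include <stdio.h>

	enum dram_clock_change { CHANGE_VACTIVE, CHANGE_VBLANK, CHANGE_UNSUPPORTED };

	/* Distilled decision; margins are in microseconds. */
	static enum dram_clock_change classify_dram_clock_change(bool supports_vactive,
								 bool dummy_pstate_check,
								 double min_active_margin_us,
								 bool synchronized_vblank,
								 int active_planes)
	{
		if (supports_vactive && min_active_margin_us > 60)
			return CHANGE_VACTIVE;
		if (dummy_pstate_check && min_active_margin_us > 0)
			return CHANGE_VACTIVE;
		if (synchronized_vblank || active_planes == 1)
			return CHANGE_VBLANK;
		return CHANGE_UNSUPPORTED;
	}

	int main(void)
	{
		printf("%d\n", classify_dram_clock_change(true, false, 75.0, false, 2));  /* vactive */
		printf("%d\n", classify_dram_clock_change(false, true, 5.0, false, 2));   /* vactive via dummy p-state */
		printf("%d\n", classify_dram_clock_change(false, false, 5.0, true, 2));   /* vblank */
		return 0;
	}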
true; } } @@ -3381,7 +3386,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3479,10 +3484,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->FabricAndDRAMBandwidthPerState[i] * 1000) * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; - locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; + locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState; if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3493,7 +3498,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3505,7 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3516,7 +3521,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3554,12 +3559,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] - + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - 
mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3931,18 +3936,25 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4031,16 +4043,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - 
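Note: the ODM-combine selection above is rewritten from one compound condition into a cleared default plus three independent triggers, all gated on ODMCapability: the non-combined DISPCLK exceeding the rounded-down maximum, a DSC-enabled plane wider than DCN20_MAX_DSC_IMAGE_WIDTH (5184), or 4:2:0 output wider than the new DCN20_MAX_420_IMAGE_WIDTH (4096). The same policy as a standalone predicate (hypothetical helper, not part of the patch):

	#include <stdbool.h>
	#include <stdio.h>

	#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
	#define DCN20_MAX_420_IMAGE_WIDTH 4096

	/* Returns true when 2:1 ODM combine should be forced for a plane. */
	static bool want_odm_combine_2to1(bool odm_capable,
					  double dispclk_no_combine_mhz,
					  double max_dispclk_mhz,
					  bool dsc_enabled,
					  int hactive,
					  bool output_is_420)
	{
		if (!odm_capable)
			return false;
		if (dispclk_no_combine_mhz > max_dispclk_mhz)
			return true;
		if (dsc_enabled && hactive > DCN20_MAX_DSC_IMAGE_WIDTH)
			return true;
		if (output_is_420 && hactive > DCN20_MAX_420_IMAGE_WIDTH)
			return true;
		return false;
	}

	int main(void)
	{
		/* A 5120-wide 4:2:0 timing now combines even though DSC alone would not require it. */
		printf("%d\n", want_odm_combine_2to1(true, 600.0, 1200.0, false, 5120, true));
		return 0;
	}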
locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4222,8 +4234,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4246,7 +4257,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4288,7 +4299,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if (locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4331,7 +4342,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); else locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4384,28 +4395,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * - locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesChroma), locals->SwathHeightCPerState[i][j][k]); if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]); } else { 
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]), locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) - locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * - dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); + dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k])); } } } @@ -4450,14 +4461,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; - mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.PixelClock[k] / 16.0); if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4467,9 +4478,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4480,9 +4491,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } } else { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4492,9 +4503,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4504,9 +4515,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.RequiredDPPCLK[i][j][k]); } if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4517,9 +4528,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / 
mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4555,7 +4566,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], &mode_lib->vba.dpte_row_height[k], &mode_lib->vba.meta_row_height[k]); - mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4594,7 +4605,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); - mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2.0, mode_lib->vba.VTAPsChroma[k], @@ -4608,14 +4619,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4642,14 +4653,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] * mode_lib->vba.MetaChunkSize) * 1024.0 - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; if (mode_lib->vba.GPUVMEnable == true) { mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PTEGroupSize - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; } - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4699,7 +4710,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); } @@ -4739,7 +4750,7 @@ void 
dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; } - CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, + CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, @@ -4753,14 +4764,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, - mode_lib->vba.MaximumVStartup[k], + mode_lib->vba.MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], @@ -4770,15 +4781,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], - mode_lib->vba.PrefetchLinesY[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], + mode_lib->vba.PrefetchLinesY[0][0][k], mode_lib->vba.SwathWidthYPerState[i][j][k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.PrefillY[k], mode_lib->vba.MaxNumSwY[k], - mode_lib->vba.PrefetchLinesC[k], + mode_lib->vba.PrefetchLinesC[0][0][k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.PrefillC[k], mode_lib->vba.MaxNumSwC[k], @@ -4808,19 +4819,19 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->prefetch_vm_bw_valid = true; locals->prefetch_row_bw_valid = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) + if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0) locals->prefetch_vm_bw[k] = 0; else if (locals->LinesForMetaPTE[k] > 0) - locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] + locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k] / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_vm_bw[k] = 0; locals->prefetch_vm_bw_valid = false; } - if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) + if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0) locals->prefetch_row_bw[k] = 0; else if (locals->LinesForMetaAndDPTERow[k] > 0) - locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] 
+ locals->DPTEBytesPerRow[k]) + locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]) / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_row_bw[k] = 0; @@ -4839,13 +4850,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) { locals->PrefetchSupported[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4870,7 +4881,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode if (mode_lib->vba.PrefetchSupported[i][j] == true && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = - mode_lib->vba.ReturnBWPerState[i]; + mode_lib->vba.ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4884,9 +4895,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.ImmediateFlipBytes[k] = - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] - + mode_lib->vba.MetaRowBytes[k] - + mode_lib->vba.DPTEBytesPerRow[k]; + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] + + mode_lib->vba.MetaRowBytes[0][0][k] + + mode_lib->vba.DPTEBytesPerRow[0][0][k]; } } mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4914,9 +4925,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], @@ -4941,7 +4952,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > mode_lib->vba.ReturnBWPerState[i]) { + > mode_lib->vba.ReturnBWPerState[i][0]) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4957,13 +4968,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode /*Vertical Active BW support*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * + 
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; - if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; + if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true; else - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false; } /*PTE Buffer Size Check*/ @@ -5051,7 +5062,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5061,7 +5072,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_DSC_CLK_REQUIRED; } else if (locals->UrgentLatencySupport[i][j] != true) { status = DML_FAIL_URGENT_LATENCY; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5081,7 +5092,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_PITCH_SUPPORT; } else if (locals->PrefetchSupported[i][j] != true) { status = DML_FAIL_PREFETCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->VRatioInPrefetchSupported[i][j] != true) { status = DML_FAIL_V_RATIO_PREFETCH; @@ -5127,7 +5138,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 2c7455e22a65..ca807846032f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || 
(source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for yuv420 8bpc) - req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, const display_pipe_source_params_st pipe_src_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; - disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start - + min_dst_y_ttu_vblank) * dml_pow(2, 2)); + disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2)); ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18)); dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n", @@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // TODO + mode_422 = false; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 1e6aeb1bd2bf..287b7a0ad108 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || (source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for yuv420 8bpc) - req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, const display_pipe_source_params_st pipe_src_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // TODO + mode_422 = false; // TODO access_dir = 
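Note: min_dst_y_next_start is now derived from the interlace-adjusted vblank start line alone; the min_dst_y_ttu_vblank term is no longer added. The register keeps its unsigned fixed-point encoding with two fractional bits (the dml_pow(2, 2) scaling) and must stay below 2^18. A small illustration of that encoding under those assumptions:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Encode a start line into the register format used above: two fractional
	 * bits, i.e. the line number scaled by 2^2. */
	static unsigned int encode_min_dst_y_next_start(unsigned int vblank_start, bool interlaced)
	{
		double dlg_vblank_start = interlaced ? vblank_start / 2 : vblank_start;
		unsigned int reg = (unsigned int)(dlg_vblank_start * 4.0);

		assert(reg < (1u << 18));   /* same bound the DML code asserts */
		return reg;
	}

	int main(void)
	{
		printf("0x%x\n", encode_min_dst_y_next_start(2160, false));   /* 2160 lines -> 0x21c0 */
		return 0;
	}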
(src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index ba77957aefe3..e6617c958bb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../dml_inline_defs.h" @@ -66,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN21_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN21_MAX_420_IMAGE_WIDTH 4096 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -198,7 +198,7 @@ static unsigned int CalculateVMAndRowBytes( unsigned int *meta_row_width, unsigned int *meta_row_height, unsigned int *vm_group_bytes, - long *dpte_group_bytes, + unsigned int *dpte_group_bytes, unsigned int *PixelPTEReqWidth, unsigned int *PixelPTEReqHeight, unsigned int *PTERequestSize, @@ -296,7 +296,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double UrgentOutOfOrderReturn, double ReturnBW, bool GPUVMEnable, - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, @@ -310,13 +310,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - unsigned int SwathWidthSingleDPPY[], + double SwathWidthSingleDPPY[], unsigned int SwathHeightY[], double ReadBandwidthPlaneLuma[], unsigned int SwathHeightC[], double ReadBandwidthPlaneChroma[], unsigned int LBBitPerPixel[], - unsigned int SwathWidthY[], + double SwathWidthY[], double HRatio[], unsigned int vtaps[], unsigned int VTAPsChroma[], @@ -345,7 +345,7 @@ static void CalculateDCFCLKDeepSleep( double BytePerPixelDETY[], double BytePerPixelDETC[], double VRatio[], - unsigned int SwathWidthY[], + double SwathWidthY[], int DPPPerPlane[], double HRatio[], double PixelClock[], @@ -436,7 +436,7 @@ static void CalculateMetaAndPTETimes( unsigned int meta_row_height[], unsigned int meta_req_width[], unsigned int meta_req_height[], - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int PTERequestSizeY[], unsigned int PTERequestSizeC[], unsigned int PixelPTEReqWidthY[], @@ -478,7 +478,7 @@ static double CalculateExtraLatency( bool HostVMEnable, int NumberOfActivePlanes, int NumberOfDPP[], - long dpte_group_bytes[], + int dpte_group_bytes[], double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, int HostVMMaxPageTableLevels, @@ -1281,7 +1281,7 @@ static unsigned int CalculateVMAndRowBytes( unsigned int *meta_row_width, unsigned int *meta_row_height, unsigned int *vm_group_bytes, - long *dpte_group_bytes, + unsigned int *dpte_group_bytes, unsigned int *PixelPTEReqWidth, unsigned int *PixelPTEReqHeight, unsigned int *PTERequestSize, @@ -1339,7 +1339,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if 
(SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1684,11 +1684,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman else locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2941,12 +2941,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } @@ -3454,7 +3454,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3543,17 +3543,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->IdealSDPPortBandwidthPerState[i] = dml_min3( + locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3( mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth, mode_lib->vba.FabricClockPerState[i] * mode_lib->vba.FabricDatapathToDCNDataReturn); if (mode_lib->vba.HostVMEnable == false) { - locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] + locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0] * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0; } else { - locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] + locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0] * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0; } } @@ -3590,12 +3590,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly, mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData, mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly) - * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / 
locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3972,18 +3972,25 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4072,16 +4079,16 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4122,11 +4129,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes 
- 1; k++) { - locals->RequiresDSC[i][k] = 0; + locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = 0; if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.Output[k] == dm_hdmi) { - locals->RequiresDSC[i][k] = 0; + locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = 0; locals->OutputBppPerState[i][k] = TruncToValidBPP( dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, @@ -4270,8 +4277,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4294,7 +4300,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4336,7 +4342,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if (locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4400,7 +4406,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k]; locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k]; - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { locals->SwathWidthYThisState[k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])); } else { @@ -4452,7 +4458,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PSCL_FACTOR, locals->PSCL_FACTOR_CHROMA, locals->RequiredDPPCLKThisState, - &mode_lib->vba.ProjectedDCFCLKDeepSleep); + &mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]); for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 @@ -4497,7 +4503,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PTERequestSizeC, locals->dpde0_bytes_per_frame_ub_c, locals->meta_pte_bytes_per_frame_ub_c); - locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines( + locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k]/2, mode_lib->vba.VTAPsChroma[k], @@ -4512,7 +4518,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + 
locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } @@ -4553,7 +4559,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PTERequestSizeY, locals->dpde0_bytes_per_frame_ub_l, locals->meta_pte_bytes_per_frame_ub_l); - locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines( + locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4563,10 +4569,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.ViewportYStartY[k], &locals->PrefillY[k], &locals->MaxNumSwY[k]); - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4592,7 +4598,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PixelChunkSizeInKByte, locals->TotalNumberOfDCCActiveDPP[i][j], mode_lib->vba.MetaChunkSize, - locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0], mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable, mode_lib->vba.NumberOfActivePlanes, @@ -4603,7 +4609,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.HostVMMaxPageTableLevels, mode_lib->vba.HostVMCachedPageTableLevels); - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.WritebackEnable[k] == true) { @@ -4645,15 +4651,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } } - mode_lib->vba.MaxMaxVStartup = 0; + mode_lib->vba.MaxMaxVStartup[0][0] = 0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); - mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]); + mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]); } mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode; - mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; + mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0]; do { mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode; mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup; @@ -4694,7 +4700,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; myPipe.DISPCLK = 
locals->RequiredDISPCLK[i][j]; myPipe.PixelClock = mode_lib->vba.PixelClock[k]; - myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep; + myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; myPipe.SourceScan = mode_lib->vba.SourceScan[k]; @@ -4728,8 +4734,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.MaxInterDCNTileRepeaters, - dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]), - locals->MaximumVStartup[k], + dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), + locals->MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, &myHostVM, @@ -4740,15 +4746,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentLatency, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - locals->PDEAndMetaPTEBytesPerFrame[k], - locals->MetaRowBytes[k], - locals->DPTEBytesPerRow[k], - locals->PrefetchLinesY[k], + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], + locals->PrefetchLinesY[0][0][k], locals->SwathWidthYThisState[k], locals->BytePerPixelInDETY[k], locals->PrefillY[k], locals->MaxNumSwY[k], - locals->PrefetchLinesC[k], + locals->PrefetchLinesC[0][0][k], locals->BytePerPixelInDETC[k], locals->PrefillC[k], locals->MaxNumSwC[k], @@ -4837,14 +4843,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i] + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0] || locals->NotEnoughUrgentLatencyHiding == 1) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i] + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0] || locals->NotEnoughUrgentLatencyHiding == 1 || locals->NotEnoughUrgentLatencyHidingPre == 1) { locals->PrefetchSupported[i][j] = false; @@ -4873,17 +4879,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) { - mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; + mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0]; mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1; } else { mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1; } } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) - && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup + && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0] || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode)); if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) { - 
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i]; + mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip - dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k] @@ -4896,7 +4902,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotImmediateFlipBytes = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes - + locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]; + + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4911,9 +4917,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.HostVMMaxPageTableLevels, mode_lib->vba.HostVMCachedPageTableLevels, mode_lib->vba.GPUVMEnable, - locals->PDEAndMetaPTEBytesPerFrame[k], - locals->MetaRowBytes[k], - locals->DPTEBytesPerRow[k], + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], mode_lib->vba.BandwidthAvailableForImmediateFlip, mode_lib->vba.TotImmediateFlipBytes, mode_lib->vba.SourcePixelFormat[k], @@ -4944,7 +4950,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } locals->ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > locals->ReturnBWPerState[i]) { + > locals->ReturnBWPerState[i][0]) { locals->ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4971,7 +4977,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.WritebackInterfaceChromaBufferSize, mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels, - locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0], mode_lib->vba.GPUVMEnable, locals->dpte_group_bytes, mode_lib->vba.MetaChunkSize, @@ -4983,7 +4989,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.SRExitTime, mode_lib->vba.SREnterPlusExitTime, - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], locals->NoOfDPPThisState, mode_lib->vba.DCCEnable, locals->RequiredDPPCLKThisState, @@ -5026,8 +5032,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k]; } for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) { - locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min( - locals->IdealSDPPortBandwidthPerState[i] * + locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min( + locals->IdealSDPPortBandwidthPerState[i][0] * mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100.0, mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels * @@ -5035,10 +5041,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100.0); - if 
(MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) { - locals->TotalVerticalActiveBandwidthSupport[i] = true; + if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) { + locals->TotalVerticalActiveBandwidthSupport[i][0] = true; } else { - locals->TotalVerticalActiveBandwidthSupport[i] = false; + locals->TotalVerticalActiveBandwidthSupport[i][0] = false; } } } @@ -5117,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5125,7 +5131,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_NOT_ENOUGH_DSC; } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) { status = DML_FAIL_DSC_CLK_REQUIRED; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5143,7 +5149,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_CURSOR_SUPPORT; } else if (mode_lib->vba.PitchSupport != true) { status = DML_FAIL_PITCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) { status = DML_FAIL_PTE_BUFFER_SIZE; @@ -5199,13 +5205,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { mode_lib->vba.ODMCombineEnabled[k] = locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; } else { - mode_lib->vba.ODMCombineEnabled[k] = 0; + mode_lib->vba.ODMCombineEnabled[k] = false; } mode_lib->vba.DSCEnabled[k] = locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; @@ -5228,7 +5234,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double UrgentOutOfOrderReturn, double ReturnBW, bool GPUVMEnable, - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, @@ -5242,13 +5248,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - unsigned int SwathWidthSingleDPPY[], + double SwathWidthSingleDPPY[], unsigned int SwathHeightY[], double ReadBandwidthPlaneLuma[], unsigned int SwathHeightC[], double ReadBandwidthPlaneChroma[], unsigned int LBBitPerPixel[], - unsigned int SwathWidthY[], + double SwathWidthY[], double HRatio[], unsigned int vtaps[], unsigned int VTAPsChroma[], @@ -5504,7 +5510,7 
@@ static void CalculateDCFCLKDeepSleep( double BytePerPixelDETY[], double BytePerPixelDETC[], double VRatio[], - unsigned int SwathWidthY[], + double SwathWidthY[], int DPPPerPlane[], double HRatio[], double PixelClock[], @@ -5832,7 +5838,7 @@ static void CalculateMetaAndPTETimes( unsigned int meta_row_height[], unsigned int meta_req_width[], unsigned int meta_req_height[], - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int PTERequestSizeY[], unsigned int PTERequestSizeC[], unsigned int PixelPTEReqWidthY[], @@ -6088,7 +6094,7 @@ static double CalculateExtraLatency( bool HostVMEnable, int NumberOfActivePlanes, int NumberOfDPP[], - long dpte_group_bytes[], + int dpte_group_bytes[], double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, int HostVMMaxPageTableLevels, @@ -6126,4 +6132,3 @@ static double CalculateExtraLatency( return CalculateExtraLatency; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a1f207cbb966..a38baa73d484 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../display_mode_vba.h" @@ -83,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || (source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -223,8 +222,8 @@ static void handle_det_buf_split( unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -249,13 +248,13 @@ static void handle_det_buf_split( total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for yuv420 8bpc) - req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -265,9 +264,9 @@ static void handle_det_buf_split( total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -680,7 +679,7 @@ static void get_surf_rq_param( const display_pipe_params_st pipe_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -1011,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params( // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class) (src->source_format)); - mode_422 = 0; // FIXME + mode_422 = false; // FIXME access_dir 
= (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1523,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; - disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; + disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; + disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // Clamp to max for now if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23)) @@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor( } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1c97083b8d0b..bfc2f39bd1ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -85,7 +85,7 @@ enum dm_swizzle_mode { dm_sw_var_s_x = 29, dm_sw_var_d_x = 30, dm_sw_64kb_r_x, - dm_sw_gfx7_2d_thin_lvp, + dm_sw_gfx7_2d_thin_l_vp, dm_sw_gfx7_2d_thin_gl, }; enum lb_depth { @@ -119,6 +119,10 @@ enum mpc_combine_affinity { dm_mpc_never }; +enum RequestType { + REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA +}; + enum self_refresh_affinity { dm_try_to_allow_self_refresh_and_mclk_switch, dm_allow_self_refresh_and_mclk_switch, @@ -135,9 +139,7 @@ enum dm_validation_status { DML_FAIL_DIO_SUPPORT, DML_FAIL_NOT_ENOUGH_DSC, DML_FAIL_DSC_CLK_REQUIRED, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DML_FAIL_DSC_VALIDATION_FAILURE, -#endif DML_FAIL_URGENT_LATENCY, DML_FAIL_REORDERING_BUFFER, DML_FAIL_DISPCLK_DPPCLK, @@ -167,4 +169,16 @@ enum odm_combine_mode { dm_odm_combine_mode_4to1, }; +enum odm_combine_policy { + dm_odm_combine_policy_dal, + dm_odm_combine_policy_none, + dm_odm_combine_policy_2to1, + dm_odm_combine_policy_4to1, +}; + +enum immediate_flip_requirement { + dm_immediate_flip_not_required, + dm_immediate_flip_required, +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 704efefdcba8..2689401a03a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -25,18 +25,13 @@ #include "display_mode_lib.h" #include "dc_features.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/display_mode_vba_20.h" #include "dcn20/display_rq_dlg_calc_20.h" #include "dcn20/display_mode_vba_20v2.h" #include "dcn20/display_rq_dlg_calc_20v2.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #include "dcn21/display_mode_vba_21.h" #include "dcn21/display_rq_dlg_calc_21.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dml_funcs dml20_funcs = { .validate = 
dml20_ModeSupportAndSystemConfigurationFull, .recalculate = dml20_recalculate, @@ -50,16 +45,13 @@ const struct dml_funcs dml20v2_funcs = { .rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg }; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 const struct dml_funcs dml21_funcs = { .validate = dml21_ModeSupportAndSystemConfigurationFull, .recalculate = dml21_recalculate, .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg }; -#endif void dml_init_instance(struct display_mode_lib *lib, const struct _vcs_dpi_soc_bounding_box_st *soc_bb, @@ -70,19 +62,15 @@ void dml_init_instance(struct display_mode_lib *lib, lib->ip = *ip_params; lib->project = project; switch (project) { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case DML_PROJECT_NAVI10: lib->funcs = dml20_funcs; break; case DML_PROJECT_NAVI10v2: lib->funcs = dml20v2_funcs; break; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DML_PROJECT_DCN21: lib->funcs = dml21_funcs; break; -#endif default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d8c59aa356b6..cf2758ca5b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -27,20 +27,14 @@ #include "dml_common_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_vba.h" -#endif enum dml_project { DML_PROJECT_UNDEFINED, DML_PROJECT_RAVEN1, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DML_PROJECT_NAVI10, DML_PROJECT_NAVI10v2, -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML_PROJECT_DCN21, -#endif }; struct display_mode_lib; @@ -70,9 +64,7 @@ struct display_mode_lib { struct _vcs_dpi_ip_params_st ip; struct _vcs_dpi_soc_bounding_box_st soc; enum dml_project project; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct vba_vars_st vba; -#endif struct dal_logger *logger; struct dml_funcs funcs; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cfacd6027467..658f81e757e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st { double dispclk_mhz; double phyclk_mhz; double dppclk_mhz; + double dtbclk_mhz; }; struct _vcs_dpi_soc_bounding_box_st { @@ -99,6 +100,7 @@ struct _vcs_dpi_soc_bounding_box_st { unsigned int num_chans; unsigned int vmm_page_size_bytes; unsigned int hostvm_min_page_size_bytes; + unsigned int gpuvm_min_page_size_bytes; double dram_clock_change_latency_us; double dummy_pstate_latency_us; double writeback_dram_clock_change_latency_us; @@ -112,6 +114,7 @@ struct _vcs_dpi_soc_bounding_box_st { bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; double urgent_latency_adjustment_fabric_clock_reference_mhz; + bool disable_dram_clock_change_vactive_support; }; struct _vcs_dpi_ip_params_st { @@ -145,7 +148,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_interface_buffer_size_kbytes; unsigned int writeback_line_buffer_buffer_size; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 unsigned int writeback_10bpc420_supported; double writeback_max_hscl_ratio; double writeback_max_vscl_ratio; @@ -155,7 +157,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_max_vscl_taps; unsigned int writeback_line_buffer_luma_buffer_size; unsigned int writeback_line_buffer_chroma_buffer_size; -#endif unsigned int max_page_table_levels; 
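Illustrative sketch (not part of the patch): with the CONFIG_DRM_AMD_DC_DCN2_0/2_1 guards removed from display_mode_lib.c/.h above, every DML project table is always built and dml_init_instance() selects one at run time through lib->funcs. A minimal sketch of that dispatch, assuming the usual parameter list and that the SoC bounding box and IP params are filled in by the resource code; only the identifiers shown in the hunks above are real, the helper name is invented.

#include "display_mode_lib.h"

/* Illustrative helper only. */
static void init_dml_for_dcn21(struct display_mode_lib *lib,
                               const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
                               const struct _vcs_dpi_ip_params_st *ip_params)
{
        /* Copies *soc_bb and *ip_params into lib and sets lib->funcs = dml21_funcs. */
        dml_init_instance(lib, soc_bb, ip_params, DML_PROJECT_DCN21);

        /* Callers then dispatch through the table, e.g.
         * lib->funcs.validate(lib) lands in
         * dml21_ModeSupportAndSystemConfigurationFull().
         */
}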
unsigned int max_num_dpp; @@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_format; unsigned char dcc; unsigned int dcc_rate; + unsigned int dcc_rate_chroma; unsigned char dcc_use_global; unsigned char vm; bool gpuvm; // gpuvm enabled @@ -225,6 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_scan; int sw_mode; int macro_tile_size; + unsigned int surface_width_y; + unsigned int surface_height_y; + unsigned int surface_width_c; + unsigned int surface_height_c; unsigned int viewport_width; unsigned int viewport_height; unsigned int viewport_y_y; @@ -277,6 +283,7 @@ struct _vcs_dpi_display_output_params_st { int output_type; int output_format; int dsc_slices; + int max_audio_sample_rate; struct writeback_st wb; }; @@ -322,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; - unsigned char odm_combine; + unsigned int odm_combine; unsigned char use_maximum_vstartup; unsigned int vtotal_max; unsigned int vtotal_min; @@ -401,6 +408,7 @@ struct _vcs_dpi_display_rq_misc_params_st { struct _vcs_dpi_display_rq_params_st { unsigned char yuv420; unsigned char yuv420_10bpc; + unsigned char rgbe_alpha; display_rq_misc_params_st misc; display_rq_sizing_params_st sizing; display_rq_dlg_params_st dlg; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 7f9a5621922f..b3c96d9b472f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_lib.h" #include "display_mode_vba.h" @@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.SRExitTime = soc->sr_exit_time_us; mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us; mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us; + mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us; + mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support || + mode_lib->vba.DummyPStateCheck; + mode_lib->vba.Downspreading = soc->downspread_percent; mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new! mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new! mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes; - mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024; + mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024; mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024; // Set the voltage scaling clocks as the defaults. 
Most of these will // be set to different values by the test @@ -261,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts; //mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz; mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz; + mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz; } + mode_lib->vba.MinVoltageLevel = 0; + mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states; mode_lib->vba.DoUrgentLatencyAdjustment = soc->do_urgent_latency_adjustment; @@ -303,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib) mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes; mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size; - mode_lib->vba.MinVoltageLevel = 0; - mode_lib->vba.MaxVoltageLevel = 5; mode_lib->vba.WritebackChromaLineBufferWidth = ip->writeback_chroma_line_buffer_width_pixels; @@ -420,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) ip->dcc_supported : src->dcc && ip->dcc_supported; mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate; /* TODO: Needs to be set based on src->dcc_rate_luma/chroma */ - mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0; - mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0; + mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate; + mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma; mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] = (enum source_format_class) (src->source_format); @@ -433,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? 
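Illustrative sketch (not part of the patch): the fetch_socbb_params() hunk above derives two new flags from the SoC bounding box. Restated here as a standalone helper with the decision spelled out in comments; the helper name is invented, the field names come from the hunks in this series.

#include "display_mode_structs.h"

static bool dram_clock_change_ok_in_vactive(
                const struct _vcs_dpi_soc_bounding_box_st *soc)
{
        /* DummyPStateCheck: the dummy p-state latency has been programmed as
         * the real DRAM clock-change latency, i.e. the two values match. */
        bool dummy_pstate_check =
                soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;

        /* DRAMClockChangeSupportsVActive: allowed unless the SoC opts out via
         * disable_dram_clock_change_vactive_support, and the dummy p-state
         * case is always allowed:
         *   opt-out = 0              -> supported
         *   opt-out = 1, dummy = 0   -> not supported
         *   opt-out = 1, dummy = 1   -> supported
         */
        return !soc->disable_dram_clock_change_vactive_support || dummy_pstate_check;
}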
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; - mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = - dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = @@ -451,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dout->dp_lanes; /* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */ mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] = - 44.1 * 1000; + dout->max_audio_sample_rate; mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; @@ -587,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) { display_pipe_source_params_st *src_k = &pipes[k].pipe.src; display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest; + display_output_params_st *dout_k = &pipes[j].dout; if (src_k->is_hsplit && !visited[k] && src->hsplit_grp == src_k->hsplit_grp) { @@ -597,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_width; + mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] += + src_k->viewport_width; mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] += dst_k->recout_width; } else { mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_height; + mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] += + src_k->viewport_height; } + mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] += + dout_k->dsc_slices; visited[k] = true; } @@ -808,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib) unsigned int total_pipes = 0; mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage; - mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]; + if (mode_lib->vba.ReturnBW == 0) + mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; fetch_socbb_params(mode_lib); @@ -858,4 +869,3 @@ double CalculateWriteBackDISPCLK( return CalculateWriteBackDISPCLK; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 1540ffbe3979..2875efd85467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ @@ -155,7 +154,10 @@ struct vba_vars_st { double UrgentLatencySupportUsChroma; unsigned int DSCFormatFactor; + bool DummyPStateCheck; + bool DRAMClockChangeSupportsVActive; bool PrefetchModeSupported; + bool PrefetchAndImmediateFlipSupported; enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only double XFCRemoteSurfaceFlipDelay; double TInitXFill; @@ -317,8 +319,7 @@ struct vba_vars_st { unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX]; 
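Reading note (an interpretation, not stated in the patch): many vba_vars_st arrays below gain a second dimension of size 2. Going by the new maxMpcComb index used in ModeSupportAndSystemConfiguration() above and the new MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX] field, that index appears to select between the two MPC-combine alternatives evaluated per voltage state, with slot 0 as the default. The ReturnBW fallback added above can then be read as:

        /* Lines from the hunk above, re-indented; comments are editorial. */
        mode_lib->vba.ReturnBW =
                mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel]
                                              [mode_lib->vba.maxMpcComb];
        if (mode_lib->vba.ReturnBW == 0)
                /* The chosen combine slot was never filled in; fall back to
                 * the default slot 0. */
                mode_lib->vba.ReturnBW =
                        mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];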
double DCCRate[DC__NUM_DPP__MAX]; double AverageDCCCompressionRate; - bool ODMCombineEnabled[DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineTypeEnabled[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX]; double OutputBpp[DC__NUM_DPP__MAX]; bool DSCEnabled[DC__NUM_DPP__MAX]; unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX]; @@ -346,6 +347,7 @@ struct vba_vars_st { unsigned int EffectiveLBLatencyHidingSourceLinesChroma; double BandwidthAvailableForImmediateFlip; unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2]; + unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2]; unsigned int MinPrefetchMode; unsigned int MaxPrefetchMode; bool AnyLinesForVMOrRowTooLarge; @@ -395,6 +397,7 @@ struct vba_vars_st { bool WritebackLumaAndChromaScalingSupported; bool Cursor64BppSupport; double DCFCLKPerState[DC__VOLTAGE_STATES + 1]; + double DCFCLKState[DC__VOLTAGE_STATES + 1][2]; double FabricClockPerState[DC__VOLTAGE_STATES + 1]; double SOCCLKPerState[DC__VOLTAGE_STATES + 1]; double PHYCLKPerState[DC__VOLTAGE_STATES + 1]; @@ -443,7 +446,7 @@ struct vba_vars_st { double OutputLinkDPLanes[DC__NUM_DPP__MAX]; double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only double ImmediateFlipBW[DC__NUM_DPP__MAX]; - double MaxMaxVStartup; + double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2]; double WritebackLumaVExtra; double WritebackChromaVExtra; @@ -470,7 +473,7 @@ struct vba_vars_st { double RoundedUpMaxSwathSizeBytesC; double EffectiveDETLBLinesLuma; double EffectiveDETLBLinesChroma; - double ProjectedDCFCLKDeepSleep; + double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2]; double PDEAndMetaPTEBytesPerFrameY; double PDEAndMetaPTEBytesPerFrameC; unsigned int MetaRowBytesY; @@ -488,12 +491,11 @@ struct vba_vars_st { double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output /* ms locals */ - double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1]; + double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2]; unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; int NoOfDPPThisState[DC__NUM_DPP__MAX]; - bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; + double SwathWidthYThisState[DC__NUM_DPP__MAX]; unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX]; unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX]; @@ -505,7 +507,7 @@ struct vba_vars_st { double RequiredDPPCLKThisState[DC__NUM_DPP__MAX]; bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; - bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1]; + bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2]; bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2]; bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2]; double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2]; @@ -514,22 +516,22 @@ struct vba_vars_st { unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2]; unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2]; bool ModeSupport[DC__VOLTAGE_STATES + 1][2]; - double ReturnBWPerState[DC__VOLTAGE_STATES + 1]; + double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2]; bool 
DIOSupport[DC__VOLTAGE_STATES + 1]; bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1]; bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1]; bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1]; double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1]; - bool ROBSupport[DC__VOLTAGE_STATES + 1]; + bool ROBSupport[DC__VOLTAGE_STATES + 1][2]; bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2]; - bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1]; - double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1]; + bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2]; + double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2]; double PrefetchBW[DC__NUM_DPP__MAX]; - double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX]; - double MetaRowBytes[DC__NUM_DPP__MAX]; - double DPTEBytesPerRow[DC__NUM_DPP__MAX]; - double PrefetchLinesY[DC__NUM_DPP__MAX]; - double PrefetchLinesC[DC__NUM_DPP__MAX]; + double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int MaxNumSwY[DC__NUM_DPP__MAX]; unsigned int MaxNumSwC[DC__NUM_DPP__MAX]; double PrefillY[DC__NUM_DPP__MAX]; @@ -538,7 +540,7 @@ struct vba_vars_st { double LinesForMetaPTE[DC__NUM_DPP__MAX]; double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX]; double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX]; - unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX]; + double SwathWidthYSingleDPP[DC__NUM_DPP__MAX]; double BytePerPixelInDETY[DC__NUM_DPP__MAX]; double BytePerPixelInDETC[DC__NUM_DPP__MAX]; bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; @@ -546,7 +548,7 @@ struct vba_vars_st { double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1]; + bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2]; unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX]; unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX]; unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX]; @@ -561,7 +563,7 @@ struct vba_vars_st { double WriteBandwidth[DC__NUM_DPP__MAX]; double PSCL_FACTOR[DC__NUM_DPP__MAX]; double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX]; - double MaximumVStartup[DC__NUM_DPP__MAX]; + double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int MacroTileWidthY[DC__NUM_DPP__MAX]; unsigned int MacroTileWidthC[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitch[DC__NUM_DPP__MAX]; @@ -578,7 +580,7 @@ struct vba_vars_st { bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2]; double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; unsigned int vm_group_bytes[DC__NUM_DPP__MAX]; - long dpte_group_bytes[DC__NUM_DPP__MAX]; + unsigned int dpte_group_bytes[DC__NUM_DPP__MAX]; unsigned int dpte_row_height[DC__NUM_DPP__MAX]; unsigned int meta_req_height[DC__NUM_DPP__MAX]; unsigned int meta_req_width[DC__NUM_DPP__MAX]; @@ -604,14 +606,14 @@ struct vba_vars_st { double UrgentBurstFactorChroma[DC__NUM_DPP__MAX]; double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX]; + bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; double SwathWidthCSingleDPP[DC__NUM_DPP__MAX]; double 
MaximumSwathWidthInLineBufferLuma; double MaximumSwathWidthInLineBufferChroma; double MaximumSwathWidthLuma[DC__NUM_DPP__MAX]; double MaximumSwathWidthChroma[DC__NUM_DPP__MAX]; - bool odm_combine_dummy[DC__NUM_DPP__MAX]; - enum odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX]; + enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX]; double dummy1[DC__NUM_DPP__MAX]; double dummy2[DC__NUM_DPP__MAX]; double dummy3[DC__NUM_DPP__MAX]; @@ -621,9 +623,9 @@ struct vba_vars_st { double dummy7[DC__NUM_DPP__MAX]; double dummy8[DC__NUM_DPP__MAX]; unsigned int dummyinteger1ms[DC__NUM_DPP__MAX]; - unsigned int dummyinteger2ms[DC__NUM_DPP__MAX]; + double dummyinteger2ms[DC__NUM_DPP__MAX]; unsigned int dummyinteger3[DC__NUM_DPP__MAX]; - unsigned int dummyinteger4; + unsigned int dummyinteger4[DC__NUM_DPP__MAX]; unsigned int dummyinteger5; unsigned int dummyinteger6; unsigned int dummyinteger7; @@ -636,7 +638,6 @@ struct vba_vars_st { unsigned int dummyintegerarr2[DC__NUM_DPP__MAX]; unsigned int dummyintegerarr3[DC__NUM_DPP__MAX]; unsigned int dummyintegerarr4[DC__NUM_DPP__MAX]; - long dummylongarr1[DC__NUM_DPP__MAX]; bool dummysinglestring; bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX]; double PlaneRequiredDISPCLKWithODMCombine2To1; @@ -644,20 +645,19 @@ struct vba_vars_st { unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2]; bool LinkDSCEnable; bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1]; - bool ODMCombineEnableThisState[DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX]; - unsigned int SwathWidthCThisState[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX]; + double SwathWidthCThisState[DC__NUM_DPP__MAX]; bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitchY[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitchC[DC__NUM_DPP__MAX]; unsigned int NotEnoughUrgentLatencyHiding; unsigned int NotEnoughUrgentLatencyHidingPre; - long PTEBufferSizeInRequestsForLuma; - long PTEBufferSizeInRequestsForChroma; + int PTEBufferSizeInRequestsForLuma; + int PTEBufferSizeInRequestsForChroma; // Missing from VBA - long dpte_group_bytes_chroma; + int dpte_group_bytes_chroma; unsigned int vm_group_bytes_chroma; double dst_x_after_scaler; double dst_y_after_scaler; @@ -682,8 +682,8 @@ struct vba_vars_st { double MinTTUVBlank[DC__NUM_DPP__MAX]; double BytePerPixelDETY[DC__NUM_DPP__MAX]; double BytePerPixelDETC[DC__NUM_DPP__MAX]; - unsigned int SwathWidthY[DC__NUM_DPP__MAX]; - unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX]; + double SwathWidthY[DC__NUM_DPP__MAX]; + double SwathWidthSingleDPPY[DC__NUM_DPP__MAX]; double CursorRequestDeliveryTime[DC__NUM_DPP__MAX]; double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX]; double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX]; @@ -759,12 +759,12 @@ struct vba_vars_st { double LinesInDETY[DC__NUM_DPP__MAX]; double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; - unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; - unsigned int SwathWidthC[DC__NUM_DPP__MAX]; + double SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; + double SwathWidthC[DC__NUM_DPP__MAX]; unsigned int BytePerPixelY[DC__NUM_DPP__MAX]; unsigned int BytePerPixelC[DC__NUM_DPP__MAX]; - long dummyinteger1; - long dummyinteger2; + unsigned int dummyinteger1; + unsigned int dummyinteger2; double FinalDRAMClockChangeLatency; double Tdmdl_vm[DC__NUM_DPP__MAX]; double Tdmdl[DC__NUM_DPP__MAX]; @@ -778,6 +778,7 @@ struct vba_vars_st { unsigned int 
DCCCMaxCompressedBlock[DC__NUM_DPP__MAX]; unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX]; double VStartupMargin; + bool NotEnoughTimeForDynamicMetadata; /* Missing from VBA */ unsigned int MaximumMaxVStartupLines; @@ -813,7 +814,7 @@ struct vba_vars_st { unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX]; double HRatioChroma[DC__NUM_DPP__MAX]; double VRatioChroma[DC__NUM_DPP__MAX]; - long WritebackSourceWidth[DC__NUM_DPP__MAX]; + int WritebackSourceWidth[DC__NUM_DPP__MAX]; bool ModeIsSupported; bool ODMCombine4To1Supported; @@ -849,6 +850,58 @@ struct vba_vars_st { unsigned int MaxNumHDMIFRLOutputs; int AudioSampleRate[DC__NUM_DPP__MAX]; int AudioSampleLayout[DC__NUM_DPP__MAX]; + + int PercentMarginOverMinimumRequiredDCFCLK; + bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2]; + enum immediate_flip_requirement ImmediateFlipRequirement; + double DETBufferSizeYThisState[DC__NUM_DPP__MAX]; + double DETBufferSizeCThisState[DC__NUM_DPP__MAX]; + bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX]; + bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX]; + int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX]; + int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX]; + double UrgLatency[DC__VOLTAGE_STATES + 1]; + double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2]; + unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2]; + bool UseMinimumRequiredDCFCLK; + double WritebackDelayTime[DC__NUM_DPP__MAX]; + unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX]; + unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX]; + unsigned int dummyinteger15; + unsigned int dummyinteger16; + unsigned int dummyinteger17; + unsigned int dummyinteger18; + unsigned int dummyinteger19; + unsigned int dummyinteger20; + unsigned int dummyinteger21; + unsigned int dummyinteger22; + unsigned int dummyinteger23; + unsigned int dummyinteger24; + unsigned int dummyinteger25; + unsigned int dummyinteger26; + unsigned int dummyinteger27; + unsigned int dummyinteger28; + unsigned int dummyinteger29; + bool dummystring[DC__NUM_DPP__MAX]; + double BPP; + enum odm_combine_policy ODMCombinePolicy; }; bool CalculateMinAndMaxPrefetchMode( @@ -870,4 +923,3 @@ double CalculateWriteBackDISPCLK( unsigned int WritebackChromaLineBufferWidth); #endif /* 
_DML2_DISPLAY_MODE_VBA_H_ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c index b953b02a1512..723af0b2dda0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c @@ -24,7 +24,7 @@ */ #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_inline_defs.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index eca140da13d8..ded71ea82413 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -27,7 +27,7 @@ #define __DML_INLINE_DEFS_H__ #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_logger.h" static inline double dml_min(double a, double b) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 641ffb7cfaed..3f66868df171 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -2,7 +2,13 @@ # # Makefile for the 'dsc' sub-component of DAL. +ifdef CONFIG_X86 dsc_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dsc_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -10,6 +16,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -18,6 +25,7 @@ dsc_ccflags += -mpreferred-stack-boundary=4 else dsc_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index e60f760585e4..87d682d25278 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -22,30 +22,16 @@ * Author: AMD */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dc_hw_types.h" #include "dsc.h" #include <drm/drm_dp_helper.h> - -struct dc_dsc_policy { - bool use_min_slices_h; - int max_slices_h; // Maximum available if 0 - int min_sice_height; // Must not be less than 8 - int max_target_bpp; - int min_target_bpp; // Minimum target bits per pixel -}; - -const struct dc_dsc_policy dsc_policy = { - .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock - .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode) - .min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide - .max_target_bpp = 16, - .min_target_bpp = 8, -}; - +#include "dc.h" /* This module's internal functions */ +/* default DSC policy target bitrate limit is 16bpp */ +static uint32_t dsc_policy_max_target_bpp_limit = 16; + static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing) { @@ -237,8 +223,12 @@ static void get_dsc_enc_caps( // This is a static HW query, so we can use any DSC memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (dsc) { + if (!dsc->ctx->dc->debug.disable_dsc) + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if 
(dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + } } /* Returns 'false' if no intersection was found for at least one capablity. @@ -578,9 +568,11 @@ static bool setup_dsc_config( bool is_dsc_possible = false; int pic_height; int slice_height; + struct dc_dsc_policy policy; memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); + dc_dsc_get_policy_for_timing(timing, &policy); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -596,7 +588,12 @@ static bool setup_dsc_config( goto done; if (target_bandwidth_kbps > 0) { - is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); + is_dsc_possible = decide_dsc_target_bpp_x16( + &policy, + &dsc_common_caps, + target_bandwidth_kbps, + timing, + &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } if (!is_dsc_possible) @@ -698,20 +695,20 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - if (dsc_policy.use_min_slices_h) { + if (policy.use_min_slices_h) { if (min_slices_h > 0) num_slices_h = min_slices_h; else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else is_dsc_possible = false; } else { if (max_slices_h > 0) { - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible @@ -733,7 +730,7 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. if (min_slice_height_override == 0) - slice_height = min(dsc_policy.min_sice_height, pic_height); + slice_height = min(policy.min_slice_height, pic_height); else slice_height = min(min_slice_height_override, pic_height); @@ -764,7 +761,7 @@ done: return is_dsc_possible; } -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) { if (!dpcd_dsc_basic_data) return false; @@ -817,6 +814,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div)) return false; + if (dc->debug.dsc_bpp_increment_div) { + /* dsc_bpp_increment_div should onl be 1, 2, 4, 8 or 16, but rather than rejecting invalid values, + * we'll accept all and get it into range. This also makes the above check against 0 redundant, + * but that one stresses out the override will be only used if it's not 0. 
+ */ + if (dc->debug.dsc_bpp_increment_div >= 1) + dsc_sink_caps->bpp_increment_div = 1; + if (dc->debug.dsc_bpp_increment_div >= 2) + dsc_sink_caps->bpp_increment_div = 2; + if (dc->debug.dsc_bpp_increment_div >= 4) + dsc_sink_caps->bpp_increment_div = 4; + if (dc->debug.dsc_bpp_increment_div >= 8) + dsc_sink_caps->bpp_increment_div = 8; + if (dc->debug.dsc_bpp_increment_div >= 16) + dsc_sink_caps->bpp_increment_div = 16; + } + /* Extended caps */ if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST dsc_sink_caps->branch_overall_throughput_0_mps = 0; @@ -903,4 +917,67 @@ bool dc_dsc_compute_config( timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy) +{ + uint32_t bpc = 0; + + policy->min_target_bpp = 0; + policy->max_target_bpp = 0; + + /* DSC Policy: Use minimum number of slices that fits the pixel clock */ + policy->use_min_slices_h = true; + + /* DSC Policy: Use max available slices + * (in our case 4 for or 8, depending on the mode) + */ + policy->max_slices_h = 0; + + /* DSC Policy: Use slice height recommended + * by VESA DSC Spreadsheet user guide + */ + policy->min_slice_height = 108; + + /* DSC Policy: follow DP specs with an internal upper limit to 16 bpp + * for better interoperability + */ + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + bpc = 8; + break; + case COLOR_DEPTH_101010: + bpc = 10; + break; + case COLOR_DEPTH_121212: + bpc = 12; + break; + default: + return; + } + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */ + /* DP specs limits to 8 */ + policy->min_target_bpp = 8; + /* DP specs limits to 3 x bpc */ + policy->max_target_bpp = 3 * bpc; + break; + case PIXEL_ENCODING_YCBCR420: + /* DP specs limits to 6 */ + policy->min_target_bpp = 6; + /* DP specs limits to 1.5 x bpc assume bpc is an even number */ + policy->max_target_bpp = bpc * 3 / 2; + break; + default: + return; + } + /* internal upper limit, default 16 bpp */ + if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) + policy->max_target_bpp = dsc_policy_max_target_bpp_limit; +} + +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) +{ + dsc_policy_max_target_bpp_limit = limit; +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index 020ad8f685ea..9f70e87b3ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h index f66d006eac5d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. 
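The two pieces of DSC logic introduced above are easy to sanity-check in isolation: dc_dsc_get_policy_for_timing() derives a target-bpp window from the pixel encoding and bit depth and then caps it at the module-wide limit (16 bpp unless dc_dsc_policy_set_max_target_bpp_limit() changes it), and the dsc_bpp_increment_div debug override folds an arbitrary request down to the nearest supported divider. The standalone sketch below models both with plain integers; the helper names and the user-space harness are illustrative stand-ins, not driver code.

#include <stdio.h>

/* Mirrors the default of dsc_policy_max_target_bpp_limit in dc_dsc.c. */
static unsigned int max_target_bpp_limit = 16;

/*
 * Model of the window computed by dc_dsc_get_policy_for_timing():
 * RGB/4:4:4/4:2:2 use the DP range 8..3*bpc, native 4:2:0 uses 6..1.5*bpc,
 * and the upper bound is clamped to the internal interoperability limit.
 */
static void target_bpp_range(unsigned int bpc, int native_420,
			     unsigned int *min_bpp, unsigned int *max_bpp)
{
	if (native_420) {
		*min_bpp = 6;
		*max_bpp = bpc * 3 / 2;
	} else {
		*min_bpp = 8;
		*max_bpp = 3 * bpc;
	}
	if (*max_bpp > max_target_bpp_limit)
		*max_bpp = max_target_bpp_limit;
}

/*
 * Model of the dsc_bpp_increment_div override in dc_dsc_parse_dsc_dpcd():
 * the cascading compares leave the largest supported divider
 * (1, 2, 4, 8 or 16) that does not exceed the requested value.
 */
static unsigned int fold_increment_div(unsigned int requested)
{
	unsigned int div = 0;

	if (requested >= 1)
		div = 1;
	if (requested >= 2)
		div = 2;
	if (requested >= 4)
		div = 4;
	if (requested >= 8)
		div = 8;
	if (requested >= 16)
		div = 16;
	return div; /* 0 means no override was requested */
}

int main(void)
{
	unsigned int lo, hi;

	target_bpp_range(10, 0, &lo, &hi);  /* 10 bpc RGB: 8..16 (30 capped) */
	printf("RGB 10 bpc:  %u..%u bpp\n", lo, hi);
	target_bpp_range(8, 1, &lo, &hi);   /* 8 bpc native 4:2:0: 6..12 */
	printf("420 8 bpc:   %u..%u bpp\n", lo, hi);
	printf("div(10) -> %u\n", fold_increment_div(10)); /* folds to 8 */
	return 0;
}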
@@ -703,4 +702,3 @@ const qp_table qp_table_422_8bpc_max = { { 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} } }; -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 76c4b12d6824..03ae15946c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -252,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->rc_buf_thresh[13] = 8064; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index f1d6e793bc61..b6b1f09c2009 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 73172fd0b529..1f6e63b71456 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2012-17 Advanced Micro Devices, Inc. * @@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index b3062275711e..202baa210cc8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -61,26 +61,25 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10) -endif ############################################################################### # DCN 2 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 +############################################################################### +# DCN 21 +############################################################################### GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 43a440385b43..83f798cb8b21 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -110,6 +109,12 @@ static const struct ddc_registers 
ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), ddc_data_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(DATA), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_registers ddc_clk_regs_dcn[] = { @@ -119,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), ddc_clk_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(CLK), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_sh_mask ddc_shift[] = { @@ -246,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h index 43a4ce7aa3bf..0fd9b315bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_FACTORY_DCN20_H__ #define __DAL_HW_FACTORY_DCN20_H__ @@ -30,4 +29,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c index 915e896e0e91..52ba62b3b5e4 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn20.h" #include "dm_services.h" @@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h index 01f52c7bed86..5f7a35530e26 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_TRANSLATE_DCN20_H__ #define __DAL_HW_TRANSLATE_DCN20_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn20_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c index 8572678f8d4f..907c5911eb9e 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h index 2443f9e7afbf..4949e0c7fa06 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_FACTORY_DCN21_H__ #define __DAL_HW_FACTORY_DCN21_H__ @@ -30,4 +29,3 @@ void 
dal_hw_factory_dcn21_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index fbb58fb8c318..291966efe63d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn21.h" #include "dm_services.h" @@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h index 2bfaac24c574..9462b0a65200 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_TRANSLATE_DCN21_H__ #define __DAL_HW_TRANSLATE_DCN21_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn21_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index f91e85b04956..308a543178a5 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -48,13 +48,11 @@ DDC_GPIO_REG_LIST(cd,id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_REG_LIST_DCN2(cd, id) \ DDC_GPIO_REG_LIST(cd, id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\ @@ -90,13 +88,11 @@ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_I2C_REG_LIST_DCN2(cd) \ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0,\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_MASK_SH_LIST_COMMON(mask_sh) \ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\ @@ -110,22 +106,18 @@ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \ {DDC_MASK_SH_LIST_COMMON(mask_sh),\ 0,\ 0,\ (PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\ (DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} -#endif struct ddc_registers { struct gpio_registers gpio; uint32_t ddc_setup; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t phy_aux_cntl; uint32_t dc_gpio_aux_ctrl_5; -#endif }; struct ddc_sh_mask { @@ -140,11 +132,9 @@ struct ddc_sh_mask { /* i2cpad_mask */ uint32_t DC_GPIO_SDA_PD_DIS; uint32_t DC_GPIO_SCL_PD_DIS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) //phy_aux_cntl uint32_t AUX_PAD_RXSEL; uint32_t DDC_PAD_I2CMODE; -#endif }; @@ -180,7 +170,6 @@ struct ddc_sh_mask { {\ DDC_I2C_REG_LIST(SCL)\ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ddc_data_regs_dcn2(id) \ {\ DDC_REG_LIST_DCN2(DATA, id)\ @@ -200,7 +189,6 @@ struct ddc_sh_mask { {\ DDC_REG_LIST_DCN2(SCL)\ } -#endif #endif /* 
DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 1c12961f6472..1ae153eab31d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -48,18 +48,18 @@ struct gpio; -static void destruct( +static void dal_hw_ddc_destruct( struct hw_ddc *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_ddc_destroy( struct hw_gpio_pin **ptr) { struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr); - destruct(pin); + dal_hw_ddc_destruct(pin); kfree(pin); @@ -150,7 +150,6 @@ static enum gpio_result set_config( AUX_PAD1_MODE, 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1); } @@ -158,7 +157,6 @@ static enum gpio_result set_config( if (ddc->regs->phy_aux_cntl != 0) { REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_MODE_AUX: /* set the AUX pad mode */ @@ -166,12 +164,10 @@ static enum gpio_result set_config( REG_SET(gpio.MASK_reg, regval, AUX_PAD1_MODE, 1); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 0); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT: @@ -211,7 +207,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_ddc_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -220,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( +static void dal_hw_ddc_construct( struct hw_ddc *ddc, enum gpio_id id, uint32_t en, @@ -247,7 +243,7 @@ void dal_hw_ddc_init( return; } - construct(*hw_ddc, id, en, ctx); + dal_hw_ddc_construct(*hw_ddc, id, en, ctx); } struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index fa9f1d055ec8..d2d36d48caaa 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -45,15 +45,11 @@ #include "dce80/hw_factory_dce80.h" #include "dce110/hw_factory_dce110.h" #include "dce120/hw_factory_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_factory_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_factory_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_factory_dcn21.h" -#endif #include "diagnostics/hw_factory_diag.h" @@ -90,19 +86,15 @@ bool dal_hw_factory_init( case DCE_VERSION_12_1: dal_hw_factory_dce120_init(factory); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_factory_dcn10_init(factory); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_factory_dcn20_init(factory); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_factory_dcn21_init(factory); return true; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c index 69b899741f6d..f9e847e6555d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c +++ 
b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c @@ -46,22 +46,13 @@ struct gpio; -static void dal_hw_generic_construct( - struct hw_generic *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_generic_destruct( struct hw_generic *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_generic_destroy( struct hw_gpio_pin **ptr) { struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr); @@ -90,7 +81,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_generic_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_generic *generic, +static void dal_hw_generic_construct( + struct hw_generic *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_generic_construct(generic, id, en, ctx); - generic->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_generic_init( @@ -126,7 +117,7 @@ void dal_hw_generic_init( return; } - construct(*hw_generic, id, en, ctx); + dal_hw_generic_construct(*hw_generic, id, en, ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c index 00c9bcf660a3..692f29de7797 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c @@ -46,34 +46,18 @@ struct gpio; -static void dal_hw_hpd_construct( - struct hw_hpd *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_hpd_destruct( struct hw_hpd *pin) { dal_hw_gpio_destruct(&pin->base); } - -static void destruct( - struct hw_hpd *hpd) -{ - dal_hw_hpd_destruct(hpd); -} - -static void destroy( +static void dal_hw_hpd_destroy( struct hw_gpio_pin **ptr) { struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr); - destruct(hpd); + dal_hw_hpd_destruct(hpd); kfree(hpd); @@ -120,7 +104,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_hpd_destroy, .open = dal_hw_gpio_open, .get_value = get_value, .set_value = dal_hw_gpio_set_value, @@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_hpd *hpd, +static void dal_hw_hpd_construct( + struct hw_hpd *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_hpd_construct(hpd, id, en, ctx); - hpd->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_hpd_init( @@ -156,7 +140,7 @@ void dal_hw_hpd_init( return; } - construct(*hw_hpd, id, en, ctx); + dal_hw_hpd_construct(*hw_hpd, id, en, ctx); } struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index f2046f55d6a8..5d396657a1ee 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -43,15 +43,11 @@ #include "dce80/hw_translate_dce80.h" #include "dce110/hw_translate_dce110.h" #include "dce120/hw_translate_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if 
defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_translate_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_translate_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_translate_dcn21.h" -#endif #include "diagnostics/hw_translate_diag.h" @@ -85,19 +81,15 @@ bool dal_hw_translate_init( case DCE_VERSION_12_1: dal_hw_translate_dce120_init(translate); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_translate_dcn10_init(translate); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_translate_dcn20_init(translate); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_translate_dcn21_init(translate); return true; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fd39e2abe2ed..4ead89dd7c41 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -43,10 +43,8 @@ enum dc_status { DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ DC_FAIL_SCALING = 14, DC_FAIL_DP_LINK_TRAINING = 15, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DC_FAIL_DSC_VALIDATE = 16, DC_NO_DSC_RESOURCE = 17, -#endif DC_FAIL_UNSUPPORTED_1 = 18, DC_FAIL_CLK_EXCEED_MAX = 21, DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index a831079607cd..f285b76888fb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -33,13 +33,11 @@ #include "dc_bios_types.h" #include "mem_input.h" #include "hubp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "mpc.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dwb.h" #include "mcif_wb.h" -#endif #define MAX_CLOCK_SOURCES 7 @@ -89,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); struct resource_pool; struct dc_state; struct resource_context; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct clk_bw_params; -#endif struct resource_funcs { void (*destroy)(struct resource_pool **pool); @@ -105,7 +101,7 @@ struct resource_funcs { int (*populate_dml_pipes)( struct dc *dc, - struct resource_context *res_ctx, + struct dc_state *context, display_e2e_pipe_params_st *pipes); enum dc_status (*validate_global)( @@ -135,7 +131,6 @@ struct resource_funcs { struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*populate_dml_writeback_from_context)( struct dc *dc, struct resource_context *res_ctx, @@ -146,12 +141,9 @@ struct resource_funcs { struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) void (*update_bw_bounding_box)( struct dc *dc, struct clk_bw_params *bw_params); -#endif }; @@ -180,7 +172,6 @@ struct resource_pool { struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; bool i2c_hw_buffer_in_use; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwbc *dwbc[MAX_DWB_PIPES]; struct mcif_wb *mcif_wb[MAX_DWB_PIPES]; struct { @@ -188,11 +179,8 @@ struct resource_pool { unsigned int gsl_1:1; unsigned int gsl_2:1; } gsl_groups; -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dscs[MAX_PIPES]; -#endif unsigned int pipe_count; unsigned int underlay_pipe_index; 
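The gsl_groups bitfield that is now built unconditionally into struct resource_pool is a three-slot "in use" mask for the global-sync-lock groups shared by the flip and writeback paths. Below is a minimal, standalone sketch of that bookkeeping pattern; the real allocator lives in the DCN2 hardware sequencer and differs in detail, so treat this purely as an illustration of the bitfield idea.

#include <stdio.h>

/* Stand-in for the gsl_groups bookkeeping in struct resource_pool. */
struct gsl_groups_example {
	unsigned int gsl_0 : 1;
	unsigned int gsl_1 : 1;
	unsigned int gsl_2 : 1;
};

/* Return 1..3 for a newly reserved group, or 0 if all are busy;
 * 0 doubles as "unassigned", matching the gsl_group convention. */
static int acquire_gsl_group(struct gsl_groups_example *g)
{
	if (!g->gsl_0) {
		g->gsl_0 = 1;
		return 1;
	}
	if (!g->gsl_1) {
		g->gsl_1 = 1;
		return 2;
	}
	if (!g->gsl_2) {
		g->gsl_2 = 1;
		return 3;
	}
	return 0;
}

static void release_gsl_group(struct gsl_groups_example *g, int group)
{
	if (group == 1)
		g->gsl_0 = 0;
	else if (group == 2)
		g->gsl_1 = 0;
	else if (group == 3)
		g->gsl_2 = 0;
}

int main(void)
{
	struct gsl_groups_example pool = {0};
	int a = acquire_gsl_group(&pool);  /* 1 */
	int b = acquire_gsl_group(&pool);  /* 2 */

	release_gsl_group(&pool, a);
	printf("reused group: %d\n", acquire_gsl_group(&pool)); /* 1 again */
	(void)b;
	return 0;
}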
@@ -206,9 +194,7 @@ struct resource_pool { unsigned int timing_generator_count; unsigned int mpcc_count; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) unsigned int writeback_pipe_count; -#endif /* * reserved clock source for DP */ @@ -226,9 +212,12 @@ struct resource_pool { struct abm *abm; struct dmcu *dmcu; + struct dmub_psr *psr; const struct resource_funcs *funcs; const struct resource_caps *res_cap; + + struct ddc_service *oem_device; }; struct dcn_fe_bandwidth { @@ -238,9 +227,7 @@ struct dcn_fe_bandwidth { struct stream_resource { struct output_pixel_processor *opp; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dsc; -#endif struct timing_generator *tg; struct stream_encoder *stream_enc; struct audio *audio; @@ -249,12 +236,10 @@ struct stream_resource { struct encoder_info_frame encoder_info_frame; struct abm *abm; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* There are only (num_pipes+1)/2 groups. 0 means unassigned, * otherwise it's using group number 'gsl_group-1' */ uint8_t gsl_group; -#endif }; struct plane_resource { @@ -306,17 +291,15 @@ struct pipe_ctx { struct pipe_ctx *next_odm_pipe; struct pipe_ctx *prev_odm_pipe; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct _vcs_dpi_display_dlg_regs_st dlg_regs; struct _vcs_dpi_display_ttu_regs_st ttu_regs; struct _vcs_dpi_display_rq_regs_st rq_regs; struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param; #endif union pipe_update_flags update_flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dwbc *dwbc; struct mcif_wb *mcif_wb; -#endif }; struct resource_context { @@ -325,9 +308,7 @@ struct resource_context { bool is_audio_acquired[MAX_PIPES]; uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES]; uint8_t dp_clock_source_ref_count; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool is_dsc_acquired[MAX_PIPES]; -#endif }; struct dce_bw_output { @@ -347,18 +328,14 @@ struct dce_bw_output { int blackout_recovery_time_us; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback { struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES]; }; -#endif struct dcn_bw_output { struct dc_clocks clk; struct dcn_watermark_set watermarks; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback bw_writeback; -#endif }; union bw_output { @@ -392,7 +369,7 @@ struct dc_state { /* Note: these are big structures, do *not* put on stack! 
*/ struct dm_pp_display_configuration pp_display_cfg; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_bw_internal_vars dcn_bw_vars; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 14716ba35662..de2d160114db 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout); void dal_ddc_service_write_scdc_data( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 045138dbdccb..8b1f0ce6c2a7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,8 +28,8 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ struct dc_link; struct dc_stream_state; @@ -57,10 +57,11 @@ void decide_link_settings( struct dc_link_settings *link_setting); bool perform_link_training_with_retries( - struct dc_link *link, const struct dc_link_settings *link_setting, bool skip_video_pattern, - int attempts); + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal); bool is_mst_supported(struct dc_link *link); @@ -75,13 +76,13 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dp_overwrite_extended_receiver_cap(struct dc_link *link); + void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -#endif #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h index 45a07eeffbb6..45a07eeffbb6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 026e6a2a2c44..ac530c057ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -31,7 +31,6 @@ #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Constants */ #define DDR4_DRAM_WIDTH 64 #define WM_A 0 @@ -39,12 +38,10 @@ #define WM_C 2 #define WM_D 3 #define WM_SET_COUNT 4 -#endif #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ #define MAX_NUM_DPM_LVL 8 @@ -154,7 +151,6 @@ struct clk_bw_params { struct clk_limit_table clk_table; struct wm_table wm_table; }; -#endif /* Public interfaces */ struct clk_states { @@ -195,9 +191,8 @@ struct clk_mgr { bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dentist_vco_freq_khz; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 + struct clk_state_registers_and_bypass boot_snapshot; struct clk_bw_params *bw_params; -#endif }; /* forward declarations */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index a17a77192690..862952c0286a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -96,12 +96,10 @@ enum dentist_divider_range { .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \ .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_REG_LIST_NV10() \ SR(DENTIST_DISPCLK_CNTL), \ CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \ CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0) -#endif #define CLK_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -120,7 +118,6 @@ enum dentist_divider_range { CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\ CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh), -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\ @@ -130,7 +127,6 @@ enum dentist_divider_range { CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh) -#endif #define CLK_REG_FIELD_LIST(type) \ type DPREFCLK_SRC_SEL; \ @@ -143,30 +139,24 @@ enum dentist_divider_range { ****************** Clock Manager Private Structures *********************************** *************************************************************************************** */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK20_REG_FIELD_LIST(type) \ type DENTIST_DPPCLK_WDIVIDER; \ type DENTIST_DPPCLK_CHG_DONE; \ type FbMult_int; \ type FbMult_frac; -#endif #define VBIOS_SMU_REG_FIELD_LIST(type) \ type CONTENT; struct clk_mgr_shift { CLK_REG_FIELD_LIST(uint8_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint8_t) -#endif VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; struct clk_mgr_mask { CLK_REG_FIELD_LIST(uint32_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint32_t) -#endif VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; @@ -174,10 +164,8 @@ struct clk_mgr_registers { uint32_t DPREFCLK_CNTL; uint32_t DENTIST_DISPCLK_CNTL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t CLK3_CLK2_DFS_CNTL; uint32_t CLK3_CLK_PLL_REQ; -#endif uint32_t MP1_SMN_C2PMSG_67; uint32_t MP1_SMN_C2PMSG_83; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index c81a17aeaa25..c0dc1d0f5cae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -52,7 +52,6 @@ struct dcn_hubbub_wm { struct dcn_hubbub_wm_set sets[4]; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dcn_hubbub_page_table_depth { DCN_PAGE_TABLE_DEPTH_1_LEVEL, DCN_PAGE_TABLE_DEPTH_2_LEVEL, @@ -101,13 +100,11 @@ struct hubbub_addr_config { } default_addrs; }; -#endif struct hubbub_funcs { void (*update_dchub)( struct hubbub *hubbub, struct dchub_init_data *dh_data); 
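With the DCN2.1 guards removed, the WM_A..WM_D and WM_SET_COUNT constants and the four-entry watermark reporting structures are always visible, so callers can iterate the watermark sets by index instead of naming each one. The snippet below is a small, self-contained illustration of that indexing convention with placeholder values and a cut-down stand-in struct; the real entries are programmed from the SMU-provided clock/bandwidth tables (struct clk_bw_params / wm_table).

#include <stdio.h>

/* Same indexing convention as clk_mgr.h / dchubbub.h. */
#define WM_A 0
#define WM_B 1
#define WM_C 2
#define WM_D 3
#define WM_SET_COUNT 4

/* Cut-down stand-in for a per-set watermark entry. */
struct wm_set_example {
	unsigned int wm_set;       /* which set: WM_A..WM_D */
	unsigned int data_urgent;  /* placeholder watermark value */
};

int main(void)
{
	struct wm_set_example sets[WM_SET_COUNT];
	static const char names[WM_SET_COUNT] = { 'A', 'B', 'C', 'D' };
	unsigned int i;

	/* Placeholder numbers only; real values come from the clk_mgr
	 * bandwidth parameters for the current memory configuration. */
	for (i = 0; i < WM_SET_COUNT; i++) {
		sets[i].wm_set = i;
		sets[i].data_urgent = 4000 + i * 1000;
	}

	for (i = 0; i < WM_SET_COUNT; i++)
		printf("WM_%c: urgent = %u\n", names[i], sets[i].data_urgent);

	return 0;
}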
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int (*init_dchub_sys_ctx)( struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); @@ -116,7 +113,6 @@ struct hubbub_funcs { struct dcn_hubbub_virt_addr_config *va_config, int vmid); -#endif bool (*get_dcc_compression_cap)(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index c68f0ce346c7..5315f1f86b21 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -52,6 +52,8 @@ struct dmcu { enum dmcu_state dmcu_state; struct dmcu_version dmcu_version; unsigned int cached_wait_loop_number; + uint32_t psp_version; + bool auto_load_dmcu; }; struct dmcu_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 474c7194a9f8..45ef390ae052 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -36,14 +36,10 @@ struct dpp { struct dpp_caps *caps; struct pwl_params regamma_params; struct pwl_params degamma_params; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dpp_cursor_attributes cur_attr; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params shaper_params; bool cm_bypass_mode; -#endif }; struct dpp_input_csc_matrix { @@ -51,12 +47,31 @@ struct dpp_input_csc_matrix { uint16_t regval[12]; }; +static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { + {COLOR_SPACE_SRGB, + {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, + {COLOR_SPACE_SRGB_LIMITED, + {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, + {COLOR_SPACE_YCBCR601, + {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, + 0, 0x2000, 0x38b4, 0xe3a6} }, + {COLOR_SPACE_YCBCR601_LIMITED, + {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, + 0, 0x2568, 0x40de, 0xdd3a} }, + {COLOR_SPACE_YCBCR709, + {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, + 0x2000, 0x3b61, 0xe24f} }, + + {COLOR_SPACE_YCBCR709_LIMITED, + {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, + 0x2568, 0x43ee, 0xdbb2} } +}; + struct dpp_grph_csc_adjustment { struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE]; enum graphics_gamut_adjust_type gamut_adjust_type; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct cnv_color_keyer_params { int color_keyer_en; int color_keyer_mode; @@ -82,7 +97,6 @@ struct cnv_alpha_2bit_lut { int lut2; int lut3; }; -#endif struct dcn_dpp_state { uint32_t is_enabled; @@ -190,12 +204,8 @@ struct dpp_funcs { enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void (*dpp_full_bypass)(struct dpp *dpp_base); @@ -224,7 +234,6 @@ struct dpp_funcs { bool dppclk_div, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*dpp_program_blnd_lut)( struct dpp *dpp, const struct pwl_params *params); @@ -237,7 +246,6 @@ struct dpp_funcs { void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index c6ff3d78b435..c59740084ebc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -22,7 
+22,6 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DAL_DSC_H__ #define __DAL_DSC_H__ @@ -98,4 +97,3 @@ struct dsc_funcs { }; #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index ff1a07b35c85..459f95f52486 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -51,20 +51,15 @@ enum dwb_source { dwb_src_otg3, /* for DCN1.x/DCN2.x */ }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* DCN1.x, DCN2.x support 2 pipes */ -#else -/* DCN1.x supports 2 pipes */ -#endif enum dwb_pipe { dwb_pipe0 = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) dwb_pipe1, #endif dwb_pipe_max_num, }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dwb_frame_capture_enable { DWB_FRAME_CAPTURE_DISABLE = 0, DWB_FRAME_CAPTURE_ENABLE = 1, @@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel { WBSCL_COEF_CHROMA_HORZ_FILTER = 3 }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwb_warmup_params { bool warmup_en; /* false: normal mode, true: enable pattern generator */ bool warmup_mode; /* false: 420, true: 444 */ @@ -88,7 +81,6 @@ struct dwb_warmup_params { int warmup_width; /* Pattern width (pixels) */ int warmup_height; /* Pattern height (lines) */ }; -#endif struct dwb_caps { enum dce_version hw_version; /* DCN engine version. */ @@ -121,7 +113,8 @@ struct dwbc { int wb_src_plane_inst;/*hubp, mpcc, inst*/ bool update_privacymask; uint32_t mask_id; - + int otg_inst; + bool mvc_cfg; }; struct dwbc_funcs { @@ -150,13 +143,11 @@ struct dwbc_funcs { struct dwbc *dwbc, bool is_new_content); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_warmup)( struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); -#endif bool (*get_dwb_status)( struct dwbc *dwbc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 809b62b51a43..2cb8466e657b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -38,9 +38,7 @@ enum cursor_pitch { }; enum cursor_lines_per_chunk { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */ -#endif CURSOR_LINE_PER_CHUNK_2 = 1, CURSOR_LINE_PER_CHUNK_4, CURSOR_LINE_PER_CHUNK_8, @@ -65,6 +63,26 @@ struct hubp { bool power_gated; }; +struct surface_flip_registers { + uint32_t DCSURF_SURFACE_CONTROL; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; + bool tmz_surface; + bool immediate; + uint8_t vmid; + bool grph_stereo; +}; + struct hubp_funcs { void (*hubp_setup)( struct hubp *hubp, @@ -86,6 +104,9 @@ struct hubp_funcs { const struct rect *viewport, const struct rect *viewport_c); + void (*apply_PLAT_54186_wa)(struct hubp *hubp, + const struct dc_plane_address *address); + bool (*hubp_program_surface_flip_and_addr)( struct hubp *hubp, const struct dc_plane_address *address, @@ -139,7 +160,6 @@ struct hubp_funcs { unsigned int 
(*hubp_get_underflow_status)(struct hubp *hubp); void (*hubp_init)(struct hubp *hubp); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*dmdata_set_attributes)( struct hubp *hubp, const struct dc_dmdata_attributes *attr); @@ -159,7 +179,13 @@ struct hubp_funcs { void (*hubp_set_flip_control_surface_gsl)( struct hubp *hubp, bool enable); -#endif + + void (*validate_dml_output)( + struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index f82365e2d03c..75d419081e76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -36,9 +36,7 @@ #define MAX_AUDIOS 7 #define MAX_PIPES 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 -#endif struct gamma_curve { uint32_t offset; @@ -81,7 +79,6 @@ struct pwl_result_data { uint32_t delta_blue_reg; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_rgb { uint32_t red; uint32_t green; @@ -110,7 +107,6 @@ struct tetrahedral_params { bool use_12bits; }; -#endif /* arr_curve_points - regamma regions/segments specification * arr_points - beginning and end point specified separately (only one on DCE) @@ -195,13 +191,11 @@ enum opp_regamma { OPP_REGAMMA_USER }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum optc_dsc_mode { OPTC_DSC_DISABLED = 0, OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */ OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */ }; -#endif struct dc_bias_and_scale { uint16_t scale_red; @@ -224,12 +218,8 @@ enum test_pattern_mode { TEST_PATTERN_MODE_VERTICALBARS, TEST_PATTERN_MODE_HORIZONTALBARS, TEST_PATTERN_MODE_SINGLERAMP_RGB, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) TEST_PATTERN_MODE_DUALRAMP_RGB, TEST_PATTERN_MODE_XR_BIAS_RGB -#else - TEST_PATTERN_MODE_DUALRAMP_RGB -#endif }; enum test_pattern_color_format { @@ -255,6 +245,13 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR }; +enum controller_dp_color_space { + CONTROLLER_DP_COLOR_SPACE_RGB, + CONTROLLER_DP_COLOR_SPACE_YCBCR601, + CONTROLLER_DP_COLOR_SPACE_YCBCR709, + CONTROLLER_DP_COLOR_SPACE_UDEFINED +}; + enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index b21909216fb6..fb748f082c56 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -113,26 +113,21 @@ struct link_encoder { struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool usbc_combo_phy; -#endif }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct link_enc_state { uint32_t dphy_fec_en; uint32_t dphy_fec_ready_shadow; uint32_t dphy_fec_active_status; + uint32_t dp_link_training_complete; }; -#endif struct link_encoder_funcs { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*read_state)( struct link_encoder *enc, struct link_enc_state *s); -#endif bool (*validate_output_with_stream)( struct link_encoder *enc, const struct dc_stream_state *stream); void (*hw_init)(struct link_encoder *enc); @@ -174,7 +169,6 @@ struct link_encoder_funcs { unsigned int (*get_dig_frontend)(struct link_encoder *enc); void (*destroy)(struct link_encoder **enc); -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*fec_set_enable)(struct link_encoder *enc, bool enable); @@ -182,7 +176,6 @@ struct link_encoder_funcs { bool ready); bool (*fec_is_active)(struct link_encoder *enc); -#endif bool (*is_in_alt_mode) (struct link_encoder *enc); void (*get_max_link_cap)(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 67b610d6d91f..2e2310f1901a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st { struct dcn_watermarks { uint32_t pte_meta_urgent_ns; uint32_t urgent_ns; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; int32_t urgent_latency_ns; -#endif struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 58826be81395..094afc4c8173 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -31,9 +31,7 @@ #define MAX_MPCC 6 #define MAX_OPP 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB 1 -#endif enum mpc_output_csc_mode { MPC_OUTPUT_CSC_DISABLE = 0, @@ -66,14 +64,12 @@ struct mpcc_blnd_cfg { int global_alpha; bool overlap_only; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* MPCC top/bottom gain settings */ int bottom_gain_mode; int background_color_bpc; int top_gain; int bottom_inside_gain; int bottom_outside_gain; -#endif }; struct mpcc_sm_cfg { @@ -90,7 +86,6 @@ struct mpcc_sm_cfg { int force_next_field_polarity; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct mpc_denorm_clamp { int clamp_max_r_cr; int clamp_min_r_cr; @@ -99,7 +94,6 @@ struct mpc_denorm_clamp { int clamp_max_b_cb; int clamp_min_b_cb; }; -#endif /* * MPCC connection and blending configuration for a single MPCC instance. 
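struct mpc_denorm_clamp, now visible without the DCN2 guard, simply carries per-channel minimum and maximum code values that the MPC applies after denormalization. The standalone sketch below fills such a clamp with typical 10-bit limited-range (studio swing) bounds and applies it to a pixel; the values the driver actually programs depend on the selected output depth and color space, so the numbers here are illustrative only.

#include <stdio.h>

/* Mirror of the mpc_denorm_clamp layout, used here as a plain struct. */
struct denorm_clamp_example {
	int clamp_max_r_cr, clamp_min_r_cr;
	int clamp_max_g_y,  clamp_min_g_y;
	int clamp_max_b_cb, clamp_min_b_cb;
};

static int clamp_code(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Typical 10-bit limited-range bounds (64..940 for luma/RGB,
	 * 64..960 for chroma); illustrative placeholders. */
	struct denorm_clamp_example c = {
		.clamp_max_r_cr = 960, .clamp_min_r_cr = 64,
		.clamp_max_g_y  = 940, .clamp_min_g_y  = 64,
		.clamp_max_b_cb = 960, .clamp_min_b_cb = 64,
	};
	int y = 1000, cb = 20, cr = 512;

	printf("Y  %d -> %d\n", y,  clamp_code(y,  c.clamp_min_g_y,  c.clamp_max_g_y));
	printf("Cb %d -> %d\n", cb, clamp_code(cb, c.clamp_min_b_cb, c.clamp_max_b_cb));
	printf("Cr %d -> %d\n", cr, clamp_code(cr, c.clamp_min_r_cr, c.clamp_max_r_cr));
	return 0;
}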
@@ -126,10 +120,8 @@ struct mpc { struct dc_context *ctx; struct mpcc mpcc_array[MAX_MPCC]; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params blender_params; bool cm_bypass_mode; -#endif }; struct mpcc_state { @@ -230,7 +222,6 @@ struct mpc_funcs { struct mpc *mpc, struct mpc_tree *tree); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_denorm)(struct mpc *mpc, int opp_id, enum dc_color_depth output_depth); @@ -258,7 +249,6 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_id, bool power_on); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 18def2b6fafe..7575564b2265 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -263,9 +263,7 @@ struct oppbuf_params { enum oppbuf_display_segmentation mso_segmentation; uint32_t mso_overlap_pixel_num; uint32_t pixel_repetition; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t num_segment_padded_pixels; -#endif }; struct opp_funcs { @@ -305,10 +303,10 @@ struct opp_funcs { struct output_pixel_processor *opp, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*opp_set_disp_pattern_generator)( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, @@ -324,7 +322,6 @@ struct opp_funcs { void (*opp_program_left_edge_extra_pixel)( struct output_pixel_processor *opp, bool count); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 6305e388612a..351b387ad606 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -65,13 +65,11 @@ struct audio_clock_info { uint32_t cts_48khz; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dynamic_metadata_mode { dmdata_dp, dmdata_hdmi, dmdata_dolby_vision }; -#endif struct encoder_info_frame { /* auxiliary video information */ @@ -90,9 +88,7 @@ struct encoder_info_frame { struct encoder_unblank_param { struct dc_link_settings link_settings; struct dc_crtc_timing timing; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int opp_cnt; -#endif }; struct encoder_set_dp_phy_pattern_param { @@ -109,7 +105,6 @@ struct stream_encoder { enum engine_id id; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct enc_state { uint32_t dsc_mode; // DISABLED 0; 1 or 2 indicate enabled state. 
uint32_t dsc_slice_width; @@ -119,13 +114,13 @@ struct enc_state { uint32_t sec_gsp_pps_enable; uint32_t sec_stream_enable; }; -#endif struct stream_encoder_funcs { void (*dp_set_stream_attribute)( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void (*hdmi_set_stream_attribute)( @@ -219,8 +214,6 @@ struct stream_encoder_funcs { enum dc_pixel_encoding *encoding, enum dc_color_depth *depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); void (*dp_set_dsc_config)( @@ -232,7 +225,6 @@ struct stream_encoder_funcs { void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps); -#endif void (*set_dynamic_metadata)(struct stream_encoder *enc, bool enable, @@ -242,7 +234,6 @@ struct stream_encoder_funcs { void (*dp_set_odm_combine)( struct stream_encoder *enc, bool odm_combine); -#endif }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 27c73caf74ee..e5e7d94026fc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -195,10 +195,8 @@ struct timing_generator_funcs { void (*lock)(struct timing_generator *tg); void (*lock_doublebuffer_disable)(struct timing_generator *tg); void (*lock_doublebuffer_enable)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void(*triplebuffer_unlock)(struct timing_generator *tg); void(*triplebuffer_lock)(struct timing_generator *tg); -#endif void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst); void (*enable_crtc_reset)(struct timing_generator *tg, @@ -210,7 +208,8 @@ struct timing_generator_funcs { bool enable, const struct dc_crtc_timing *timing); void (*set_drr)(struct timing_generator *tg, const struct drr_params *params); void (*set_static_screen_control)(struct timing_generator *tg, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void (*set_test_pattern)( struct timing_generator *tg, enum controller_dp_test_pattern test_pattern, @@ -235,7 +234,6 @@ struct timing_generator_funcs { bool (*is_optc_underflow_occurred)(struct timing_generator *tg); void (*clear_optc_underflow)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_dwb_source)(struct timing_generator *optc, uint32_t dwb_pipe_inst); @@ -243,7 +241,6 @@ struct timing_generator_funcs { uint32_t *num_of_input_segments, uint32_t *seg0_src_sel, uint32_t *seg1_src_sel); -#endif /** * Configure CRCs for the given timing generator. 
Return false if TG is @@ -267,13 +264,10 @@ struct timing_generator_funcs { void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*set_dsc_config)(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); @@ -281,7 +275,6 @@ struct timing_generator_funcs { void (*set_gsl_source_select)(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#endif }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d39c1e11def5..209118f9f193 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,326 +32,160 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -enum pipe_gating_control { - PIPE_GATING_CONTROL_DISABLE = 0, - PIPE_GATING_CONTROL_ENABLE, - PIPE_GATING_CONTROL_INIT -}; - enum vline_select { VLINE0, VLINE1 }; -struct dce_hwseq_wa { - bool blnd_crtc_trigger; - bool DEGVIDCN10_253; - bool false_optc_underflow; - bool DEGVIDCN10_254; - bool DEGVIDCN21; -}; - -struct hwseq_wa_state { - bool DEGVIDCN10_253_applied; -}; - -struct dce_hwseq { - struct dc_context *ctx; - const struct dce_hwseq_registers *regs; - const struct dce_hwseq_shift *shifts; - const struct dce_hwseq_mask *masks; - struct dce_hwseq_wa wa; - struct hwseq_wa_state wa_state; -}; - struct pipe_ctx; struct dc_state; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_stream_status; struct dc_writeback_info; -#endif struct dchub_init_data; -struct dc_static_screen_events; +struct dc_static_screen_params; struct resource_pool; -struct resource_context; -struct stream_resource; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config; struct dc_virtual_addr_space_config; -#endif -struct hubp; struct dpp; +struct dce_hwseq; struct hw_sequencer_funcs { + /* Embedded Display Related */ + void (*edp_power_control)(struct dc_link *link, bool enable); + void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); - void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - + /* Pipe Programming Related */ void (*init_hw)(struct dc *dc); - - void (*init_pipes)(struct dc *dc, struct dc_state *context); - - enum dc_status (*apply_ctx_to_hw)( - struct dc *dc, struct dc_state *context); - - void (*reset_hw_ctx_wrap)( - struct dc *dc, struct dc_state *context); - - void (*apply_ctx_for_surface)( - struct dc *dc, + void (*enable_accelerated_mode)(struct dc *dc, + struct dc_state *context); + enum dc_status (*apply_ctx_to_hw)(struct dc *dc, + struct dc_state *context); + void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, - int num_planes, + int num_planes, struct dc_state *context); + void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *context); - - void (*program_gamut_remap)( + void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*program_output_csc)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - 
uint16_t *matrix, - int opp_id); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - void (*program_front_end_for_ctx)( - struct dc *dc, - struct dc_state *context); - void (*program_triplebuffer)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - void (*set_flip_control_gsl)( - struct pipe_ctx *pipe_ctx, - bool flip_immediate); -#endif - - void (*update_plane_addr)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*plane_atomic_disconnect)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_dchub)( - struct dce_hwseq *hws, - struct dchub_init_data *dh_data); - -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 - int (*init_sys_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_phy_addr_space_config *pa_config); - void (*init_vm_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_virtual_addr_space_config *va_config, - int vmid); -#endif - void (*update_mpcc)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_pending_status)( + void (*update_dchub)(struct dce_hwseq *hws, + struct dchub_init_data *dh_data); + void (*wait_for_mpcc_disconnect)(struct dc *dc, + struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); - - bool (*set_input_transfer_func)( - struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - - bool (*set_output_transfer_func)( - struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - - void (*power_down)(struct dc *dc); - - void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context); - - void (*enable_timing_synchronization)( - struct dc *dc, - int group_index, - int group_size, - struct pipe_ctx *grouped_pipes[]); - - void (*enable_per_frame_crtc_position_reset)( - struct dc *dc, - int group_size, + void (*program_triplebuffer)(const struct dc *dc, + struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + void (*update_pending_status)(struct pipe_ctx *pipe_ctx); + + /* Pipe Lock Related */ + void (*pipe_control_lock_global)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*pipe_control_lock)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, + bool flip_immediate); + + /* Timing Related */ + void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, + struct crtc_position *position); + int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void (*enable_per_frame_crtc_position_reset)(struct dc *dc, + int group_size, struct pipe_ctx *grouped_pipes[]); + void (*enable_timing_synchronization)(struct dc *dc, + int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); + void (*setup_periodic_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); + void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, + unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); + void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, + int num_pipes, + const struct dc_static_screen_params *events); - void (*enable_display_pipe_clock_gating)( - struct dc_context *ctx, - bool clock_gating); - - bool (*enable_display_power_gating)( - struct dc *dc, - uint8_t controller_id, - struct dc_bios *dcb, - enum pipe_gating_control power_gating); - - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*update_info_frame)(struct pipe_ctx *pipe_ctx); - - void (*send_immediate_sdp_message)( - struct pipe_ctx *pipe_ctx, - const uint8_t *custom_sdp_message, - unsigned int sdp_message_size); - + /* Stream Related */ 
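
Editorial note: the hunks above replace the old dc_static_screen_events forward declaration with dc_static_screen_params, and the timing-generator hook now receives event_triggers and num_frames as separate arguments instead of one packed uint32_t. The definition of dc_static_screen_params is not part of this hunk, so the struct and field names in the sketch below are stand-ins that only mirror the two arguments visible in the timing_generator.h change; it illustrates the general move from an opaque control word to an explicit parameter struct.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the real dc_static_screen_params (not shown in this diff). */
    struct static_screen_params {
        uint32_t event_triggers;   /* which events may end the static-screen state */
        uint32_t num_frames;       /* frames to wait before treating the screen as static */
    };

    /* Old-style hook: a single packed value, meaning known only to the callee. */
    typedef void (*set_static_screen_legacy)(uint32_t value);

    /* New-style hook: call sites are self-describing. */
    typedef void (*set_static_screen)(const struct static_screen_params *params);

    static void tg_set_static_screen(const struct static_screen_params *params)
    {
        printf("triggers=0x%x num_frames=%u\n",
               params->event_triggers, params->num_frames);
    }

    int main(void)
    {
        /* Arbitrary example values. */
        struct static_screen_params params = {
            .event_triggers = 0x180,
            .num_frames = 2,
        };
        set_static_screen hook = tg_set_static_screen;

        hook(&params);
        return 0;
    }
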
void (*enable_stream)(struct pipe_ctx *pipe_ctx); - void (*disable_stream)(struct pipe_ctx *pipe_ctx); - + void (*blank_stream)(struct pipe_ctx *pipe_ctx); void (*unblank_stream)(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); - void (*blank_stream)(struct pipe_ctx *pipe_ctx); - - void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); + /* Bandwidth Related */ + void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context); + bool (*update_bandwidth)(struct dc *dc, struct dc_state *context); + void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context); - void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); - - void (*pipe_control_lock)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); - - void (*pipe_control_lock_global)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); - void (*blank_pixel_data)( - struct dc *dc, + /* Infopacket Related */ + void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); + void (*send_immediate_sdp_message)( struct pipe_ctx *pipe_ctx, - bool blank); - - void (*prepare_bandwidth)( - struct dc *dc, - struct dc_state *context); - void (*optimize_bandwidth)( - struct dc *dc, - struct dc_state *context); - - void (*exit_optimized_pwr_state)( - const struct dc *dc, - struct dc_state *context); - void (*optimize_pwr_state)( - const struct dc *dc, - struct dc_state *context); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - bool (*update_bandwidth)( - struct dc *dc, - struct dc_state *context); + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); + void (*update_info_frame)(struct pipe_ctx *pipe_ctx); + void (*set_dmdata_attributes)(struct pipe_ctx *pipe); void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx); bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); -#endif - - void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, - unsigned int vmin, unsigned int vmax, - unsigned int vmid, unsigned int vmid_frame_number); - - void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, - struct crtc_position *position); - - void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events); - - enum dc_status (*enable_stream_timing)( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - - void (*setup_stereo)( - struct pipe_ctx *pipe_ctx, - struct dc *dc); - - void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); - - void (*log_hw_state)(struct dc *dc, - struct dc_log_buffer_ctx *log_ctx); - void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); - void (*clear_status_bits)(struct dc *dc, unsigned int mask); - - void (*wait_for_mpcc_disconnect)(struct dc *dc, - struct resource_pool *res_pool, - struct pipe_ctx *pipe_ctx); - - void (*edp_power_control)( - struct dc_link *link, - bool enable); - void (*edp_backlight_control)( - struct dc_link *link, - bool enable); - void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); + /* Cursor Related */ void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); - void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline); - void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx); - bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*init_blank)(struct dc *dc, struct timing_generator *tg); - void (*disable_vga)(struct dce_hwseq *hws); - void 
(*bios_golden_init)(struct dc *dc); - void (*plane_atomic_power_down)(struct dc *dc, - struct dpp *dpp, - struct hubp *hubp); - - void (*plane_atomic_disable)( - struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_power_gating_plane)( - struct dce_hwseq *hws, - bool enable); - - void (*dpp_pg_control)( - struct dce_hwseq *hws, - unsigned int dpp_inst, - bool power_on); - - void (*hubp_pg_control)( - struct dce_hwseq *hws, - unsigned int hubp_inst, - bool power_on); - - void (*dsc_pg_control)( - struct dce_hwseq *hws, - unsigned int dsc_inst, - bool power_on); - + /* Colour Related */ + void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); + void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, int opp_id); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); - void (*program_all_writeback_pipes_in_tree)( + /* VM Related */ + int (*init_sys_ctx)(struct dce_hwseq *hws, struct dc *dc, - const struct dc_stream_state *stream, - struct dc_state *context); + struct dc_phy_addr_space_config *pa_config); + void (*init_vm_ctx)(struct dce_hwseq *hws, + struct dc *dc, + struct dc_virtual_addr_space_config *va_config, + int vmid); + + /* Writeback Related */ void (*update_writeback)(struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context); void (*enable_writeback)(struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); -#endif - enum dc_status (*set_clock)(struct dc *dc, - enum dc_clock_type clock_type, - uint32_t clk_khz, - uint32_t stepping); - void (*get_clock)(struct dc *dc, + bool (*mmhubbub_warmup)(struct dc *dc, + unsigned int num_dwb, + struct dc_writeback_info *wb_info); + + /* Clock Related */ + enum dc_status (*set_clock)(struct dc *dc, enum dc_clock_type clock_type, + uint32_t clk_khz, uint32_t stepping); + void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + void (*optimize_pwr_state)(const struct dc *dc, + struct dc_state *context); + void (*exit_optimized_pwr_state)(const struct dc *dc, + struct dc_state *context); + + /* Audio Related */ + void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); + void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); + + /* Stereo 3D Related */ + void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc); + + /* HW State Logging Related */ + void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); + void (*get_hw_state)(struct dc *dc, char *pBuf, + unsigned int bufSize, unsigned int mask); + void (*clear_status_bits)(struct dc *dc, unsigned int mask); + -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) - bool (*s0i3_golden_init_wa)(struct dc *dc); -#endif }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h new file mode 100644 index 000000000000..ecf566378ccd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -0,0 +1,156 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
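
Editorial note: the new hw_sequencer_private.h, whose license header begins above, splits the sequencer interface in two: hw_sequencer_funcs keeps only the entry points the display core calls, while workaround state, struct dce_hwseq, and the internal hooks move into hwseq_private_funcs, which is embedded in the sequencer context (hws->funcs). The sketch below shows that public/private split in miniature; the function and struct contents are illustrative stand-ins, not the driver's actual hooks.

    #include <stdio.h>

    struct dce_hwseq;

    /* Public table: entry points the display core is allowed to call. */
    struct hw_sequencer_funcs {
        void (*init_hw)(struct dce_hwseq *hws);
    };

    /* Private table: helpers shared only between hwseq implementations. */
    struct hwseq_private_funcs {
        void (*disable_vga)(struct dce_hwseq *hws);
    };

    /* The per-ASIC sequencer context embeds its private hooks. */
    struct dce_hwseq {
        struct hwseq_private_funcs funcs;
    };

    static void example_disable_vga(struct dce_hwseq *hws)
    {
        (void)hws;
        printf("VGA path disabled\n");
    }

    static void example_init_hw(struct dce_hwseq *hws)
    {
        /* Public hooks reach private helpers only through the context. */
        hws->funcs.disable_vga(hws);
    }

    int main(void)
    {
        struct dce_hwseq hws = { .funcs = { .disable_vga = example_disable_vga } };
        struct hw_sequencer_funcs funcs = { .init_hw = example_init_hw };

        funcs.init_hw(&hws);
        return 0;
    }
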
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HW_SEQUENCER_PRIVATE_H__ +#define __DC_HW_SEQUENCER_PRIVATE_H__ + +#include "dc_types.h" + +enum pipe_gating_control { + PIPE_GATING_CONTROL_DISABLE = 0, + PIPE_GATING_CONTROL_ENABLE, + PIPE_GATING_CONTROL_INIT +}; + +struct dce_hwseq_wa { + bool blnd_crtc_trigger; + bool DEGVIDCN10_253; + bool false_optc_underflow; + bool DEGVIDCN10_254; + bool DEGVIDCN21; +}; + +struct hwseq_wa_state { + bool DEGVIDCN10_253_applied; +}; + +struct pipe_ctx; +struct dc_state; +struct dc_stream_status; +struct dc_writeback_info; +struct dchub_init_data; +struct dc_static_screen_params; +struct resource_pool; +struct resource_context; +struct stream_resource; +struct dc_phy_addr_space_config; +struct dc_virtual_addr_space_config; +struct hubp; +struct dpp; +struct dce_hwseq; +struct timing_generator; +struct tg_color; +struct output_pixel_processor; + +struct hwseq_private_funcs { + + void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_pipes)(struct dc *dc, struct dc_state *context); + void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context); + void (*update_plane_addr)(const struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + bool (*set_input_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); + bool (*set_output_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); + void (*power_down)(struct dc *dc); + void (*enable_display_pipe_clock_gating)(struct dc_context *ctx, + bool clock_gating); + bool (*enable_display_power_gating)(struct dc *dc, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); + void (*blank_pixel_data)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); + enum dc_status (*enable_stream_timing)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); + void (*edp_backlight_control)(struct dc_link *link, + bool enable); + void (*setup_vupdate_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_blank)(struct dc *dc, struct timing_generator *tg); + void 
(*disable_vga)(struct dce_hwseq *hws); + void (*bios_golden_init)(struct dc *dc); + void (*plane_atomic_power_down)(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp); + void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*enable_power_gating_plane)(struct dce_hwseq *hws, + bool enable); + void (*dpp_pg_control)(struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); + void (*hubp_pg_control)(struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); + void (*dsc_pg_control)(struct dce_hwseq *hws, + unsigned int dsc_inst, + bool power_on); + void (*update_odm)(struct dc *dc, struct dc_state *context, + struct pipe_ctx *pipe_ctx); + void (*program_all_writeback_pipes_in_tree)(struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context); + bool (*s0i3_golden_init_wa)(struct dc *dc); + void (*get_surface_visual_confirm_color)( + const struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); + void (*verify_allow_pstate_change_high)(struct dc *dc); + void (*program_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); + bool (*wait_for_blank_complete)(struct output_pixel_processor *opp); + void (*dccg_init)(struct dce_hwseq *hws); + bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); + bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +}; + +struct dce_hwseq { + struct dc_context *ctx; + const struct dce_hwseq_registers *regs; + const struct dce_hwseq_shift *shifts; + const struct dce_hwseq_mask *masks; + struct dce_hwseq_wa wa; + struct hwseq_wa_state wa_state; + struct hwseq_private_funcs funcs; + +}; + +#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 4eff5d38a2f9..9af7ee5bc8ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal); bool dp_set_hw_training_pattern( struct dc_link *link, - enum dc_dp_training_pattern pattern); + enum dc_dp_training_pattern pattern, + uint32_t offset); void dp_set_hw_lane_settings( struct dc_link *link, - const struct link_training_settings *link_settings); + const struct link_training_settings *link_settings, + uint32_t offset); void dp_set_hw_test_pattern( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 8503d9cc4763..2470405e996b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr, #define IX_REG_READ(index_reg_name, data_reg_name, index) \ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index)) +#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) 
\ + generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \ + IND_REG(index), \ + n, __VA_ARGS__) +#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \ + IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \ + FN(data_reg_name, field), val) #define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \ generic_indirect_reg_update_ex(CTX, \ @@ -479,10 +486,35 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index); +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, + uint32_t addr_index, uint32_t addr_data, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...); + uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +/* register offload macros + * + * instead of MMIO to register directly, in some cases we want + * to gather register sequence and execute the register sequence + * from another thread so we optimize time required for lengthy ops + */ + +/* start gathering register sequence */ +#define REG_SEQ_START() \ + reg_sequence_start_gather(CTX) + +/* start execution of register sequence gathered since REG_SEQ_START */ +#define REG_SEQ_SUBMIT() \ + reg_sequence_start_execute(CTX) + +/* wait for the last REG_SEQ_SUBMIT to finish */ +#define REG_SEQ_WAIT_DONE() \ + reg_sequence_wait_done(CTX) + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index bef224bf803e..5ae8ada154ef 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -46,12 +46,8 @@ struct resource_caps { int num_pll; int num_dwb; int num_ddc; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int num_vmid; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT int num_dsc; -#endif -#endif }; struct resource_straps { @@ -181,4 +177,6 @@ void update_audio_usage( unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *aud_chk); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index ea75420fc876..0f682ac53bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -60,27 +60,23 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN IRQ_DCN1 = irq_service_dcn10.o AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1) -endif ############################################################################### # DCN 20 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 IRQ_DCN2 = irq_service_dcn20.o AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2) -endif ############################################################################### # DCN 21 ############################################################################### -ifdef 
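
Editorial note: the reg_helper.h hunk above adds IX_REG_GET/IX_REG_GET_N and a generic_indirect_reg_get() helper so indirect (index/data) registers can be read and decoded into fields the same way direct registers are, and introduces REG_SEQ_START/REG_SEQ_SUBMIT/REG_SEQ_WAIT_DONE markers for gathering a register sequence and executing it from another thread. The standalone sketch below shows only the shift/mask varargs decoding idea against a fake index/data register pair; it does not reuse the driver's context, macros, or exact signatures.

    #include <stdarg.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fake indirect register file: writing the index register selects which
     * internal register the data register window accesses. */
    static uint32_t fake_regs[16];
    static uint32_t index_reg;

    static void write_index(uint32_t value) { index_reg = value & 0xf; }
    static void write_data(uint32_t value)  { fake_regs[index_reg] = value; }
    static uint32_t read_data(void)         { return fake_regs[index_reg]; }

    /* Decode n fields from one indirect read.  Each field is passed as a
     * (shift, mask, destination pointer) triple, mirroring the
     * shift1/mask1/field_value1 style used by the driver's helper. */
    static uint32_t indirect_reg_get(uint32_t index, int n, ...)
    {
        uint32_t reg_val;
        va_list ap;
        int i;

        write_index(index);
        reg_val = read_data();

        va_start(ap, n);
        for (i = 0; i < n; i++) {
            uint32_t shift = va_arg(ap, uint32_t);
            uint32_t mask = va_arg(ap, uint32_t);
            uint32_t *dest = va_arg(ap, uint32_t *);

            *dest = (reg_val & mask) >> shift;
        }
        va_end(ap);

        return reg_val;
    }

    int main(void)
    {
        uint32_t enable, count;

        /* Populate internal register 3: bit 0 = enable, bits 15:8 = count. */
        write_index(3);
        write_data((42u << 8) | 1u);

        indirect_reg_get(3, 2, 0u, 0x1u, &enable, 8u, 0xff00u, &count);
        printf("enable=%u count=%u\n", enable, count);
        return 0;
    }
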
CONFIG_DRM_AMD_DC_DCN2_1 IRQ_DCN21 = irq_service_dcn21.o AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 1a581c464345..378cc11aa047 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, bool enable) { struct dc_context *dc_ctx = irq_service->ctx; - struct dc *core_dc = irq_service->ctx->dc; + struct dc *dc = irq_service->ctx->dc; enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(irq_service->ctx->dc, info->src_id, @@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK; struct timing_generator *tg = - core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; + dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; if (enable) { if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { @@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct(struct irq_service *irq_service, +static void dce110_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data) { dal_irq_service_construct(irq_service, init_data); @@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data) if (!irq_service) return NULL; - construct(irq_service, init_data); + dce110_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 15380336cb51..2fe4703395f3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce120_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce120_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 281fee8ad1e5..17e426b80a00 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce80_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce80_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index cc8e7dedccce..f956b3bde680 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ 
b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = { .to_dal_irq_source = to_dal_irq_source_dcn10 }; -static void construct( +static void dcn10_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn10_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 5db29bf582d3..2a1fea501f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = { .to_dal_irq_source = to_dal_irq_source_dcn20 }; -static void construct( +static void dcn20_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn20_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index cbe7818529bb..1b971265418b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = { .to_dal_irq_source = to_dal_irq_source_dcn21 }; -static void construct( +static void dcn21_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn21_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 0878550a8178..33053b9fe6bd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -38,7 +38,7 @@ #include "dce120/irq_service_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/irq_service_dcn10.h" #endif diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 30ec80ac6fc8..c34eba19860a 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -1,5 +1,6 @@ /* * Copyright 2012-16 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -29,6 +30,7 @@ #include <linux/kgdb.h> #include <linux/kref.h> #include <linux/types.h> +#include <linux/slab.h> #include <asm/byteorder.h> @@ -48,8 +50,39 @@ #define dm_error(fmt, ...) 
DRM_ERROR(fmt, ##__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_X86) #include <asm/fpu/api.h> +#define DC_FP_START() kernel_fpu_begin() +#define DC_FP_END() kernel_fpu_end() +#elif defined(CONFIG_PPC64) +#include <asm/switch_to.h> +#include <asm/cputable.h> +#define DC_FP_START() { \ + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ + preempt_disable(); \ + enable_kernel_vsx(); \ + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ + preempt_disable(); \ + enable_kernel_altivec(); \ + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ + preempt_disable(); \ + enable_kernel_fp(); \ + } \ +} +#define DC_FP_END() { \ + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ + disable_kernel_vsx(); \ + preempt_enable(); \ + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ + disable_kernel_altivec(); \ + preempt_enable(); \ + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ + disable_kernel_fp(); \ + preempt_enable(); \ + } \ +} +#endif #endif /* diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index ff664bdb1482..b8040da94b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) {} static void virtual_stream_encoder_hdmi_set_stream_attribute( @@ -81,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute( struct stream_encoder *enc) {} -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void virtual_enc_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) {} -#endif -#endif static const struct stream_encoder_funcs virtual_str_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .dp_set_odm_combine = virtual_enc_dp_set_odm_combine, -#endif -#endif .dp_set_stream_attribute = virtual_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = |
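
Editorial note: the os_types.h hunk earlier in this diff replaces the bare x86-only <asm/fpu/api.h> include with DC_FP_START()/DC_FP_END() wrappers: on x86 they map to kernel_fpu_begin()/kernel_fpu_end(), while the new PPC64 path (from the Raptor Engineering change) disables preemption and enables VSX, AltiVec, or plain FP depending on CPU features. Floating-point code in the DML/DCN paths is expected to be bracketed by these macros; the fragment below is a hypothetical caller written only to show the intended usage pattern, not a function that exists in the driver.

    /* Hypothetical caller: bracket kernel floating-point math with the
     * DC_FP_START()/DC_FP_END() macros defined in os_types.h above. */
    static void example_calculate_watermarks(struct dc *dc, struct dc_state *context)
    {
        DC_FP_START();  /* enter a region where FPU/vector registers may be used */

        /* ... floating-point DML calculations would run here ... */

        DC_FP_END();    /* leave the FPU region; preemption is re-enabled */
    }
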