Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 338 |
1 files changed, 265 insertions, 73 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2f4422d4c8a4..41e4774abdb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -91,10 +91,14 @@
 #include "dcn/dcn_1_0_offset.h"
 #include "dcn/dcn_1_0_sh_mask.h"
 #include "soc15_hw_ip.h"
+#include "soc15_common.h"
 #include "vega10_ip_offset.h"
 #include "soc15_common.h"
 
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
 #include "modules/inc/mod_info_packet.h"
@@ -120,6 +124,11 @@ MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
 
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -1258,10 +1267,20 @@ static void vblank_control_worker(struct work_struct *work)
 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n",
 		      dm->active_vblank_irq_count == 0);
 
-	/* Control PSR based on vblank requirements from OS */
+	/*
+	 * Control PSR based on vblank requirements from OS
+	 *
+	 * If panel supports PSR SU, there's no need to disable PSR when OS is
+	 * submitting fast atomic commits (we infer this by whether the OS
+	 * requests vblank events). Fast atomic commits will simply trigger a
+	 * full-frame-update (FFU); a specific case of selective-update (SU)
+	 * where the SU region is the full hactive*vactive region. See
+	 * fill_dc_dirty_rects().
+	 */
 	if (vblank_work->stream && vblank_work->stream->link) {
 		if (vblank_work->enable) {
-			if (vblank_work->stream->link->psr_settings.psr_allow_active)
+			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+			    vblank_work->stream->link->psr_settings.psr_allow_active)
 				amdgpu_dm_psr_disable(vblank_work->stream);
 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
@@ -1509,6 +1528,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		DRM_INFO("Seamless boot condition check passed\n");
 	}
 
+	init_data.flags.enable_mipi_converter_optimization = true;
+
 	INIT_LIST_HEAD(&adev->dm.da_list);
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
@@ -1803,6 +1824,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	case IP_VERSION(3, 1, 3):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
+	case IP_VERSION(3, 2, 0):
+	case IP_VERSION(3, 2, 1):
 		return 0;
 	default:
 		break;
@@ -1926,6 +1949,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		dmub_asic = DMUB_ASIC_DCN316;
 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
 		break;
+	case IP_VERSION(3, 2, 0):
+		dmub_asic = DMUB_ASIC_DCN32;
+		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+		break;
+	case IP_VERSION(3, 2, 1):
+		dmub_asic = DMUB_ASIC_DCN321;
+		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+		break;
 	default:
 		/* ASIC doesn't support DMUB. */
 		return 0;
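The vblank_control_worker() hunk above only drops PSR for panels below PSR SU when the OS starts asking for vblank events; PSR-SU panels stay in PSR and fast commits simply become full-frame selective updates. As an editor's illustration (not part of the patch, with simplified stand-ins for the DC types), the gating rule reduces to:

/*
 * Editor's illustration, not part of the patch: the PSR gating rule described
 * in the vblank_control_worker() comment, with hypothetical simplified types.
 */
#include <stdbool.h>
#include <stdio.h>

enum psr_version { PSR_VERSION_1 = 1, PSR_VERSION_SU_1 = 2 };

struct psr_settings {
	enum psr_version psr_version;
	bool psr_allow_active;
};

/* PSR 1 panels must exit PSR while the OS wants vblank events; PSR SU panels
 * can stay in PSR because fast commits become full-frame updates (FFU). */
static bool must_disable_psr_on_vblank_enable(const struct psr_settings *s)
{
	return s->psr_version < PSR_VERSION_SU_1 && s->psr_allow_active;
}

int main(void)
{
	struct psr_settings psr1 = { PSR_VERSION_1, true };
	struct psr_settings su = { PSR_VERSION_SU_1, true };

	printf("PSR1 panel: disable on vblank enable? %d\n",
	       must_disable_psr_on_vblank_enable(&psr1)); /* 1 */
	printf("PSR-SU panel: disable on vblank enable? %d\n",
	       must_disable_psr_on_vblank_enable(&su));   /* 0 */
	return 0;
}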
@@ -2172,7 +2203,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 		} else {
 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
 			if (ret < 0) {
-				drm_dp_mst_topology_mgr_set_mst(mgr, false);
+				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+					aconnector->dc_link);
 				need_hotplug = true;
 			}
 		}
@@ -2554,34 +2586,6 @@ cleanup:
 	return;
 }
 
-static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
-{
-	struct dc_stream_state *stream_state;
-	struct amdgpu_dm_connector *aconnector = link->priv;
-	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
-	struct dc_stream_update stream_update;
-	bool dpms_off = true;
-
-	memset(&stream_update, 0, sizeof(stream_update));
-	stream_update.dpms_off = &dpms_off;
-
-	mutex_lock(&adev->dm.dc_lock);
-	stream_state = dc_stream_find_from_link(link);
-
-	if (stream_state == NULL) {
-		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
-		mutex_unlock(&adev->dm.dc_lock);
-		return;
-	}
-
-	stream_update.stream = stream_state;
-	acrtc_state->force_dpms_off = true;
-	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
-				     stream_state, &stream_update,
-				     stream_state->ctx->dc->current_state);
-	mutex_unlock(&adev->dm.dc_lock);
-}
-
 static int dm_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
@@ -2814,7 +2818,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 
 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 {
-	u32 max_cll, min_cll, max, min, q, r;
+	u32 max_avg, min_cll, max, min, q, r;
 	struct amdgpu_dm_backlight_caps *caps;
 	struct amdgpu_display_manager *dm;
 	struct drm_connector *conn_base;
@@ -2844,7 +2848,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	caps = &dm->backlight_caps[i];
 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 	caps->aux_support = false;
-	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
 	if (caps->ext_caps->bits.oled == 1 /*||
@@ -2872,8 +2876,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	 * The results of the above expressions can be verified at
 	 * pre_computed_values.
 	 */
-	q = max_cll >> 5;
-	r = max_cll % 32;
+	q = max_avg >> 5;
+	r = max_avg % 32;
 	max = (1 << q) * pre_computed_values[r];
 
 	// min luminance: maxLum * (CV/255)^2 / 100
@@ -3032,16 +3036,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 	struct drm_device *dev = connector->dev;
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-	struct dm_crtc_state *dm_crtc_state = NULL;
+#endif
 
 	if (adev->dm.disable_hpd_irq)
 		return;
 
-	if (dm_con_state->base.state && dm_con_state->base.crtc)
-		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
-					dm_con_state->base.state,
-					dm_con_state->base.crtc));
 	/*
 	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
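The update_connector_ext_caps() hunk above switches the peak-luminance input from max_cll to max_fall (max_avg) but keeps the same arithmetic: the code value is split into a power-of-two part and a fractional lookup, giving roughly 50 * 2^(CV/32) nits, and the minimum is derived as maxLum * (CV/255)^2 / 100. An editor's sketch of the same math (pow() instead of the driver's pre_computed_values table; the sample code values are made up):

/*
 * Editor's illustration, not part of the patch: the luminance math behind the
 * max_avg/min_cll handling in update_connector_ext_caps().
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_avg = 100;	/* hypothetical hdmi_type1.max_fall code value */
	unsigned int min_cll = 20;	/* hypothetical hdmi_type1.min_cll code value  */

	/* max luminance in nits: ~50 * 2^(CV/32); the driver computes the same
	 * thing as (1 << (CV / 32)) * pre_computed_values[CV % 32]. */
	double max = 50.0 * pow(2.0, max_avg / 32.0);

	/* min luminance: maxLum * (CV/255)^2 / 100, as in the driver comment */
	double min = max * (min_cll / 255.0) * (min_cll / 255.0) / 100.0;

	printf("max ~= %.1f nits, min ~= %.4f nits\n", max, min);
	return 0;
}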
@@ -3071,11 +3072,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 		drm_kms_helper_connector_hotplug_event(connector);
 
 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
-		if (new_connection_type == dc_connection_none &&
-		    aconnector->dc_link->type == dc_connection_none &&
-		    dm_crtc_state)
-			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
-
 		amdgpu_dm_update_connector_after_detect(aconnector);
 
 		drm_modeset_lock_all(dev);
@@ -3868,9 +3864,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 					    int bl_idx)
 {
@@ -4074,7 +4067,6 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 	else
 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
 }
-#endif
 
 static int initialize_plane(struct amdgpu_display_manager *dm,
 			    struct amdgpu_mode_info *mode_info, int plane_id,
@@ -4120,9 +4112,6 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
 static void register_backlight_device(struct amdgpu_display_manager *dm,
 				      struct dc_link *link)
 {
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
 	    link->type != dc_connection_none) {
 		/*
@@ -4138,7 +4127,6 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
 			dm->num_of_edps++;
 		}
 	}
-#endif
 }
 
@@ -4235,6 +4223,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case IP_VERSION(3, 1, 3):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
+	case IP_VERSION(3, 2, 0):
+	case IP_VERSION(3, 2, 1):
 	case IP_VERSION(2, 1, 0):
 		if (register_outbox_irq_handlers(dm->adev)) {
 			DRM_ERROR("DM: Failed to initialize IRQ\n");
 			goto fail;
 		}
@@ -4253,6 +4243,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		case IP_VERSION(3, 1, 3):
 		case IP_VERSION(3, 1, 5):
 		case IP_VERSION(3, 1, 6):
+		case IP_VERSION(3, 2, 0):
+		case IP_VERSION(3, 2, 1):
 			psr_feature_enabled = true;
 			break;
 		default:
 			break;
 		}
 	}
 
-	/* Disable vblank IRQs aggressively for power-saving. */
-	adev_to_drm(adev)->vblank_disable_immediate = true;
-
 	/* loops over all connectors on the board */
 	for (i = 0; i < link_cnt; i++) {
 		struct dc_link *link = NULL;
@@ -4370,6 +4359,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case IP_VERSION(3, 1, 3):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
+	case IP_VERSION(3, 2, 0):
+	case IP_VERSION(3, 2, 1):
 		if (dcn10_register_irq_handlers(dm->adev)) {
 			DRM_ERROR("DM: Failed to initialize IRQ\n");
 			goto fail;
@@ -4556,6 +4547,8 @@ static int dm_early_init(void *handle)
 	case IP_VERSION(3, 1, 3):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
+	case IP_VERSION(3, 2, 0):
+	case IP_VERSION(3, 2, 1):
 		adev->mode_info.num_crtc = 4;
 		adev->mode_info.num_hpd = 4;
 		adev->mode_info.num_dig = 4;
@@ -4865,7 +4858,9 @@ fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
-	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
+	unsigned int pipes_log2;
+
+	pipes_log2 = min(5u, mod_pipe_xor_bits);
 
 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
 
@@ -5201,8 +5196,73 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
 }
 
+static void
+add_gfx11_modifiers(struct amdgpu_device *adev,
+		    uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+	int num_pipes = 0;
+	int pipe_xor_bits = 0;
+	int num_pkrs = 0;
+	int pkrs = 0;
+	u32 gb_addr_config;
+	u8 i = 0;
+	unsigned swizzle_r_x;
+	uint64_t modifier_r_x;
+	uint64_t modifier_dcc_best;
+	uint64_t modifier_dcc_4k;
+
+	/* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from
+	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
+	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+	ASSERT(gb_addr_config != 0);
+
+	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+	pkrs = ilog2(num_pkrs);
+	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
+	pipe_xor_bits = ilog2(num_pipes);
+
+	for (i = 0; i < 2; i++) {
+		/* Insert the best one first. */
+		/* R_X swizzle modes are the best for rendering and DCC requires them. */
+		if (num_pipes > 16)
+			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
+		else
+			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
+
+		modifier_r_x = AMD_FMT_MOD |
+			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+			AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
+			AMD_FMT_MOD_SET(PACKERS, pkrs);
+
+		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
+		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
+			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
+
+		/* DCC settings for 4K and greater resolutions. (required by display hw) */
+		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
+
+		add_modifier(mods, size, capacity, modifier_dcc_best);
+		add_modifier(mods, size, capacity, modifier_dcc_4k);
+
+		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+
+		add_modifier(mods, size, capacity, modifier_r_x);
+	}
+
+	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
+}
+
 static int
-get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
 {
 	uint64_t size = 0, capacity = 128;
 	*mods = NULL;
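add_gfx11_modifiers() above reads GB_ADDR_CONFIG directly because the GFX11 config fields are not populated yet at this point, then turns NUM_PIPES/NUM_PKRS into the PIPE_XOR_BITS/PACKERS modifier fields. A minimal editor's sketch of that decode step; the register layout below is invented for the example, while the real driver uses the generated REG_GET_FIELD() masks and ilog2():

/*
 * Editor's illustration, not part of the patch: decoding a packed config
 * register into pipe/packer counts.  Field offsets are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_FIELD(val, shift, mask)	(((val) >> (shift)) & (mask))

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Hypothetical register value encoding NUM_PIPES=3 and NUM_PKRS=2. */
	uint32_t gb_addr_config = (3u << 0) | (2u << 8);

	unsigned int num_pipes = 1u << REG_FIELD(gb_addr_config, 0, 0x7);
	unsigned int num_pkrs  = 1u << REG_FIELD(gb_addr_config, 8, 0x7);

	printf("pipes=%u (pipe_xor_bits=%u), pkrs=%u (packers=%u)\n",
	       num_pipes, ilog2_u32(num_pipes),
	       num_pkrs, ilog2_u32(num_pkrs));
	return 0;
}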
@@ -5234,6 +5294,9 @@ get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
 		else
 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
 		break;
+	case AMDGPU_FAMILY_GC_11_0_0:
+		add_gfx11_modifiers(adev, mods, &size, &capacity);
+		break;
 	}
 
 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -5272,7 +5335,7 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
 		dcc->enable = 1;
 		dcc->meta_pitch = afb->base.pitches[1];
 		dcc->independent_64b_blks = independent_64b_blks;
-		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
 			if (independent_64b_blks && independent_128b_blks)
 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
 			else if (independent_128b_blks)
@@ -5640,6 +5703,117 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	return 0;
 }
 
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ *         remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracts dirty rects
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ *
+ * Today, amdgpu_dm only supports the MPO and cursor usecase.
+ *
+ * TODO: Also enable for FB_DAMAGE_CLIPS
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+				struct drm_plane_state *old_plane_state,
+				struct drm_plane_state *new_plane_state,
+				struct drm_crtc_state *crtc_state,
+				struct dc_flip_addrs *flip_addrs)
+{
+	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+	struct rect *dirty_rects = flip_addrs->dirty_rects;
+	uint32_t num_clips;
+	bool bb_changed;
+	bool fb_changed;
+	uint32_t i = 0;
+
+	flip_addrs->dirty_rect_count = 0;
+
+	/*
+	 * Cursor plane has it's own dirty rect update interface. See
+	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+	 */
+	if (plane->type == DRM_PLANE_TYPE_CURSOR)
+		return;
+
+	/*
+	 * Today, we only consider MPO use-case for PSR SU. If MPO not
+	 * requested, and there is a plane update, do FFU.
+	 */
+	if (!dm_crtc_state->mpo_requested) {
+		dirty_rects[0].x = 0;
+		dirty_rects[0].y = 0;
+		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+		flip_addrs->dirty_rect_count = 1;
+		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+				 new_plane_state->plane->base.id,
+				 dm_crtc_state->base.mode.crtc_hdisplay,
+				 dm_crtc_state->base.mode.crtc_vdisplay);
+		return;
+	}
+
+	/*
+	 * MPO is requested. Add entire plane bounding box to dirty rects if
+	 * flipped to or damaged.
+	 *
+	 * If plane is moved or resized, also add old bounding box to dirty
+	 * rects.
+	 */
+	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+	fb_changed = old_plane_state->fb->base.id !=
+		     new_plane_state->fb->base.id;
+	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
+		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
+		      old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+			 new_plane_state->plane->base.id,
+			 bb_changed, fb_changed, num_clips);
+
+	if (num_clips || fb_changed || bb_changed) {
+		dirty_rects[i].x = new_plane_state->crtc_x;
+		dirty_rects[i].y = new_plane_state->crtc_y;
+		dirty_rects[i].width = new_plane_state->crtc_w;
+		dirty_rects[i].height = new_plane_state->crtc_h;
+		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+				 new_plane_state->plane->base.id,
+				 dirty_rects[i].x, dirty_rects[i].y,
+				 dirty_rects[i].width, dirty_rects[i].height);
+		i += 1;
+	}
+
+	/* Add old plane bounding-box if plane is moved or resized */
+	if (bb_changed) {
+		dirty_rects[i].x = old_plane_state->crtc_x;
+		dirty_rects[i].y = old_plane_state->crtc_y;
+		dirty_rects[i].width = old_plane_state->crtc_w;
+		dirty_rects[i].height = old_plane_state->crtc_h;
+		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+				 old_plane_state->plane->base.id,
+				 dirty_rects[i].x, dirty_rects[i].y,
+				 dirty_rects[i].width, dirty_rects[i].height);
+		i += 1;
+	}
+
+	flip_addrs->dirty_rect_count = i;
+}
+
 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 					   const struct dm_connector_state *dm_state,
 					   struct dc_stream_state *stream)
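fill_dc_dirty_rects() above picks between a single full-frame rectangle (no MPO requested) and per-plane bounding boxes (MPO, with the old bounds added on a move or resize). The same selection, reduced to plain structs in an editor's sketch; names are simplified and the num_clips/fb_changed handling is collapsed into two flags:

/*
 * Editor's illustration, not part of the patch: the dirty-rect selection done
 * by fill_dc_dirty_rects(), with simplified stand-in types.
 */
#include <stdbool.h>
#include <stdio.h>

struct rect { int x, y, w, h; };

static int pick_dirty_rects(bool mpo_requested, bool fb_changed,
			    struct rect old_bb, struct rect new_bb,
			    struct rect mode, struct rect out[2])
{
	bool bb_changed = old_bb.x != new_bb.x || old_bb.y != new_bb.y ||
			  old_bb.w != new_bb.w || old_bb.h != new_bb.h;
	int n = 0;

	if (!mpo_requested) {
		/* Full-frame update: one rect covering the whole mode. */
		out[n++] = mode;
		return n;
	}

	if (fb_changed || bb_changed)
		out[n++] = new_bb;	/* flip or damage: new plane bounds */
	if (bb_changed)
		out[n++] = old_bb;	/* move/resize: also repair old bounds */
	return n;
}

int main(void)
{
	struct rect mode = { 0, 0, 1920, 1080 };
	struct rect oldp = { 100, 100, 256, 256 };
	struct rect newp = { 300, 100, 256, 256 };
	struct rect out[2];
	int i, n = pick_dirty_rects(true, false, oldp, newp, mode, out);

	for (i = 0; i < n; i++)
		printf("rect %d: %d,%d %dx%d\n", i, out[i].x, out[i].y,
		       out[i].w, out[i].h);
	return 0;
}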
@@ -6587,7 +6761,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	state->freesync_config = cur->freesync_config;
 	state->cm_has_degamma = cur->cm_has_degamma;
 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-	state->force_dpms_off = cur->force_dpms_off;
+	state->mpo_requested = cur->mpo_requested;
 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
 
 	return &state->base;
@@ -6679,7 +6853,7 @@ static void dm_disable_vblank(struct drm_crtc *crtc)
 	dm_set_vblank(crtc, false);
 }
 
-/* Implemented only the options currently availible for the driver */
+/* Implemented only the options currently available for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
 	.destroy = amdgpu_dm_crtc_destroy,
@@ -6846,15 +7020,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 	if (aconnector->mst_mgr.dev)
 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 	for (i = 0; i < dm->num_of_edps; i++) {
 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
 			backlight_device_unregister(dm->backlight_dev[i]);
 			dm->backlight_dev[i] = NULL;
 		}
 	}
-#endif
 
 	if (aconnector->dc_em_sink)
 		dc_sink_release(aconnector->dc_em_sink);
@@ -7042,7 +7213,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		break;
 	}
 
-	dc_result = dc_validate_stream(adev->dm.dc, stream);
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+	if (dc_result == DC_OK)
+		dc_result = dc_validate_stream(adev->dm.dc, stream);
 
 	if (dc_result != DC_OK) {
 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
@@ -7342,7 +7517,7 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)
 
 }
 
-static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
 {
 	switch (display_color_depth) {
 	case COLOR_DEPTH_666:
@@ -9224,6 +9399,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->surface_updates[planes_count].plane_info =
 			&bundle->plane_infos[planes_count];
 
+		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
+				    new_crtc_state,
+				    &bundle->flip_addrs[planes_count]);
+
 		/*
 		 * Only allow immediate flips for fast updates that don't
 		 * change FB pitch, DCC state, rotation or mirroing.
@@ -9310,8 +9489,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	 * and rely on sending it from software.
 	 */
 	if (acrtc_attach->base.state->event &&
-	    acrtc_state->active_planes > 0 &&
-	    !acrtc_state->force_dpms_off) {
+	    acrtc_state->active_planes > 0) {
 		drm_crtc_vblank_get(pcrtc);
 
 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -9419,6 +9597,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			/* Allow PSR when skip count is 0. */
 			acrtc_attach->dm_irq_params.allow_psr_entry =
 				!aconn->psr_skip_count;
+
+			/*
+			 * If sink supports PSR SU, there is no need to rely on
+			 * a vblank event disable request to enable PSR. PSR SU
+			 * can be enabled immediately once OS demonstrates an
+			 * adequate number of fast atomic commits to notify KMD
+			 * of update events. See `vblank_control_worker()`.
+			 */
+			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+			    acrtc_attach->dm_irq_params.allow_psr_entry &&
+			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_enable(acrtc_state->stream);
 		} else {
 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
 		}
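The amdgpu_dm_commit_planes() hunk above adds a second PSR entry path: PSR1 panels still wait for the vblank-disable worker, while PSR-SU panels can be enabled straight from the flip path once psr_skip_count has seen enough fast commits. A compact editor's sketch of that condition with simplified, hypothetical types:

/*
 * Editor's illustration, not part of the patch: the flip-path PSR enable
 * condition for PSR-SU panels.  Names are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum psr_version { PSR_VERSION_1 = 1, PSR_VERSION_SU_1 = 2 };

static bool enable_psr_in_flip_path(enum psr_version ver, int psr_skip_count,
				    bool psr_allow_active)
{
	/* allow_psr_entry becomes true once the skip count reaches zero. */
	bool allow_psr_entry = psr_skip_count == 0;

	return ver >= PSR_VERSION_SU_1 && allow_psr_entry && !psr_allow_active;
}

int main(void)
{
	printf("PSR-SU, warmed up:       %d\n",
	       enable_psr_in_flip_path(PSR_VERSION_SU_1, 0, false)); /* 1 */
	printf("PSR-SU, still skipping:  %d\n",
	       enable_psr_in_flip_path(PSR_VERSION_SU_1, 2, false)); /* 0 */
	printf("PSR1 (worker handles it): %d\n",
	       enable_psr_in_flip_path(PSR_VERSION_1, 0, false));    /* 0 */
	return 0;
}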
@@ -9912,15 +10102,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	/* Update audio instances for each connector. */
 	amdgpu_dm_commit_audio(dev, state);
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
-	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 	/* restore the backlight level */
 	for (i = 0; i < dm->num_of_edps; i++) {
 		if (dm->backlight_dev[i] &&
 		    (dm->actual_brightness[i] != dm->brightness[i]))
 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
 	}
-#endif
+
 	/*
 	 * send vblank event on all events not handled in flip and
 	 * mark consumed event for drm_atomic_helper_commit_hw_done
@@ -10368,7 +10556,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 	 * added MST connectors not found in existing crtc_state in the chained mode
 	 * TODO: need to dig out the root cause of that
 	 */
-	if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+	if (!aconnector)
 		goto skip_modeset;
 
 	if (modereset_required(new_crtc_state))
@@ -10979,7 +11167,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			}
 		}
 	}
-	pre_validate_dsc(state, &dm_state, vars);
+	if (!pre_validate_dsc(state, &dm_state, vars)) {
+		ret = -EINVAL;
+		goto fail;
+	}
 	}
 #endif
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -11225,6 +11416,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+			ret = -EINVAL;
 			goto fail;
 		}
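The fill_dc_dirty_rects() kernel-doc above leaves FB_DAMAGE_CLIPS as a TODO; a damage-aware client would hand the kernel its clip list through that plane property. A hypothetical libdrm sketch of the userspace side follows; the fd, plane_id, property id and atomic request are assumed to have been obtained elsewhere (drmOpen, plane/property enumeration, drmModeAtomicAlloc), and the clip coordinates are made up:

/*
 * Editor's illustration, not part of the patch: attaching damage clips to a
 * plane via the FB_DAMAGE_CLIPS blob property from userspace.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_mode.h>

static int add_damage_clips(int fd, drmModeAtomicReqPtr req,
			    uint32_t plane_id, uint32_t fb_damage_clips_prop_id)
{
	/* Two small rectangles that changed since the previous frame. */
	struct drm_mode_rect clips[2] = {
		{ .x1 = 0,   .y1 = 0,   .x2 = 256, .y2 = 64  },
		{ .x1 = 512, .y1 = 300, .x2 = 640, .y2 = 420 },
	};
	uint32_t blob_id = 0;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
	if (ret)
		return ret;

	/* The blob is referenced by the plane's FB_DAMAGE_CLIPS property;
	 * the commit itself (drmModeAtomicCommit) is left to the caller. */
	if (drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id,
				     blob_id) < 0)
		return -1;

	return 0;
}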