author | Dave Airlie <airlied@redhat.com> | 2024-06-27 17:18:49 +1000 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2024-06-27 17:18:49 +1000 |
commit | 365aa9f573995b46ca14a24165d85e31160e47b9 (patch) | |
tree | 392d9656fc902e8198ee89efe278c78376478f8a /drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |
parent | 541b1b0a8fc235bca355921eb7f3f59a8efa3e9a (diff) | |
parent | 1ecef5589320fd56af599b624d59c355d162ac7b (diff) | |
Merge tag 'amd-drm-next-6.11-2024-06-22' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.11-2024-06-22:
amdgpu:
- HPD fixes
- PSR fixes
- DCC updates
- DCN 4.0.1 fixes
- FAMS fixes
- Misc code cleanups
- SR-IOV fixes
- GPUVM TLB flush cleanups
- Make VCN less verbose
- ACPI backlight fixes
- MES fixes
- Firmware loading cleanups
- Replay fixes
- LTTPR fixes
- Trap handler fixes
- Cursor and overlay fixes
- Primary plane zpos fixes
- DML 2.1 fixes
- RAS updates
- USB4 fixes
- MALL fixes
- Reserved VMID fix
- Silence UBSAN warnings
amdkfd:
- Misc code cleanups
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240622152523.2267072-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 630 |
1 file changed, 466 insertions, 164 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ac18cddeb191..c96407379224 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -77,9 +77,11 @@
 #include <linux/types.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
+#include <linux/power_supply.h>
 #include <linux/firmware.h>
 #include <linux/component.h>
 #include <linux/dmi.h>
+#include <linux/sort.h>
 
 #include <drm/display/drm_dp_mst_helper.h>
 #include <drm/display/drm_hdmi_helper.h>
@@ -375,6 +377,20 @@ static inline void reverse_planes_order(struct dc_surface_update *array_of_surfa
 		swap(array_of_surface_update[i], array_of_surface_update[j]);
 }
 
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
+{
+	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+	const struct dc_surface_update *sb = (struct dc_surface_update *)b;
+
+	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+	return sb->surface->layer_index - sa->surface->layer_index;
+}
+
 /**
  * update_planes_and_stream_adapter() - Send planes to be updated in DC
  *
@@ -399,7 +415,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
 						    struct dc_stream_update *stream_update,
 						    struct dc_surface_update *array_of_surface_update)
 {
-	reverse_planes_order(array_of_surface_update, planes_count);
+	sort(array_of_surface_update, planes_count,
+	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
 
 	/*
 	 * Previous frame finished and HW is ready for optimization.
@@ -774,9 +791,9 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (link && aconnector->dc_link == link) {
 			if (notify->type == DMUB_NOTIFICATION_HPD)
-				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
-			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
 				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+				DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
 			else
 				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
 					 notify->type, link_index);
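The `sort()` call in the hunk above replaces the old unconditional `reverse_planes_order()` with an explicit descending sort on `layer_index`. Below is a minimal userspace sketch of the same comparator logic, with the kernel's `sort()` swapped for `qsort()` and `dc_surface_update` reduced to the one field the comparator reads; the `fake_*` types are illustrative scaffolding, not driver structs.

```c
#include <stdio.h>
#include <stdlib.h>

struct fake_surface {
	int layer_index; /* stands in for dc_plane_state::layer_index */
};

struct fake_surface_update {
	struct fake_surface *surface;
};

/* Same logic as dm_plane_layer_index_cmp(): sort by descending layer_index */
static int layer_index_cmp(const void *a, const void *b)
{
	const struct fake_surface_update *sa = a;
	const struct fake_surface_update *sb = b;

	return sb->surface->layer_index - sa->surface->layer_index;
}

int main(void)
{
	struct fake_surface s[3] = { { 0 }, { 2 }, { 1 } };
	struct fake_surface_update u[3] = { { &s[0] }, { &s[1] }, { &s[2] } };

	qsort(u, 3, sizeof(u[0]), layer_index_cmp);

	for (int i = 0; i < 3; i++)
		printf("layer_index=%d\n", u[i].surface->layer_index);
	/* prints 2, 1, 0: topmost plane first, which is the order DC expects */
	return 0;
}
```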
link_index=%u\n", link_index); handle_hpd_irq_helper(hpd_aconnector); - else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) { handle_hpd_rx_irq(hpd_aconnector); + } } } @@ -859,7 +879,31 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) struct dmcub_trace_buf_entry entry = { 0 }; u32 count = 0; struct dmub_hpd_work *dmub_hpd_wrk; - struct dc_link *plink = NULL; + static const char *const event_type[] = { + "NO_DATA", + "AUX_REPLY", + "HPD", + "HPD_IRQ", + "SET_CONFIGC_REPLY", + "DPIA_NOTIFICATION", + }; + + do { + if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { + trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, + entry.param0, entry.param1); + + DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", + entry.trace_code, entry.tick_count, entry.param0, entry.param1); + } else + break; + + count++; + + } while (count <= DMUB_TRACE_MAX_READ); + + if (count > DMUB_TRACE_MAX_READ) + DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { @@ -871,7 +915,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) continue; } if (!dm->dmub_callback[notify.type]) { - DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + DRM_WARN("DMUB notification skipped due to no handler: type=%s\n", + event_type[notify.type]); continue; } if (dm->dmub_thread_offload[notify.type] == true) { @@ -889,37 +934,12 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) } INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); dmub_hpd_wrk->adev = adev; - if (notify.type == DMUB_NOTIFICATION_HPD) { - plink = adev->dm.dc->links[notify.link_index]; - if (plink) { - plink->hpd_status = - notify.hpd_status == DP_HPD_PLUG; - } - } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); } else { dm->dmub_callback[notify.type](adev, ¬ify); } } while (notify.pending_notification); } - - - do { - if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { - trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, - entry.param0, entry.param1); - - DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", - entry.trace_code, entry.tick_count, entry.param0, entry.param1); - } else - break; - - count++; - - } while (count <= DMUB_TRACE_MAX_READ); - - if (count > DMUB_TRACE_MAX_READ) - DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } static int dm_set_clockgating_state(void *handle, @@ -957,8 +977,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) list_for_each_entry(mode, &connector->modes, head) { - if (max_size < mode->htotal * mode->vtotal) - max_size = mode->htotal * mode->vtotal; + if (max_size < (unsigned long) mode->htotal * mode->vtotal) + max_size = (unsigned long) mode->htotal * mode->vtotal; } if (max_size) { @@ -2855,7 +2875,8 @@ static int dm_suspend(void *handle) dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); - dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + if (dm->cached_dc_state) + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); amdgpu_dm_commit_zero_streams(dm->dc); @@ -3159,7 +3180,7 @@ static int dm_resume(void *handle) * this is the case when traversing through already created end sink * MST connectors, should be skipped */ - if (aconnector && aconnector->mst_root) + if (aconnector->mst_root) continue; 
@@ -4571,6 +4592,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	struct drm_device *drm = aconnector->base.dev;
 	struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
 	struct backlight_properties props = { 0 };
+	struct amdgpu_dm_backlight_caps caps = { 0 };
 	char bl_name[16];
 
 	if (aconnector->bl_idx == -1)
@@ -4583,8 +4605,16 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 		return;
 	}
 
+	amdgpu_acpi_get_backlight_caps(&caps);
+	if (caps.caps_valid) {
+		if (power_supply_is_system_supplied() > 0)
+			props.brightness = caps.ac_level;
+		else
+			props.brightness = caps.dc_level;
+	} else
+		props.brightness = AMDGPU_MAX_BL_LEVEL;
+
 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
-	props.brightness = AMDGPU_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
 
 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -6392,13 +6422,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 	dc_dsc_policy_set_enable_dsc_when_not_needed(
 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
 
-	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+	if (sink->sink_signal == SIGNAL_TYPE_EDP &&
 	    !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
 
 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
 
-	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+	} else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 						  dsc_caps,
@@ -7073,7 +7103,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
 		aconnector->dc_link->local_sink :
 		aconnector->dc_em_sink;
-		dc_sink_retain(aconnector->dc_sink);
+		if (aconnector->dc_sink)
+			dc_sink_retain(aconnector->dc_sink);
 	}
 }
 
@@ -7900,7 +7931,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 		drm_add_modes_noedid(connector, 1920, 1080);
 	} else {
 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
-		amdgpu_dm_connector_add_common_modes(encoder, connector);
+		if (encoder)
+			amdgpu_dm_connector_add_common_modes(encoder, connector);
 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
 	}
 	amdgpu_dm_fbc_init(connector);
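The backlight hunk above seeds the initial brightness from the ACPI backlight caps, choosing the AC or DC level based on `power_supply_is_system_supplied()`, and only falls back to `AMDGPU_MAX_BL_LEVEL` when no valid caps exist. A userspace sketch of that selection with all kernel inputs faked; the `fake_*` names and `FAKE_MAX_BL_LEVEL` are stand-ins, not driver symbols:

```c
#include <stdbool.h>
#include <stdio.h>

#define FAKE_MAX_BL_LEVEL 255 /* stand-in for AMDGPU_MAX_BL_LEVEL */

struct fake_backlight_caps {
	bool caps_valid;
	int ac_level;
	int dc_level;
};

/* system_supplied > 0 models power_supply_is_system_supplied() on AC */
static int pick_initial_brightness(const struct fake_backlight_caps *caps,
				   int system_supplied)
{
	if (caps->caps_valid)
		return system_supplied > 0 ? caps->ac_level : caps->dc_level;
	return FAKE_MAX_BL_LEVEL; /* no ACPI data: start at full brightness */
}

int main(void)
{
	struct fake_backlight_caps caps = { true, 80, 40 };

	printf("on AC:   %d\n", pick_initial_brightness(&caps, 1)); /* 80 */
	printf("on DC:   %d\n", pick_initial_brightness(&caps, 0)); /* 40 */
	caps.caps_valid = false;
	printf("no caps: %d\n", pick_initial_brightness(&caps, 1)); /* 255 */
	return 0;
}
```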
@@ -8745,8 +8777,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	 * Disable the cursor first if we're disabling all the planes.
 	 * It'll remain on the screen after the planes are re-enabled
 	 * if we don't.
+	 *
+	 * If the cursor is transitioning from native to overlay mode, the
+	 * native cursor needs to be disabled first.
 	 */
-	if (acrtc_state->active_planes == 0)
+	if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+	    dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+		struct dc_cursor_position cursor_position = {0};
+
+		if (!dc_stream_set_cursor_position(acrtc_state->stream,
+						   &cursor_position))
+			drm_err(dev, "DC failed to disable native cursor\n");
+
+		bundle->stream_update.cursor_position =
+				&acrtc_state->stream->cursor_position;
+	}
+
+	if (acrtc_state->active_planes == 0 &&
+	    dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
 		amdgpu_dm_commit_cursors(state);
 
 	/* update planes when needed */
@@ -8760,7 +8808,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 
 		/* Cursor plane is handled after stream updates */
-		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+		    acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
 			if ((fb && crtc == pcrtc) ||
 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
 				cursor_update = true;
@@ -9116,7 +9165,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	 * to be disabling a single plane - those pipes are being disabled.
 	 */
 	if (acrtc_state->active_planes &&
-	    (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0))
+	    (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
+	    acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
 		amdgpu_dm_commit_cursors(state);
 
 cleanup:
@@ -9744,6 +9794,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		for (j = 0; j < status->plane_count; j++)
 			dummy_updates[j].surface = status->plane_states[0];
 
+		sort(dummy_updates, status->plane_count,
+		     sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
 		mutex_lock(&dm->dc_lock);
 		dc_exit_ips_for_hw_access(dm->dc);
@@ -10434,7 +10486,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 {
 	struct drm_plane *other;
 	struct drm_plane_state *old_other_state, *new_other_state;
-	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 	int i;
@@ -10456,14 +10509,38 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 
 	new_crtc_state =
 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+	old_crtc_state =
+		drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
 
 	if (!new_crtc_state)
 		return true;
 
+	/*
+	 * A change in cursor mode means a new dc pipe needs to be acquired or
+	 * released from the state
+	 */
+	old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+	new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+	    old_dm_crtc_state != NULL &&
+	    old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
+		return true;
+	}
+
 	/* CRTC Degamma changes currently require us to recreate planes. */
 	if (new_crtc_state->color_mgmt_changed)
 		return true;
 
+	/*
+	 * On zpos change, planes need to be reordered by removing and re-adding
+	 * them one by one to the dc state, in order of descending zpos.
+	 *
+	 * TODO: We can likely skip bandwidth validation if the only thing that
+	 * changed about the plane was its z-ordering.
+	 */
+	if (new_crtc_state->zpos_changed)
+		return true;
+
 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
 		return true;
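The two new early returns in `should_reset_plane()` above force a full plane remove/re-add when either the CRTC's cursor mode flips (a DC pipe must be acquired or released) or zpos changed (planes must be re-added in descending zpos order). A reduced model of just that predicate, with the state structs trimmed to the fields consulted; everything here is illustrative, not the driver's types:

```c
#include <stdbool.h>
#include <stdio.h>

enum cursor_mode { NATIVE_MODE, OVERLAY_MODE };

struct crtc_state_model {
	enum cursor_mode cursor_mode;
	bool zpos_changed;
};

static bool needs_reset(bool is_cursor,
			const struct crtc_state_model *old_state,
			const struct crtc_state_model *new_state)
{
	/* Cursor pipe must be acquired/released when the mode changes */
	if (is_cursor && old_state->cursor_mode != new_state->cursor_mode)
		return true;

	/* Reordering planes requires removing and re-adding them */
	if (new_state->zpos_changed)
		return true;

	return false;
}

int main(void)
{
	struct crtc_state_model old_state = { NATIVE_MODE, false };
	struct crtc_state_model new_state = { OVERLAY_MODE, false };

	printf("cursor mode flip -> %d\n",
	       needs_reset(true, &old_state, &new_state));  /* 1 */
	new_state.cursor_mode = NATIVE_MODE;
	new_state.zpos_changed = true;
	printf("zpos change      -> %d\n",
	       needs_reset(false, &old_state, &new_state)); /* 1 */
	return 0;
}
```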
@@ -10611,6 +10688,68 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
 	return 0;
 }
 
+/*
+ * Helper function for checking the cursor in native mode
+ */
+static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
+					struct drm_plane *plane,
+					struct drm_plane_state *new_plane_state,
+					bool enable)
+{
+
+	struct amdgpu_crtc *new_acrtc;
+	int ret;
+
+	if (!enable || !new_plane_crtc ||
+	    drm_atomic_plane_disabling(plane->state, new_plane_state))
+		return 0;
+
+	new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+	if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+		return -EINVAL;
+	}
+
+	if (new_plane_state->fb) {
+		ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+					 new_plane_state->fb);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
+					   struct drm_crtc *old_plane_crtc,
+					   struct drm_crtc *new_plane_crtc,
+					   bool enable)
+{
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+	if (!enable) {
+		if (old_plane_crtc == NULL)
+			return true;
+
+		old_crtc_state = drm_atomic_get_old_crtc_state(
+			state, old_plane_crtc);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+	} else {
+		if (new_plane_crtc == NULL)
+			return true;
+
+		new_crtc_state = drm_atomic_get_new_crtc_state(
+			state, new_plane_crtc);
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+	}
+}
+
 static int dm_update_plane_state(struct dc *dc,
 				 struct drm_atomic_state *state,
 				 struct drm_plane *plane,
@@ -10626,8 +10765,7 @@ static int dm_update_plane_state(struct dc *dc,
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
-	struct amdgpu_crtc *new_acrtc;
-	bool needs_reset;
+	bool needs_reset, update_native_cursor;
 	int ret = 0;
 
@@ -10636,24 +10774,16 @@ static int dm_update_plane_state(struct dc *dc,
 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
 
-	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-		if (!enable || !new_plane_crtc ||
-		    drm_atomic_plane_disabling(plane->state, new_plane_state))
-			return 0;
-
-		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
-
-		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
-			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
-			return -EINVAL;
-		}
+	update_native_cursor = dm_should_update_native_cursor(state,
+							      old_plane_crtc,
+							      new_plane_crtc,
+							      enable);
 
-		if (new_plane_state->fb) {
-			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
-						 new_plane_state->fb);
-			if (ret)
-				return ret;
-		}
+	if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
+		ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+						   new_plane_state, enable);
+		if (ret)
+			return ret;
 
 		return 0;
 	}
@@ -10719,20 +10849,14 @@ static int dm_update_plane_state(struct dc *dc,
 		ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
 		if (ret)
-			return ret;
+			goto out;
 
 		WARN_ON(dm_new_plane_state->dc_state);
 
 		dc_new_plane_state = dc_create_plane_state(dc);
-		if (!dc_new_plane_state)
-			return -ENOMEM;
-
-		/* Block top most plane from being a video plane */
-		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
-			if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
-				return -EINVAL;
-
-			*is_top_most_overlay = false;
+		if (!dc_new_plane_state) {
+			ret = -ENOMEM;
+			goto out;
 		}
 
 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
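`dm_check_native_cursor_state()` above factors the old inline cursor checks out of `dm_update_plane_state()`: when enabling a native-mode cursor, source cropping is rejected and the framebuffer is validated. A userspace model of that flow, with `dm_check_cursor_fb()` stubbed out and the plane state trimmed to the fields the check reads:

```c
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct plane_state_model {
	int src_x, src_y;
	bool has_fb;
};

static int check_cursor_fb_stub(const struct plane_state_model *s)
{
	(void)s;
	return 0; /* stands in for dm_check_cursor_fb() */
}

static int check_native_cursor_state(const struct plane_state_model *s,
				     bool enable)
{
	if (!enable)
		return 0; /* nothing to validate when disabling */

	/* The DC native cursor pipe cannot crop: source must start at (0, 0) */
	if (s->src_x != 0 || s->src_y != 0) {
		fprintf(stderr, "cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	return s->has_fb ? check_cursor_fb_stub(s) : 0;
}

int main(void)
{
	struct plane_state_model ok = { 0, 0, true };
	struct plane_state_model cropped = { 16, 0, true };

	printf("ok:      %d\n", check_native_cursor_state(&ok, true));      /* 0 */
	printf("cropped: %d\n", check_native_cursor_state(&cropped, true)); /* -22 */
	return 0;
}
```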
@@ -10745,13 +10869,13 @@ static int dm_update_plane_state(struct dc *dc,
 						 new_crtc_state);
 		if (ret) {
 			dc_plane_state_release(dc_new_plane_state);
-			return ret;
+			goto out;
 		}
 
 		ret = dm_atomic_get_state(state, &dm_state);
 		if (ret) {
 			dc_plane_state_release(dc_new_plane_state);
-			return ret;
+			goto out;
 		}
 
 		/*
@@ -10768,7 +10892,8 @@ static int dm_update_plane_state(struct dc *dc,
 				dm_state->context)) {
 
 			dc_plane_state_release(dc_new_plane_state);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		dm_new_plane_state->dc_state = dc_new_plane_state;
@@ -10783,6 +10908,16 @@ static int dm_update_plane_state(struct dc *dc,
 		*lock_and_validation_needed = true;
 	}
 
+out:
+	/* If enabling cursor overlay failed, attempt fallback to native mode */
+	if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
+		ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+						   new_plane_state, enable);
+		if (ret)
+			return ret;
+
+		dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
+	}
 
 	return ret;
 }
@@ -10816,99 +10951,64 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
 	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
 }
 
-static int dm_check_crtc_cursor(struct drm_atomic_state *state,
-				struct drm_crtc *crtc,
-				struct drm_crtc_state *new_crtc_state)
+/*
+ * The normalized_zpos value cannot be used by this iterator directly. It's only
+ * calculated for enabled planes, potentially causing normalized_zpos collisions
+ * between enabled/disabled planes in the atomic state. We need a unique value
+ * so that the iterator will not generate the same object twice, or loop
+ * indefinitely.
+ */
+static inline struct __drm_planes_state *__get_next_zpos(
+	struct drm_atomic_state *state,
+	struct __drm_planes_state *prev)
 {
-	struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
-	struct drm_plane_state *new_cursor_state, *new_underlying_state;
-	int i;
-	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
-	bool any_relevant_change = false;
-
-	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
-	 * cursor per pipe but it's going to inherit the scaling and
-	 * positioning from the underlying pipe. Check the cursor plane's
-	 * blending properties match the underlying planes'.
-	 */
-
-	/* If no plane was enabled or changed scaling, no need to check again */
-	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
-
-		if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
-			continue;
-
-		if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
-			any_relevant_change = true;
-			break;
-		}
-
-		if (new_plane_state->fb == old_plane_state->fb &&
-		    new_plane_state->crtc_w == old_plane_state->crtc_w &&
-		    new_plane_state->crtc_h == old_plane_state->crtc_h)
-			continue;
-
-		dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
-		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+	unsigned int highest_zpos = 0, prev_zpos = 256;
+	uint32_t highest_id = 0, prev_id = UINT_MAX;
+	struct drm_plane_state *new_plane_state;
+	struct drm_plane *plane;
+	int i, highest_i = -1;
 
-		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
-			any_relevant_change = true;
-			break;
-		}
+	if (prev != NULL) {
+		prev_zpos = prev->new_state->zpos;
+		prev_id = prev->ptr->base.id;
 	}
 
-	if (!any_relevant_change)
-		return 0;
-
-	new_cursor_state = drm_atomic_get_plane_state(state, cursor);
-	if (IS_ERR(new_cursor_state))
-		return PTR_ERR(new_cursor_state);
-
-	if (!new_cursor_state->fb)
-		return 0;
-
-	dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
-
-	/* Need to check all enabled planes, even if this commit doesn't change
-	 * their state
-	 */
-	i = drm_atomic_add_affected_planes(state, crtc);
-	if (i)
-		return i;
-
-	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
-		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
-		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		/* Skip planes with higher zpos than the previously returned */
+		if (new_plane_state->zpos > prev_zpos ||
+		    (new_plane_state->zpos == prev_zpos &&
+		     plane->base.id >= prev_id))
 			continue;
 
-		/* Ignore disabled planes */
-		if (!new_underlying_state->fb)
-			continue;
-
-		dm_get_plane_scale(new_underlying_state,
-				   &underlying_scale_w, &underlying_scale_h);
-
-		if (cursor_scale_w != underlying_scale_w ||
-		    cursor_scale_h != underlying_scale_h) {
-			drm_dbg_atomic(crtc->dev,
-				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
-				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
-			return -EINVAL;
+		/* Save the index of the plane with highest zpos */
+		if (new_plane_state->zpos > highest_zpos ||
+		    (new_plane_state->zpos == highest_zpos &&
+		     plane->base.id > highest_id)) {
+			highest_zpos = new_plane_state->zpos;
+			highest_id = plane->base.id;
+			highest_i = i;
 		}
-
-		/* If this plane covers the whole CRTC, no need to check planes underneath */
-		if (new_underlying_state->crtc_x <= 0 &&
-		    new_underlying_state->crtc_y <= 0 &&
-		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
-		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
-			break;
 	}
 
-	return 0;
+	if (highest_i < 0)
+		return NULL;
+
+	return &state->planes[highest_i];
 }
 
+/*
+ * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
+ * by descending zpos, as read from the new plane state. This is the same
+ * ordering as defined by drm_atomic_normalize_zpos().
+ */
+#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
	for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
	     __i != NULL; __i = __get_next_zpos((__state), __i)) \
		for_each_if(((plane) = __i->ptr, \
			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
			     (old_plane_state) = __i->old_state, \
			     (new_plane_state) = __i->new_state, 1))
+
 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
 {
 	struct drm_connector *connector;
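`__get_next_zpos()` above implements a selection-style walk: each call scans all planes in the state and returns the not-yet-visited plane with the highest `(zpos, object id)` pair, so the iterator macro visits planes in descending zpos even when raw zpos values collide. A standalone model of the same walk; the `plane_model` type and `next_descending()` helper are illustrative, and 256 mirrors the kernel's above-any-valid-zpos sentinel:

```c
#include <stdio.h>
#include <limits.h>

struct plane_model {
	unsigned int zpos;
	unsigned int id; /* DRM object IDs are unique, breaking zpos ties */
};

/* Returns index of the next plane in descending (zpos, id) order, or -1 */
static int next_descending(const struct plane_model *p, int n, int prev)
{
	unsigned int prev_zpos = prev < 0 ? 256u : p[prev].zpos;
	unsigned int prev_id = prev < 0 ? UINT_MAX : p[prev].id;
	unsigned int best_zpos = 0, best_id = 0;
	int best = -1;

	for (int i = 0; i < n; i++) {
		/* Skip planes at or above the previously returned pair */
		if (p[i].zpos > prev_zpos ||
		    (p[i].zpos == prev_zpos && p[i].id >= prev_id))
			continue;
		/* Remember the highest remaining (zpos, id) pair */
		if (best < 0 || p[i].zpos > best_zpos ||
		    (p[i].zpos == best_zpos && p[i].id > best_id)) {
			best_zpos = p[i].zpos;
			best_id = p[i].id;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct plane_model planes[] = {
		{ 1, 31 }, { 2, 33 }, { 1, 35 }, { 0, 37 },
	};

	for (int i = next_descending(planes, 4, -1); i >= 0;
	     i = next_descending(planes, 4, i))
		printf("zpos=%u id=%u\n", planes[i].zpos, planes[i].id);
	/* Visits (2,33), (1,35), (1,31), (0,37): descending zpos,
	 * with id ties broken high-to-low, and guaranteed termination. */
	return 0;
}
```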
@@ -10940,6 +11040,165 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
 }
 
 /**
+ * DOC: Cursor Modes - Native vs Overlay
+ *
+ * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
+ * plane. It does not require a dedicated hw plane to enable, but it is
+ * subject to the same z-order and scaling as the hw plane. It also has format
+ * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
+ * hw plane.
+ *
+ * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
+ * own scaling and z-pos. It also has no blending restrictions. It lends itself
+ * to cursor behavior more akin to a DRM client's expectations. However, it does
+ * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
+ * available.
+ */
+
+/**
+ * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
+ * @adev: amdgpu device
+ * @state: DRM atomic state
+ * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
+ * @cursor_mode: Returns the required cursor mode on dm_crtc_state
+ *
+ * Get whether the cursor should be enabled in native mode, or overlay mode, on
+ * the dm_crtc_state.
+ *
+ * The cursor should be enabled in overlay mode if there exists an underlying
+ * plane - on which the cursor may be blended - that is either YUV formatted, or
+ * scaled differently from the cursor.
+ *
+ * Since zpos info is required, drm_atomic_normalize_zpos must be called before
+ * calling this function.
+ *
+ * Return: 0 on success, or an error code if getting the cursor plane state
+ * failed.
+ */
+static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
+				   struct drm_atomic_state *state,
+				   struct dm_crtc_state *dm_crtc_state,
+				   enum amdgpu_dm_cursor_mode *cursor_mode)
+{
+	struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
+	struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
+	struct drm_plane *plane;
+	bool consider_mode_change = false;
+	bool entire_crtc_covered = false;
+	bool cursor_changed = false;
+	int underlying_scale_w, underlying_scale_h;
+	int cursor_scale_w, cursor_scale_h;
+	int i;
+
+	/* Overlay cursor not supported on HW before DCN */
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
+		*cursor_mode = DM_CURSOR_NATIVE_MODE;
+		return 0;
+	}
+
+	/* Init cursor_mode to be the same as current */
+	*cursor_mode = dm_crtc_state->cursor_mode;
+
+	/*
+	 * Cursor mode can change if a plane's format changes, scale changes, is
+	 * enabled/disabled, or z-order changes.
+	 */
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
+		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+		/* Only care about planes on this CRTC */
+		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
+			continue;
+
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			cursor_changed = true;
+
+		if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
+		    drm_atomic_plane_disabling(old_plane_state, plane_state) ||
+		    old_plane_state->fb->format != plane_state->fb->format) {
+			consider_mode_change = true;
+			break;
+		}
+
+		dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
+		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+			consider_mode_change = true;
+			break;
+		}
+	}
+
+	if (!consider_mode_change && !crtc_state->zpos_changed)
+		return 0;
+
+	/*
+	 * If no cursor change on this CRTC, and not enabled on this CRTC, then
+	 * no need to set cursor mode. This avoids needlessly locking the cursor
+	 * state.
+	 */
+	if (!cursor_changed &&
+	    !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
+		return 0;
+	}
+
+	cursor_state = drm_atomic_get_plane_state(state,
+						  crtc_state->crtc->cursor);
+	if (IS_ERR(cursor_state))
+		return PTR_ERR(cursor_state);
+
+	/* Cursor is disabled */
+	if (!cursor_state->fb)
+		return 0;
+
+	/* For all planes in descending z-order (all of which are below cursor
+	 * as per zpos definitions), check their scaling and format
+	 */
+	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
+
+		/* Only care about non-cursor planes on this CRTC */
+		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
+		    plane->type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		/* Underlying plane is YUV format - use overlay cursor */
+		if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
+			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
+			return 0;
+		}
+
+		dm_get_plane_scale(plane_state,
+				   &underlying_scale_w, &underlying_scale_h);
+		dm_get_plane_scale(cursor_state,
+				   &cursor_scale_w, &cursor_scale_h);
+
+		/* Underlying plane has different scale - use overlay cursor */
+		if (cursor_scale_w != underlying_scale_w &&
+		    cursor_scale_h != underlying_scale_h) {
+			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
+			return 0;
+		}
+
+		/* If this plane covers the whole CRTC, no need to check planes underneath */
+		if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
+		    plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
+		    plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
+			entire_crtc_covered = true;
+			break;
+		}
+	}
+
+	/* If planes do not cover the entire CRTC, use overlay mode to enable
+	 * cursor over holes
+	 */
+	if (entire_crtc_covered)
+		*cursor_mode = DM_CURSOR_NATIVE_MODE;
+	else
+		*cursor_mode = DM_CURSOR_OVERLAY_MODE;
+
+	return 0;
+}
+
+/**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
  *
  * @dev: The DRM device
@@ -11108,8 +11367,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;
 	}
 
+	/*
+	 * Determine whether cursors on each CRTC should be enabled in native or
+	 * overlay mode.
+	 */
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+					      &dm_new_crtc_state->cursor_mode);
+		if (ret) {
+			drm_dbg(dev, "Failed to determine cursor mode\n");
+			goto fail;
+		}
+	}
+
 	/* Remove existing planes if they are modified */
-	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
 		if (old_plane_state->fb && new_plane_state->fb &&
 		    get_mem_type(old_plane_state->fb) !=
 		    get_mem_type(new_plane_state->fb))
@@ -11154,7 +11428,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	}
 
 	/* Add new/modified planes */
-	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
 		ret = dm_update_plane_state(dc, state, plane,
 					    old_plane_state,
 					    new_plane_state,
@@ -11188,11 +11462,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
 	}
 
-	/* Check cursor planes scaling */
+	/* Check cursor planes restrictions */
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+		enum amdgpu_dm_cursor_mode required_cursor_mode;
+
+		/* Overlay cursor not subject to native cursor restrictions */
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+			continue;
+
+		/* If HW can only do native cursor, check restrictions again */
+		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+					      &required_cursor_mode);
 		if (ret) {
-			drm_dbg_atomic(dev, "dm_check_crtc_cursor() failed\n");
+			drm_dbg_driver(crtc->dev,
+				       "[CRTC:%d:%s] Checking cursor mode failed\n",
+				       crtc->base.id, crtc->name);
+			goto fail;
+		} else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+			drm_dbg_driver(crtc->dev,
+				       "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
+				       crtc->base.id, crtc->name);
+			ret = -EINVAL;
 			goto fail;
 		}
 	}
@@ -11806,6 +12098,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
 	mutex_unlock(&adev->dm.dc_lock);
 }
 
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+		dc_exit_ips_for_hw_access(dc);
+}
+
 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
 		       u32 value, const char *func_name)
 {
@@ -11816,6 +12114,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
 		return;
 	}
 #endif
+
+	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
 	cgs_write_register(ctx->cgs_device, address, value);
 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
 }
@@ -11839,6 +12139,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
 		return 0;
 	}
 
+	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
 	value = cgs_read_register(ctx->cgs_device, address);
 
 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
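The new `amdgpu_dm_exit_ips_for_hw_access()` wrapper at the end of the diff makes the register accessors skip the IPS exit when no DMUB service exists or an exit is already in force, rather than paying for it on every read/write. A toy model of that guard; the counter semantics here are simplified assumptions for illustration, not the DMUB implementation:

```c
#include <stdio.h>

struct dmub_srv_model {
	int idle_exit_counter; /* >0 while an IPS exit is already in force */
};

static int ips_exits_issued;

static void exit_ips(struct dmub_srv_model *srv)
{
	/* stands in for the (expensive) dc_exit_ips_for_hw_access() */
	ips_exits_issued++;
	srv->idle_exit_counter++;
}

static void exit_ips_for_hw_access(struct dmub_srv_model *srv)
{
	/* Mirrors the kernel guard: skip if no DMUB or already exited */
	if (srv && !srv->idle_exit_counter)
		exit_ips(srv);
}

int main(void)
{
	struct dmub_srv_model srv = { 0 };

	/* Back-to-back register accesses trigger only one IPS exit */
	exit_ips_for_hw_access(&srv);
	exit_ips_for_hw_access(&srv);
	exit_ips_for_hw_access(NULL); /* no DMUB service: no-op */

	printf("IPS exits issued: %d\n", ips_exits_issued); /* 1 */
	return 0;
}
```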