Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
15 files changed, 4319 insertions, 1039 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 94911871eb9b..9a3b7bf8ab0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o endif +ifdef CONFIG_DRM_AMD_DC_HDCP +AMDGPUDM += amdgpu_dm_hdcp.o +endif + ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 81127f7d6ed1..7aac9568d3be 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -29,6 +29,7 @@ #include "dm_services_types.h" #include "dc.h" #include "dc/inc/core_types.h" +#include "dal_asic_id.h" #include "vid.h" #include "amdgpu.h" @@ -36,6 +37,9 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif #include "amdgpu_pm.h" #include "amd_shared.h" @@ -53,18 +57,23 @@ #include <linux/version.h> #include <linux/types.h> #include <linux/pm_runtime.h> +#include <linux/pci.h> #include <linux/firmware.h> +#include <linux/component.h> -#include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_edid.h> +#include <drm/drm_vblank.h> +#include <drm/drm_audio_component.h> +#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "ivsrcid/irqsrcs_dcn_1_0.h" +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" @@ -111,7 +120,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, - unsigned long possible_crtcs); + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap); static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, uint32_t link_index); @@ -137,30 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); +static void amdgpu_dm_set_psr_caps(struct dc_link *link); +static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); -static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = { - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, -}; - -static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = { - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */ -}; - -static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = { - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_PRIMARY, - DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */ -}; - /* * dm_vblank_get_counter * @@ -275,12 +267,29 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, return NULL; } +static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) +{ + 
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || + dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; +} + +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. + */ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; unsigned long flags; + struct drm_pending_vblank_event *e; + struct dm_crtc_state *acrtc_state; + uint32_t vpos, hpos, v_blank_start, v_blank_end; + bool vrr_active; amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -303,46 +312,158 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } - /* Update to correct count(s) if racing with vblank irq */ - amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); + /* page flip completed. */ + e = amdgpu_crtc->event; + amdgpu_crtc->event = NULL; - /* wake up userspace */ - if (amdgpu_crtc->event) { - drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); + if (!e) + WARN_ON(1); - /* page flip completed. clean up */ - amdgpu_crtc->event = NULL; + acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state); + vrr_active = amdgpu_dm_vrr_active(acrtc_state); + + /* Fixed refresh rate, or VRR scanout position outside front-porch? */ + if (!vrr_active || + !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start, + &v_blank_end, &hpos, &vpos) || + (vpos < v_blank_start)) { + /* Update to correct count and vblank timestamp if racing with + * vblank irq. This also updates to the correct vblank timestamp + * even in VRR mode, as scanout is past the front-porch atm. + */ + drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); - } else - WARN_ON(1); + /* Wake up userspace by sending the pageflip event with proper + * count and timestamp of vblank of flip completion. + */ + if (e) { + drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); + + /* Event sent, so done with vblank for this flip */ + drm_crtc_vblank_put(&amdgpu_crtc->base); + } + } else if (e) { + /* VRR active and inside front-porch: vblank count and + * timestamp for pageflip event will only be up to date after + * drm_crtc_handle_vblank() has been executed from late vblank + * irq handler after start of back-porch (vline 0). We queue the + * pageflip event for send-out by drm_crtc_handle_vblank() with + * updated timestamp and count, once it runs after us. + * + * We need to open-code this instead of using the helper + * drm_crtc_arm_vblank_event(), as that helper would + * call drm_crtc_accurate_vblank_count(), which we must + * not call in VRR mode while we are in front-porch! + */ + + /* sequence will be replaced by real count during send-out. */ + e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); + e->pipe = amdgpu_crtc->crtc_id; + + list_add_tail(&e->base.link, &adev->ddev->vblank_event_list); + e = NULL; + } + + /* Keep track of vblank of this flip for flip throttling. We use the + * cooked hw counter, as that one incremented at start of this vblank + * of pageflip completion, so last_flip_vblank is the forbidden count + * for queueing new pageflips if vsync + VRR is enabled. 
+ */ + amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev, + amdgpu_crtc->crtc_id); amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n", - __func__, amdgpu_crtc->crtc_id, amdgpu_crtc); + DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, + vrr_active, (int) !e); +} + +static void dm_vupdate_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; + unsigned long flags; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); - drm_crtc_vblank_put(&amdgpu_crtc->base); + if (acrtc) { + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + /* Core vblank handling is done here after end of front-porch in + * vrr mode, as vblank timestamping will give valid results + * while now done after front-porch. This will also deliver + * page-flip completion events that have been queued to us + * if a pageflip happened inside front-porch. + */ + if (amdgpu_dm_vrr_active(acrtc_state)) { + drm_crtc_handle_vblank(&acrtc->base); + + /* BTR processing for pre-DCE12 ASICs */ + if (acrtc_state->stream && + adev->family < AMDGPU_FAMILY_AI) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + } + } + } } +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: ignored + * + * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK + * event handler. + */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; struct dm_crtc_state *acrtc_state; + unsigned long flags; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); if (acrtc) { - drm_crtc_handle_vblank(&acrtc->base); - amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); - acrtc_state = to_dm_crtc_state(acrtc->base.state); - if (acrtc_state->stream && + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + /* Core vblank handling at start of front-porch is only possible + * in non-vrr mode, as only there vblank timestamping will give + * valid results while done in front-porch. Otherwise defer it + * to dm_vupdate_high_irq after end of front-porch. + */ + if (!amdgpu_dm_vrr_active(acrtc_state)) + drm_crtc_handle_vblank(&acrtc->base); + + /* Following stuff must happen at start of vblank, for crc + * computation and below-the-range btr support in vrr mode. 
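/*
 * Editorial sketch, not part of the patch: the pageflip-completion paths in
 * dm_pflip_high_irq() above condense to the decision below. The drm_crtc_*
 * helpers are the DRM core APIs the hunk already uses; the wrapper name
 * sketch_complete_flip() is hypothetical, and the caller is assumed to hold
 * dev->event_lock with e != NULL, as in the handler.
 */
static void sketch_complete_flip(struct amdgpu_crtc *acrtc,
				 struct drm_pending_vblank_event *e,
				 bool vrr_active, bool in_front_porch)
{
	if (!vrr_active || !in_front_porch) {
		/* Timestamp is already valid: cook the count and send now. */
		drm_crtc_accurate_vblank_count(&acrtc->base);
		drm_crtc_send_vblank_event(&acrtc->base, e);
		drm_crtc_vblank_put(&acrtc->base);
	} else {
		/*
		 * VRR front-porch: count/timestamp are not final yet. Queue
		 * the event; drm_crtc_handle_vblank(), run later from the
		 * VUPDATE irq, sends it with an up-to-date sequence.
		 */
		e->sequence = drm_crtc_vblank_count(&acrtc->base);
		e->pipe = acrtc->crtc_id;
		list_add_tail(&e->base.link,
			      &acrtc->base.dev->vblank_event_list);
	}
}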
+ */ + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + + if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI && acrtc_state->vrr_params.supported && acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc_state->stream, @@ -352,6 +473,7 @@ static void dm_crtc_high_irq(void *interrupt_params) adev->dm.dc, acrtc_state->stream, &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } } } @@ -412,16 +534,157 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) } +static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, + int pipe, bool *enabled, + unsigned char *buf, int max_bytes) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = dev->dev_private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct amdgpu_dm_connector *aconnector; + int ret = 0; + + *enabled = false; + + mutex_lock(&adev->dm.audio_lock); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->audio_inst != port) + continue; + + *enabled = true; + ret = drm_eld_size(connector->eld); + memcpy(buf, connector->eld, min(max_bytes, ret)); + + break; + } + drm_connector_list_iter_end(&conn_iter); + + mutex_unlock(&adev->dm.audio_lock); + + DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); + + return ret; +} + +static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { + .get_eld = amdgpu_dm_audio_component_get_eld, +}; + +static int amdgpu_dm_audio_component_bind(struct device *kdev, + struct device *hda_kdev, void *data) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = dev->dev_private; + struct drm_audio_component *acomp = data; + + acomp->ops = &amdgpu_dm_audio_component_ops; + acomp->dev = kdev; + adev->dm.audio_component = acomp; + + return 0; +} + +static void amdgpu_dm_audio_component_unbind(struct device *kdev, + struct device *hda_kdev, void *data) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = dev->dev_private; + struct drm_audio_component *acomp = data; + + acomp->ops = NULL; + acomp->dev = NULL; + adev->dm.audio_component = NULL; +} + +static const struct component_ops amdgpu_dm_audio_component_bind_ops = { + .bind = amdgpu_dm_audio_component_bind, + .unbind = amdgpu_dm_audio_component_unbind, +}; + +static int amdgpu_dm_audio_init(struct amdgpu_device *adev) +{ + int i, ret; + + if (!amdgpu_audio) + return 0; + + adev->mode_info.audio.enabled = true; + + adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + adev->mode_info.audio.pin[i].channels = -1; + adev->mode_info.audio.pin[i].rate = -1; + adev->mode_info.audio.pin[i].bits_per_sample = -1; + adev->mode_info.audio.pin[i].status_bits = 0; + adev->mode_info.audio.pin[i].category_code = 0; + adev->mode_info.audio.pin[i].connected = false; + adev->mode_info.audio.pin[i].id = + adev->dm.dc->res_pool->audios[i]->inst; + adev->mode_info.audio.pin[i].offset = 0; + } + + ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); + if (ret < 0) + return ret; + + adev->dm.audio_registered = true; + + return 0; +} + +static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_audio) 
+ return; + + if (!adev->mode_info.audio.enabled) + return; + + if (adev->dm.audio_registered) { + component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); + adev->dm.audio_registered = false; + } + + /* TODO: Disable audio? */ + + adev->mode_info.audio.enabled = false; +} + +void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) +{ + struct drm_audio_component *acomp = adev->dm.audio_component; + + if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { + DRM_DEBUG_KMS("Notify ELD: %d\n", pin); + + acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, + pin, -1); + } +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct dc_callback_init init_params; +#endif + adev->dm.ddev = adev->ddev; adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&init_params, 0, sizeof(init_params)); +#endif mutex_init(&adev->dm.dc_lock); + mutex_init(&adev->dm.audio_lock); if(amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); @@ -462,6 +725,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; + if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) + init_data.flags.multi_mon_pp_mclk_switch = true; + + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + + init_data.flags.power_down_display_on_boot = true; + +#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + init_data.soc_bounding_box = adev->dm.soc_bounding_box; +#endif + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -472,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -482,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -519,7 +808,23 @@ error: static void amdgpu_dm_fini(struct amdgpu_device *adev) { + amdgpu_dm_audio_fini(adev); + amdgpu_dm_destroy_drm_device(&adev->dm); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.hdcp_workqueue) { + hdcp_destroy(adev->dm.hdcp_workqueue); + adev->dm.hdcp_workqueue = NULL; + } + + if (adev->dm.dc) + dc_deinit_callbacks(adev->dm.dc); +#endif + + /* DC Destroy TODO: Replace destroy DAL */ + if (adev->dm.dc) + dc_destroy(&adev->dm.dc); /* * TODO: pageflip, vlank interrupt * @@ -534,10 +839,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) mod_freesync_destroy(adev->dm.freesync_module); adev->dm.freesync_module = NULL; } - /* DC Destroy TODO: Replace destroy DAL */ - if (adev->dm.dc) - dc_destroy(&adev->dm.dc); + mutex_destroy(&adev->dm.audio_lock); mutex_destroy(&adev->dm.dc_lock); return; @@ -545,7 +848,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) static int 
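/*
 * Editorial sketch, not part of the patch: the audio glue above follows the
 * standard kernel component pattern, condensed here. Only calls that appear
 * in the hunks are used; the function name is hypothetical.
 */
static int sketch_audio_component_lifecycle(struct amdgpu_device *adev)
{
	int ret;

	/* Advertise the component; the HDA driver binds the aggregate. */
	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	/* On teardown the add must be balanced, as amdgpu_dm_audio_fini()
	 * does, or the aggregate keeps a stale device reference. */
	component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	return 0;
}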
load_dmcu_fw(struct amdgpu_device *adev) { - const char *fw_name_dmcu; + const char *fw_name_dmcu = NULL; int r; const struct dmcu_firmware_header_v1_0 *hdr; @@ -566,9 +869,18 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + case CHIP_RENOIR: return 0; case CHIP_RAVEN: - fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + if (ASICREV_IS_PICASSO(adev->external_rev_id)) + fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) + fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + else + return 0; break; default: DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); @@ -643,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; int ret = 0; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", - aconnector, aconnector->base.base.id); + aconnector, + aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); - ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; - return ret; - } + aconnector->dc_link->type = + dc_connection_single; + break; } + } } + drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } @@ -675,7 +989,7 @@ static int dm_late_init(void *handle) unsigned int linear_lut[16]; int i; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; - bool ret; + bool ret = false; for (i = 0; i < 16; i++) linear_lut[i] = 0xFFFF * i / 15; @@ -686,10 +1000,18 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; params.backlight_lut_array = linear_lut; - ret = dmcu_load_iram(dmcu, params); + /* Min backlight level after ABM reduction, Don't allow below 1% + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; - if (!ret) - return -EINVAL; + /* todo will enable for navi10 */ + if (adev->asic_type <= CHIP_RAVEN) { + ret = dmcu_load_iram(dmcu, params); + + if (!ret) + return -EINVAL; + } return detect_mst_link_for_all_connectors(adev->ddev); } @@ -698,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; int ret; bool need_hotplug = false; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_port) @@ -716,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { 
drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; } } } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(dev); @@ -732,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -762,7 +1082,7 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that @@ -906,32 +1226,41 @@ static int dm_resume(void *handle) struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; + struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; int i; + /* Recreate dc_state - DC invalidates it when setting power state to S3. */ + dc_release_state(dm_state->context); + dm_state->context = dc_create_state(dm->dc); + /* TODO: Remove dc_state->dccg, use dc->dccg directly. 
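/*
 * Editorial note, not part of the patch: the recurring conversion in this
 * file replaces open-coded connector-list walks under
 * mode_config.connection_mutex with the reference-counted iterator, which
 * is safe against concurrent connector hotplug. The minimal pattern, as
 * used in the hunks above:
 */
static void sketch_for_each_connector(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* connector holds a reference for this iteration only */
	}
	drm_connector_list_iter_end(&iter);
}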
*/ + dc_resource_state_construct(dm->dc, dm_state->context); + /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ dc_resume(dm->dc); - /* On resume we need to rewrite the MSTM control bits to enamble MST*/ - s3_handle_mst(ddev, false); - /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ + s3_handle_mst(ddev, false); + /* Do detection*/ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -959,6 +1288,7 @@ static int dm_resume(void *handle) amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } + drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) @@ -1174,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; +#ifdef CONFIG_DRM_AMD_DC_HDCP + /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ + if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +#endif } mutex_unlock(&dev->mode_config.mutex); @@ -1188,6 +1523,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct amdgpu_device *adev = dev->dev_private; +#endif /* * In case of failure or MST no need to update connector status or notify the OS @@ -1195,6 +1533,10 @@ static void handle_hpd_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); +#endif if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1313,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + union hpd_irq_data hpd_irq_data; + struct amdgpu_device *adev = dev->dev_private; + + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); +#endif /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1322,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param) if (dc_link->type != dc_connection_mst_branch) mutex_lock(&aconnector->hpd_lock); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) && +#else if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && +#endif !is_mst_root_connector) { /* Downstream Port status changed. 
*/ if (!dc_link_detect_sink(dc_link, &new_connection_type)) @@ -1357,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param) drm_kms_helper_hotplug_event(dev); } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); +#endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) dm_handle_hpd_rx_irq(aconnector); @@ -1416,10 +1773,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int i; unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; - if (adev->asic_type == CHIP_VEGA10 || - adev->asic_type == CHIP_VEGA12 || - adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN) + if (adev->asic_type >= CHIP_VEGA10) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -1457,6 +1811,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) dm_crtc_high_irq, c_irq_params); } + /* Use VUPDATE interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + /* Use GRPH_PFLIP interrupt */ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { @@ -1542,6 +1917,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) dm_crtc_high_irq, c_irq_params); } + /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to + * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx + * to trigger at end of each vblank, regardless of state of the lock, + * matching DCE behaviour. 
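/*
 * Editorial note, not part of the patch: the new VUPDATE handlers exist
 * because in VRR mode vblank timestamps only become valid once the
 * variable-length front-porch has ended. The DCE loop above registers the
 * plain V_UPDATE sources; the DCN loop just below uses V_UPDATE_NO_LOCK so
 * the interrupt fires at the end of every vblank regardless of the
 * double-buffer lock state, matching the DCE behaviour.
 */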
+ */ + for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; + i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); + + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; @@ -1593,15 +1996,10 @@ static int dm_atomic_get_state(struct drm_atomic_state *state, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_display_manager *dm = &adev->dm; struct drm_private_state *priv_state; - int ret; if (*dm_state) return 0; - ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx); - if (ret) - return ret; - priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); if (IS_ERR(priv_state)) return PTR_ERR(priv_state); @@ -1658,17 +2056,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); - new_state->context = dc_create_state(); + old_state = to_dm_atomic_state(obj->state); + + if (old_state && old_state->context) + new_state->context = dc_copy_state(old_state->context); + if (!new_state->context) { kfree(new_state); return NULL; } - old_state = to_dm_atomic_state(obj->state); - if (old_state && old_state->context) - dc_resource_state_copy_construct(old_state->context, - new_state->context); - return &new_state->base; } @@ -1708,13 +2105,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev->ddev->mode_config.fb_base = adev->gmc.aper_base; - drm_modeset_lock_init(&adev->dm.atomic_obj_lock); - state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; - state->context = dc_create_state(); + state->context = dc_create_state(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; @@ -1731,6 +2126,10 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) if (r) return r; + r = amdgpu_dm_audio_init(adev); + if (r) + return r; + return 0; } @@ -1807,6 +2206,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) } static const struct backlight_ops amdgpu_dm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, .get_brightness = amdgpu_dm_backlight_get_brightness, .update_status = amdgpu_dm_backlight_update_status, }; @@ -1841,39 +2241,42 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) #endif static int initialize_plane(struct amdgpu_display_manager *dm, - struct amdgpu_mode_info *mode_info, - int plane_id) + struct amdgpu_mode_info *mode_info, int plane_id, + enum drm_plane_type plane_type, + const struct dc_plane_cap *plane_cap) { struct drm_plane *plane; unsigned long possible_crtcs; int ret = 0; plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); - mode_info->planes[plane_id] = plane; - if (!plane) { DRM_ERROR("KMS: Failed to allocate plane\n"); return -ENOMEM; } - plane->type = mode_info->plane_type[plane_id]; + plane->type = plane_type; /* - * HACK: IGT tests expect that each plane can only have 
- * one possible CRTC. For now, set one CRTC for each - * plane that is not an underlay, but still allow multiple - * CRTCs for underlay planes. + * HACK: IGT tests expect that the primary plane for a CRTC + * can only have one possible CRTC. Only expose support for + * any CRTC if they're not going to be used as a primary plane + * for a CRTC - like overlay or underlay planes. */ possible_crtcs = 1 << plane_id; if (plane_id >= dm->dc->caps.max_streams) possible_crtcs = 0xff; - ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs); + ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { DRM_ERROR("KMS: Failed to initialize plane\n"); + kfree(plane); return ret; } + if (mode_info) + mode_info->planes[plane_id] = plane; + return ret; } @@ -1916,8 +2319,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) struct amdgpu_encoder *aencoder = NULL; struct amdgpu_mode_info *mode_info = &adev->mode_info; uint32_t link_cnt; - int32_t total_overlay_planes, total_primary_planes; + int32_t primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; + const struct dc_plane_cap *plane; link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { @@ -1925,24 +2329,53 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) return -EINVAL; } - /* Identify the number of planes to be initialized */ - total_overlay_planes = dm->dc->caps.max_slave_planes; - total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes; + /* There is one primary plane per CRTC */ + primary_planes = dm->dc->caps.max_streams; + ASSERT(primary_planes <= AMDGPU_MAX_PLANES); - /* First initialize overlay planes, index starting after primary planes */ - for (i = (total_overlay_planes - 1); i >= 0; i--) { - if (initialize_plane(dm, mode_info, (total_primary_planes + i))) { - DRM_ERROR("KMS: Failed to initialize overlay plane\n"); + /* + * Initialize primary planes, implicit planes for legacy IOCTLS. + * Order is reversed to match iteration order in atomic check. + */ + for (i = (primary_planes - 1); i >= 0; i--) { + plane = &dm->dc->caps.planes[i]; + + if (initialize_plane(dm, mode_info, i, + DRM_PLANE_TYPE_PRIMARY, plane)) { + DRM_ERROR("KMS: Failed to initialize primary plane\n"); goto fail; } } - /* Initialize primary planes */ - for (i = (total_primary_planes - 1); i >= 0; i--) { - if (initialize_plane(dm, mode_info, i)) { - DRM_ERROR("KMS: Failed to initialize primary plane\n"); + /* + * Initialize overlay planes, index starting after primary planes. + * These planes have a higher DRM index than the primary planes since + * they should be considered as having a higher z-order. + * Order is reversed to match iteration order in atomic check. + * + * Only support DCN for now, and only expose one so we don't encourage + * userspace to use up all the pipes. + */ + for (i = 0; i < dm->dc->caps.max_planes; ++i) { + struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; + + if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) + continue; + + if (!plane->blends_with_above || !plane->blends_with_below) + continue; + + if (!plane->pixel_format_support.argb8888) + continue; + + if (initialize_plane(dm, NULL, primary_planes + i, + DRM_PLANE_TYPE_OVERLAY, plane)) { + DRM_ERROR("KMS: Failed to initialize overlay plane\n"); goto fail; } + + /* Only create one overlay plane. 
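/*
 * Editorial worked example, not part of the patch: with
 * dm->dc->caps.max_streams = 4, the four primary planes get possible_crtcs
 * masks 0x1, 0x2, 0x4 and 0x8 (exactly one CRTC each, which is what the
 * IGT expectation above refers to), while the overlay plane is created at
 * an index >= max_streams and gets 0xff, so it may be assigned to any CRTC.
 */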
*/ + break; } for (i = 0; i < dm->dc->caps.max_streams; i++) @@ -1994,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); + if (amdgpu_dc_feature_mask & DC_PSR_MASK) + amdgpu_dm_set_psr_caps(link); } @@ -2024,6 +2459,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + case CHIP_NAVI12: + case CHIP_NAVI10: + case CHIP_NAVI14: +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + case CHIP_RENOIR: +#endif if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -2042,8 +2485,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) fail: kfree(aencoder); kfree(aconnector); - for (i = 0; i < dm->dc->caps.max_planes; i++) - kfree(mode_info->planes[i]); + return -EINVAL; } @@ -2093,8 +2535,7 @@ static ssize_t s3_debug_store(struct device *device, { int ret; int s3_state; - struct pci_dev *pdev = to_pci_dev(device); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_dev->dev_private; ret = kstrtoint(buf, 0, &s3_state); @@ -2124,53 +2565,45 @@ static int dm_early_init(void *handle) adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_KAVERI: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_KABINI: case CHIP_MULLINS: adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_FIJI: case CHIP_TONGA: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_CARRIZO: adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; - adev->mode_info.plane_type = dm_plane_type_carizzo; break; case CHIP_STONEY: adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; - adev->mode_info.plane_type = dm_plane_type_stoney; break; case CHIP_POLARIS11: case CHIP_POLARIS12: adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_POLARIS10: case CHIP_VEGAM: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; - adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_VEGA10: case CHIP_VEGA12: @@ -2178,14 +2611,32 @@ static int dm_early_init(void *handle) adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; - adev->mode_info.plane_type = dm_plane_type_default; break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; - adev->mode_info.plane_type = dm_plane_type_default; + break; +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + case CHIP_NAVI10: + case CHIP_NAVI12: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_NAVI14: + adev->mode_info.num_crtc = 5; + 
adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + case CHIP_RENOIR: + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 4; + adev->mode_info.num_dig = 4; break; #endif default: @@ -2243,56 +2694,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; -static bool fill_rects_from_plane_state(const struct drm_plane_state *state, - struct dc_plane_state *plane_state) + +static int fill_dc_scaling_info(const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info) { - plane_state->src_rect.x = state->src_x >> 16; - plane_state->src_rect.y = state->src_y >> 16; - /* we ignore the mantissa for now and do not deal with floating pixels :( */ - plane_state->src_rect.width = state->src_w >> 16; + int scale_w, scale_h; - if (plane_state->src_rect.width == 0) - return false; + memset(scaling_info, 0, sizeof(*scaling_info)); - plane_state->src_rect.height = state->src_h >> 16; - if (plane_state->src_rect.height == 0) - return false; + /* Source is fixed 16.16 but we ignore mantissa for now... */ + scaling_info->src_rect.x = state->src_x >> 16; + scaling_info->src_rect.y = state->src_y >> 16; - plane_state->dst_rect.x = state->crtc_x; - plane_state->dst_rect.y = state->crtc_y; + scaling_info->src_rect.width = state->src_w >> 16; + if (scaling_info->src_rect.width == 0) + return -EINVAL; + + scaling_info->src_rect.height = state->src_h >> 16; + if (scaling_info->src_rect.height == 0) + return -EINVAL; + + scaling_info->dst_rect.x = state->crtc_x; + scaling_info->dst_rect.y = state->crtc_y; if (state->crtc_w == 0) - return false; + return -EINVAL; - plane_state->dst_rect.width = state->crtc_w; + scaling_info->dst_rect.width = state->crtc_w; if (state->crtc_h == 0) - return false; + return -EINVAL; - plane_state->dst_rect.height = state->crtc_h; + scaling_info->dst_rect.height = state->crtc_h; - plane_state->clip_rect = plane_state->dst_rect; + /* DRM doesn't specify clipping on destination output. */ + scaling_info->clip_rect = scaling_info->dst_rect; - switch (state->rotation & DRM_MODE_ROTATE_MASK) { - case DRM_MODE_ROTATE_0: - plane_state->rotation = ROTATION_ANGLE_0; - break; - case DRM_MODE_ROTATE_90: - plane_state->rotation = ROTATION_ANGLE_90; - break; - case DRM_MODE_ROTATE_180: - plane_state->rotation = ROTATION_ANGLE_180; - break; - case DRM_MODE_ROTATE_270: - plane_state->rotation = ROTATION_ANGLE_270; - break; - default: - plane_state->rotation = ROTATION_ANGLE_0; - break; - } + /* TODO: Validate scaling per-format with DC plane caps */ + scale_w = scaling_info->dst_rect.width * 1000 / + scaling_info->src_rect.width; - return true; + if (scale_w < 250 || scale_w > 16000) + return -EINVAL; + + scale_h = scaling_info->dst_rect.height * 1000 / + scaling_info->src_rect.height; + + if (scale_h < 250 || scale_h > 16000) + return -EINVAL; + + /* + * The "scaling_quality" can be ignored for now, quality = 0 has DC + * assume reasonable defaults based on the format. + */ + + return 0; } + static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, uint64_t *tiling_flags) { @@ -2321,10 +2779,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) return offset ? 
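/*
 * Editorial worked example, not part of the patch: fill_dc_scaling_info()
 * expresses per-axis scale factors in thousandths, so the accepted range
 * 250..16000 means 4:1 downscaling up to 16x upscaling. A 1920-pixel-wide
 * source may therefore be shown at 480..30720 destination pixels; a
 * 400-pixel destination gives scale_w = 400 * 1000 / 1920 = 208 and fails
 * with -EINVAL.
 */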
(address + offset * 256) : 0; } -static bool fill_plane_dcc_attributes(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - struct dc_plane_state *plane_state, - uint64_t info) +static int +fill_plane_dcc_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + const union dc_tiling_info *tiling_info, + const uint64_t info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -2337,133 +2801,103 @@ static bool fill_plane_dcc_attributes(struct amdgpu_device *adev, memset(&output, 0, sizeof(output)); if (!offset) - return false; + return 0; + + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return 0; if (!dc->cap_funcs.get_dcc_compression_cap) - return false; + return -EINVAL; - input.format = plane_state->format; - input.surface_size.width = - plane_state->plane_size.grph.surface_size.width; - input.surface_size.height = - plane_state->plane_size.grph.surface_size.height; - input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle; + input.format = format; + input.surface_size.width = plane_size->surface_size.width; + input.surface_size.height = plane_size->surface_size.height; + input.swizzle_mode = tiling_info->gfx9.swizzle; - if (plane_state->rotation == ROTATION_ANGLE_0 || - plane_state->rotation == ROTATION_ANGLE_180) + if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) input.scan = SCAN_DIRECTION_HORIZONTAL; - else if (plane_state->rotation == ROTATION_ANGLE_90 || - plane_state->rotation == ROTATION_ANGLE_270) + else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) input.scan = SCAN_DIRECTION_VERTICAL; if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) - return false; + return -EINVAL; if (!output.capable) - return false; + return -EINVAL; if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0) - return false; + return -EINVAL; - plane_state->dcc.enable = 1; - plane_state->dcc.grph.meta_pitch = + dcc->enable = 1; + dcc->meta_pitch = AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1; - plane_state->dcc.grph.independent_64b_blks = i64b; + dcc->independent_64b_blks = i64b; dcc_address = get_dcc_address(afb->address, info); - plane_state->address.grph.meta_addr.low_part = - lower_32_bits(dcc_address); - plane_state->address.grph.meta_addr.high_part = - upper_32_bits(dcc_address); + address->grph.meta_addr.low_part = lower_32_bits(dcc_address); + address->grph.meta_addr.high_part = upper_32_bits(dcc_address); - return true; + return 0; } -static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, - struct dc_plane_state *plane_state, - const struct amdgpu_framebuffer *amdgpu_fb) -{ - uint64_t tiling_flags; - unsigned int awidth; - const struct drm_framebuffer *fb = &amdgpu_fb->base; - int ret = 0; - struct drm_format_name_buf format_name; - - ret = get_fb_info( - amdgpu_fb, - &tiling_flags); - - if (ret) - return ret; - - switch (fb->format->format) { - case DRM_FORMAT_C8: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; - break; - case DRM_FORMAT_RGB565: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - plane_state->format = 
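/*
 * Editorial note, not part of the patch: the refactored
 * fill_plane_dcc_attributes() distinguishes "no DCC" from "invalid DCC".
 * A zero DCC offset in the tiling flags, or a video-format surface,
 * returns 0 with dcc->enable left clear; a surface the display hardware
 * cannot decompress returns -EINVAL and fails validation. Per
 * get_dcc_address() above, the DCC metadata lives at
 * afb->address + offset * 256, i.e. the offset is stored in 256-byte units.
 */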
SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; - break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; - break; - case DRM_FORMAT_NV21: - plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; - break; - case DRM_FORMAT_NV12: - plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; - break; - default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(fb->format->format, &format_name)); - return -EINVAL; - } - - memset(&plane_state->address, 0, sizeof(plane_state->address)); - memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info)); - memset(&plane_state->dcc, 0, sizeof(plane_state->dcc)); - - if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS; - plane_state->plane_size.grph.surface_size.x = 0; - plane_state->plane_size.grph.surface_size.y = 0; - plane_state->plane_size.grph.surface_size.width = fb->width; - plane_state->plane_size.grph.surface_size.height = fb->height; - plane_state->plane_size.grph.surface_pitch = - fb->pitches[0] / fb->format->cpp[0]; - /* TODO: unhardcode */ - plane_state->color_space = COLOR_SPACE_SRGB; - - } else { - awidth = ALIGN(fb->width, 64); - plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; - plane_state->plane_size.video.luma_size.x = 0; - plane_state->plane_size.video.luma_size.y = 0; - plane_state->plane_size.video.luma_size.width = awidth; - plane_state->plane_size.video.luma_size.height = fb->height; - /* TODO: unhardcode */ - plane_state->plane_size.video.luma_pitch = awidth; - - plane_state->plane_size.video.chroma_size.x = 0; - plane_state->plane_size.video.chroma_size.y = 0; - plane_state->plane_size.video.chroma_size.width = awidth; - plane_state->plane_size.video.chroma_size.height = fb->height; - plane_state->plane_size.video.chroma_pitch = awidth / 2; +static int +fill_plane_buffer_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) +{ + const struct drm_framebuffer *fb = &afb->base; + int ret; - /* TODO: unhardcode */ - plane_state->color_space = COLOR_SPACE_YCBCR709; + memset(tiling_info, 0, sizeof(*tiling_info)); + memset(plane_size, 0, sizeof(*plane_size)); + memset(dcc, 0, sizeof(*dcc)); + memset(address, 0, sizeof(*address)); + + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + address->type = PLN_ADDR_TYPE_GRAPHICS; + address->grph.addr.low_part = lower_32_bits(afb->address); + address->grph.addr.high_part = upper_32_bits(afb->address); + } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { + uint64_t chroma_addr = afb->address + fb->offsets[1]; + + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + plane_size->chroma_size.x = 0; + 
plane_size->chroma_size.y = 0; + /* TODO: set these based on surface format */ + plane_size->chroma_size.width = fb->width / 2; + plane_size->chroma_size.height = fb->height / 2; + + plane_size->chroma_pitch = + fb->pitches[1] / fb->format->cpp[1]; + + address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; + address->video_progressive.luma_addr.low_part = + lower_32_bits(afb->address); + address->video_progressive.luma_addr.high_part = + upper_32_bits(afb->address); + address->video_progressive.chroma_addr.low_part = + lower_32_bits(chroma_addr); + address->video_progressive.chroma_addr.high_part = + upper_32_bits(chroma_addr); } /* Fill GFX8 params */ @@ -2477,93 +2911,292 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); /* XXX fix me for VI */ - plane_state->tiling_info.gfx8.num_banks = num_banks; - plane_state->tiling_info.gfx8.array_mode = + tiling_info->gfx8.num_banks = num_banks; + tiling_info->gfx8.array_mode = DC_ARRAY_2D_TILED_THIN1; - plane_state->tiling_info.gfx8.tile_split = tile_split; - plane_state->tiling_info.gfx8.bank_width = bankw; - plane_state->tiling_info.gfx8.bank_height = bankh; - plane_state->tiling_info.gfx8.tile_aspect = mtaspect; - plane_state->tiling_info.gfx8.tile_mode = + tiling_info->gfx8.tile_split = tile_split; + tiling_info->gfx8.bank_width = bankw; + tiling_info->gfx8.bank_height = bankh; + tiling_info->gfx8.tile_aspect = mtaspect; + tiling_info->gfx8.tile_mode = DC_ADDR_SURF_MICRO_TILING_DISPLAY; } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_1D_TILED_THIN1) { - plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; + tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; } - plane_state->tiling_info.gfx8.pipe_config = + tiling_info->gfx8.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14 || + adev->asic_type == CHIP_NAVI12 || +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + adev->asic_type == CHIP_RENOIR || +#endif adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ - plane_state->tiling_info.gfx9.num_pipes = + tiling_info->gfx9.num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes; - plane_state->tiling_info.gfx9.num_banks = + tiling_info->gfx9.num_banks = adev->gfx.config.gb_addr_config_fields.num_banks; - plane_state->tiling_info.gfx9.pipe_interleave = + tiling_info->gfx9.pipe_interleave = adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; - plane_state->tiling_info.gfx9.num_shader_engines = + tiling_info->gfx9.num_shader_engines = adev->gfx.config.gb_addr_config_fields.num_se; - plane_state->tiling_info.gfx9.max_compressed_frags = + tiling_info->gfx9.max_compressed_frags = adev->gfx.config.gb_addr_config_fields.max_compress_frags; - plane_state->tiling_info.gfx9.num_rb_per_se = + tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; - plane_state->tiling_info.gfx9.swizzle = + tiling_info->gfx9.swizzle = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); - plane_state->tiling_info.gfx9.shaderEnable = 1; + tiling_info->gfx9.shaderEnable = 1; - fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state, - tiling_flags); + ret = fill_plane_dcc_attributes(adev, afb, format, rotation, + plane_size, tiling_info, + tiling_flags, dcc, address); + if (ret) + return ret; } - 
plane_state->visible = true; - plane_state->scaling_quality.h_taps_c = 0; - plane_state->scaling_quality.v_taps_c = 0; + return 0; +} - /* is this needed? is plane_state zeroed at allocation? */ - plane_state->scaling_quality.h_taps = 0; - plane_state->scaling_quality.v_taps = 0; - plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE; +static void +fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *global_alpha, + int *global_alpha_value) +{ + *per_pixel_alpha = false; + *global_alpha = false; + *global_alpha_value = 0xff; - return ret; + if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) + return; + if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { + static const uint32_t alpha_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + }; + uint32_t format = plane_state->fb->format->format; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { + if (format == alpha_formats[i]) { + *per_pixel_alpha = true; + break; + } + } + } + + if (plane_state->alpha < 0xffff) { + *global_alpha = true; + *global_alpha_value = plane_state->alpha >> 8; + } } -static int fill_plane_attributes(struct amdgpu_device *adev, - struct dc_plane_state *dc_plane_state, - struct drm_plane_state *plane_state, - struct drm_crtc_state *crtc_state) +static int +fill_plane_color_attributes(const struct drm_plane_state *plane_state, + const enum surface_pixel_format format, + enum dc_color_space *color_space) { - const struct amdgpu_framebuffer *amdgpu_fb = + bool full_range; + + *color_space = COLOR_SPACE_SRGB; + + /* DRM color properties only affect non-RGB formats. */ + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return 0; + + full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); + + switch (plane_state->color_encoding) { + case DRM_COLOR_YCBCR_BT601: + if (full_range) + *color_space = COLOR_SPACE_YCBCR601; + else + *color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT709: + if (full_range) + *color_space = COLOR_SPACE_YCBCR709; + else + *color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT2020: + if (full_range) + *color_space = COLOR_SPACE_2020_YCBCR; + else + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int +fill_dc_plane_info_and_addr(struct amdgpu_device *adev, + const struct drm_plane_state *plane_state, + const uint64_t tiling_flags, + struct dc_plane_info *plane_info, + struct dc_plane_address *address) +{ + const struct drm_framebuffer *fb = plane_state->fb; + const struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane_state->fb); - const struct drm_crtc *crtc = plane_state->crtc; - int ret = 0; + struct drm_format_name_buf format_name; + int ret; - if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) + memset(plane_info, 0, sizeof(*plane_info)); + + switch (fb->format->format) { + case DRM_FORMAT_C8: + plane_info->format = + SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; + break; + case DRM_FORMAT_RGB565: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; + break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; + break; + case 
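/*
 * Editorial note, not part of the patch: fill_plane_color_attributes()
 * above maps the DRM plane color properties onto DC color spaces as
 * follows (RGB formats always stay COLOR_SPACE_SRGB):
 *
 *   COLOR_ENCODING   COLOR_RANGE   resulting dc color_space
 *   BT601            full          COLOR_SPACE_YCBCR601
 *   BT601            limited       COLOR_SPACE_YCBCR601_LIMITED
 *   BT709            full          COLOR_SPACE_YCBCR709
 *   BT709            limited       COLOR_SPACE_YCBCR709_LIMITED
 *   BT2020           full          COLOR_SPACE_2020_YCBCR
 *   BT2020           limited       -EINVAL (rejected)
 */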
DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; + break; + case DRM_FORMAT_NV21: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; + break; + case DRM_FORMAT_NV12: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; + break; + default: + DRM_ERROR( + "Unsupported screen format %s\n", + drm_get_format_name(fb->format->format, &format_name)); return -EINVAL; + } + + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + plane_info->rotation = ROTATION_ANGLE_0; + break; + case DRM_MODE_ROTATE_90: + plane_info->rotation = ROTATION_ANGLE_90; + break; + case DRM_MODE_ROTATE_180: + plane_info->rotation = ROTATION_ANGLE_180; + break; + case DRM_MODE_ROTATE_270: + plane_info->rotation = ROTATION_ANGLE_270; + break; + default: + plane_info->rotation = ROTATION_ANGLE_0; + break; + } + + plane_info->visible = true; + plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; + + plane_info->layer_index = 0; + + ret = fill_plane_color_attributes(plane_state, plane_info->format, + &plane_info->color_space); + if (ret) + return ret; + + ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, + plane_info->rotation, tiling_flags, + &plane_info->tiling_info, + &plane_info->plane_size, + &plane_info->dcc, address); + if (ret) + return ret; + + fill_blending_from_plane_state( + plane_state, &plane_info->per_pixel_alpha, + &plane_info->global_alpha, &plane_info->global_alpha_value); + + return 0; +} + +static int fill_dc_plane_attributes(struct amdgpu_device *adev, + struct dc_plane_state *dc_plane_state, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state) +{ + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); + const struct amdgpu_framebuffer *amdgpu_fb = + to_amdgpu_framebuffer(plane_state->fb); + struct dc_scaling_info scaling_info; + struct dc_plane_info plane_info; + uint64_t tiling_flags; + int ret; + + ret = fill_dc_scaling_info(plane_state, &scaling_info); + if (ret) + return ret; - ret = fill_plane_attributes_from_fb( - crtc->dev->dev_private, - dc_plane_state, - amdgpu_fb); + dc_plane_state->src_rect = scaling_info.src_rect; + dc_plane_state->dst_rect = scaling_info.dst_rect; + dc_plane_state->clip_rect = scaling_info.clip_rect; + dc_plane_state->scaling_quality = scaling_info.scaling_quality; + ret = get_fb_info(amdgpu_fb, &tiling_flags); if (ret) return ret; + ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, + &plane_info, + &dc_plane_state->address); + if (ret) + return ret; + + dc_plane_state->format = plane_info.format; + dc_plane_state->color_space = plane_info.color_space; + dc_plane_state->format = plane_info.format; + dc_plane_state->plane_size = plane_info.plane_size; + dc_plane_state->rotation = plane_info.rotation; + dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; + dc_plane_state->stereo_format = plane_info.stereo_format; + dc_plane_state->tiling_info = plane_info.tiling_info; + dc_plane_state->visible = plane_info.visible; + dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; + dc_plane_state->global_alpha = plane_info.global_alpha; + dc_plane_state->global_alpha_value = plane_info.global_alpha_value; + dc_plane_state->dcc = plane_info.dcc; + dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 + /* * Always set input transfer function, since plane state is refreshed * every time. 
- ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state); - if (ret) { - dc_transfer_func_release(dc_plane_state->in_transfer_func); - dc_plane_state->in_transfer_func = NULL; - } + ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); + if (ret) + return ret; - return ret; + return 0; } static void update_stream_scaling_settings(const struct drm_display_mode *mode, @@ -2622,16 +3255,31 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, } static enum dc_color_depth -convert_color_depth_from_display_info(const struct drm_connector *connector) +convert_color_depth_from_display_info(const struct drm_connector *connector, + const struct drm_connector_state *state) { - struct dm_connector_state *dm_conn_state = - to_dm_connector_state(connector->state); - uint32_t bpc = connector->display_info.bpc; + uint8_t bpc = (uint8_t)connector->display_info.bpc; + + /* Assume 8 bpc by default if no bpc is specified. */ + bpc = bpc ? bpc : 8; + + if (!state) + state = connector->state; - /* TODO: Remove this when there's support for max_bpc in drm */ - if (dm_conn_state && bpc > dm_conn_state->max_bpc) - /* Round down to nearest even number. */ - bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1); + if (state) { + /* + * Cap display bpc based on the user requested value. + * + * The value for state->max_bpc may not be correctly updated + * depending on when the connector gets added to the state + * or if this was called outside of atomic check, so it + * can't be used directly. + */ + bpc = min(bpc, state->max_requested_bpc); + + /* Round down to the nearest even number. */ + bpc = bpc - (bpc & 1); + } switch (bpc) { case 0: @@ -2749,16 +3397,21 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ } -static void -fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, - const struct drm_display_mode *mode_in, - const struct drm_connector *connector, - const struct dc_stream_state *old_stream) +static void fill_stream_properties_from_drm_display_mode( + struct dc_stream_state *stream, + const struct drm_display_mode *mode_in, + const struct drm_connector *connector, + const struct drm_connector_state *connector_state, + const struct dc_stream_state *old_stream) { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; - memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; @@ -2768,6 +3421,9 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -2776,7 +3432,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; timing_out->display_color_depth = 
convert_color_depth_from_display_info( - connector); + connector, connector_state); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; @@ -2792,6 +3448,13 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = @@ -2973,12 +3636,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, { struct drm_display_mode *preferred_mode = NULL; struct drm_connector *drm_connector; + const struct drm_connector_state *con_state = + dm_state ? &dm_state->base : NULL; struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; bool native_mode_found = false; bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + struct dsc_dec_dpcd_caps dsc_caps; + uint32_t link_bandwidth_kbps; +#endif struct dc_sink *sink = NULL; if (aconnector == NULL) { @@ -3006,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->dm_stream_context = aconnector; + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -3045,10 +3717,31 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, NULL); + &mode, &aconnector->base, con_state, NULL); else fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, old_stream); + &mode, &aconnector->base, con_state, old_stream); + +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + stream->timing.flags.DSC = 0; + + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, + &dsc_caps); + link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, + dc_link_get_link_cap(aconnector->dc_link)); + + if (dsc_caps.is_dsc_supported) + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + &dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, + link_bandwidth_kbps, + &stream->timing, + &stream->timing.dsc_cfg)) + stream->timing.flags.DSC = 1; + } +#endif update_stream_scaling_settings(&mode, dm_state, stream); @@ -3059,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false); + if (stream->link->psr_feature_enabled) { + struct dc *core_dc = stream->link->ctx->dc; + + if (dc_is_dmcu_initialized(core_dc)) { + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + stream->psr_version = dmcu->dmcu_version.psr_version; + mod_build_vsc_infopacket(stream, 
&stream->vsc_infopacket); + } + } finish: dc_sink_release(sink); @@ -3124,24 +3829,57 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } + state->active_planes = cur->active_planes; + state->interrupts_enabled = cur->interrupts_enabled; state->vrr_params = cur->vrr_params; state->vrr_infopacket = cur->vrr_infopacket; state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; state->freesync_config = cur->freesync_config; - state->crc_enabled = cur->crc_enabled; + state->crc_src = cur->crc_src; + state->cm_has_degamma = cur->cm_has_degamma; + state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; /* TODO: Duplicate dc_stream after the stream object is flattened */ return &state->base; } +static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) +{ + enum dc_irq_source irq_source; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + int rc; + + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; + + rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; + + DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n", + acrtc->crtc_id, enable ? "en" : "dis", rc); + return rc; +} static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = crtc->dev->dev_private; + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); + int rc = 0; + + if (enable) { + /* vblank irq on -> Only need vupdate irq in vrr mode */ + if (amdgpu_dm_vrr_active(acrtc_state)) + rc = dm_set_vupdate_irq(crtc, true); + } else { + /* vblank irq off -> vupdate irq off */ + rc = dm_set_vupdate_irq(crtc, false); + } + + if (rc) + return rc; irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 
0 : -EBUSY; @@ -3168,6 +3906,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .atomic_destroy_state = dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, + .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, .enable_vblank = dm_enable_vblank, .disable_vblank = dm_disable_vblank, }; @@ -3242,9 +3981,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; - } else if (property == adev->mode_info.max_bpc_property) { - dm_new_state->max_bpc = val; - ret = 0; } else if (property == adev->mode_info.abm_level_property) { dm_new_state->abm_level = val; ret = 0; @@ -3290,9 +4026,6 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; - } else if (property == adev->mode_info.max_bpc_property) { - *val = dm_state->max_bpc; - ret = 0; } else if (property == adev->mode_info.abm_level_property) { *val = dm_state->abm_level; ret = 0; @@ -3301,6 +4034,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, return ret; } +static void amdgpu_dm_connector_unregister(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); +} + static void amdgpu_dm_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -3329,6 +4069,11 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); + if (aconnector->i2c) { + i2c_del_adapter(&aconnector->i2c->base); + kfree(aconnector->i2c); + } + kfree(connector); } @@ -3349,7 +4094,10 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_enable = false; state->underscan_hborder = 0; state->underscan_vborder = 0; - state->max_bpc = 8; + state->base.max_requested_bpc = 8; + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + state->abm_level = amdgpu_dm_abm_level; __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -3375,7 +4123,6 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) new_state->underscan_enable = state->underscan_enable; new_state->underscan_hborder = state->underscan_hborder; new_state->underscan_vborder = state->underscan_vborder; - new_state->max_bpc = state->max_bpc; return &new_state->base; } @@ -3388,7 +4135,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_set_property = amdgpu_dm_connector_atomic_set_property, - .atomic_get_property = amdgpu_dm_connector_atomic_get_property + .atomic_get_property = amdgpu_dm_connector_atomic_get_property, + .early_unregister = amdgpu_dm_connector_unregister }; static int get_modes(struct drm_connector *connector) @@ -3491,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec result = MODE_OK; else DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->vdisplay, mode->hdisplay, + 
mode->vdisplay, mode->clock, dc_result); @@ -3503,6 +4251,129 @@ fail: return result; } +static int fill_hdr_info_packet(const struct drm_connector_state *state, + struct dc_info_packet *out) +{ + struct hdmi_drm_infoframe frame; + unsigned char buf[30]; /* 26 + 4 */ + ssize_t len; + int ret, i; + + memset(out, 0, sizeof(*out)); + + if (!state->hdr_output_metadata) + return 0; + + ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); + if (ret) + return ret; + + len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); + if (len < 0) + return (int)len; + + /* Static metadata is a fixed 26 bytes + 4 byte header. */ + if (len != 30) + return -EINVAL; + + /* Prepare the infopacket for DC. */ + switch (state->connector->connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + out->hb0 = 0x87; /* type */ + out->hb1 = 0x01; /* version */ + out->hb2 = 0x1A; /* length */ + out->sb[0] = buf[3]; /* checksum */ + i = 1; + break; + + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + out->hb0 = 0x00; /* sdp id, zero */ + out->hb1 = 0x87; /* type */ + out->hb2 = 0x1D; /* payload len - 1 */ + out->hb3 = (0x13 << 2); /* sdp version */ + out->sb[0] = 0x01; /* version */ + out->sb[1] = 0x1A; /* length */ + i = 2; + break; + + default: + return -EINVAL; + } + + memcpy(&out->sb[i], &buf[4], 26); + out->valid = true; + + print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, + sizeof(out->sb), false); + + return 0; +} + +static bool +is_hdr_metadata_different(const struct drm_connector_state *old_state, + const struct drm_connector_state *new_state) +{ + struct drm_property_blob *old_blob = old_state->hdr_output_metadata; + struct drm_property_blob *new_blob = new_state->hdr_output_metadata; + + if (old_blob != new_blob) { + if (old_blob && new_blob && + old_blob->length == new_blob->length) + return memcmp(old_blob->data, new_blob->data, + old_blob->length); + + return true; + } + + return false; +} + +static int +amdgpu_dm_connector_atomic_check(struct drm_connector *conn, + struct drm_atomic_state *state) +{ + struct drm_connector_state *new_con_state = + drm_atomic_get_new_connector_state(state, conn); + struct drm_connector_state *old_con_state = + drm_atomic_get_old_connector_state(state, conn); + struct drm_crtc *crtc = new_con_state->crtc; + struct drm_crtc_state *new_crtc_state; + int ret; + + if (!crtc) + return 0; + + if (is_hdr_metadata_different(old_con_state, new_con_state)) { + struct dc_info_packet hdr_infopacket; + + ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); + if (ret) + return ret; + + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + /* + * DC considers the stream backends changed if the + * static metadata changes. Forcing the modeset also + * gives a simple way for userspace to switch from + * 8bpc to 10bpc when setting the metadata to enter + * or exit HDR. + * + * Changing the static metadata after it's been + * set is permissible, however. So only force a + * modeset if we're entering or exiting HDR. 
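+ */

fill_hdr_info_packet() above relies on the fact that a packed HDMI Dynamic Range and Mastering infoframe is always 30 bytes: a 4-byte header followed by the fixed 26-byte static-metadata payload, which then gets re-wrapped with either an HDMI infoframe header or a DP SDP header. A rough standalone sketch of the HDMI-side repacking arithmetic (plain C, hypothetical repack_for_hdmi() helper, simplified buffer handling; not the driver's code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 4-byte header + 26-byte HDR static metadata payload. */
#define PACKED_LEN 30

static int repack_for_hdmi(const uint8_t packed[PACKED_LEN],
			   uint8_t hb[4], uint8_t sb[28])
{
	hb[0] = 0x87;       /* infoframe type */
	hb[1] = 0x01;       /* version */
	hb[2] = 0x1A;       /* length = 26 */
	hb[3] = 0;          /* unused in this sketch */
	sb[0] = packed[3];  /* checksum byte from the packed frame */
	memcpy(&sb[1], &packed[4], 26);
	return 0;
}

int main(void)
{
	uint8_t packed[PACKED_LEN] = { 0x87, 0x01, 0x1A, 0xAB /* payload... */ };
	uint8_t hb[4], sb[28] = { 0 };

	repack_for_hdmi(packed, hb, sb);
	printf("length=%d checksum=0x%02X\n", hb[2], (unsigned)sb[0]);
	return 0;
}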
+ new_crtc_state->mode_changed = + !old_con_state->hdr_output_metadata || + !new_con_state->hdr_output_metadata; + } + + return 0; +} + static const struct drm_connector_helper_funcs amdgpu_dm_connector_helper_funcs = { /* @@ -3513,12 +4384,83 @@ amdgpu_dm_connector_helper_funcs = { */ .get_modes = get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, + .atomic_check = amdgpu_dm_connector_atomic_check, }; static void dm_crtc_helper_disable(struct drm_crtc *crtc) { } +static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state) +{ + struct drm_device *dev = new_crtc_state->crtc->dev; + struct drm_plane *plane; + + drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return true; + } + + return false; +} + +static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +{ + struct drm_atomic_state *state = new_crtc_state->state; + struct drm_plane *plane; + int num_active = 0; + + drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { + struct drm_plane_state *new_plane_state; + + /* Cursor planes are "fake". */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + new_plane_state = drm_atomic_get_new_plane_state(state, plane); + + if (!new_plane_state) { + /* + * The plane is enabled on the CRTC and hasn't changed + * state. This means that it previously passed + * validation and is therefore enabled. + */ + num_active += 1; + continue; + } + + /* We need a framebuffer to be considered enabled. */ + num_active += (new_plane_state->fb != NULL); + } + + return num_active; +} + +/* + * Sets whether interrupts should be enabled on a specific CRTC. + * We require that the stream be enabled and that there exist active + * DC planes on the stream. + */ +static void +dm_update_crtc_interrupt_state(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) +{ + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->active_planes = 0; + dm_new_crtc_state->interrupts_enabled = false; + + if (!dm_new_crtc_state->stream) + return; + + dm_new_crtc_state->active_planes = + count_crtc_active_planes(new_crtc_state); + + dm_new_crtc_state->interrupts_enabled = + dm_new_crtc_state->active_planes > 0; +} + static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -3527,6 +4469,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); int ret = -EINVAL; + /* + * Update interrupt state for the CRTC. This needs to happen whenever + * the CRTC has changed or whenever any of its planes have changed. + * Atomic check satisfies both of these requirements since the CRTC + * is added to the state by DRM during drm_atomic_helper_check_planes. + */ + dm_update_crtc_interrupt_state(crtc, state); + if (unlikely(!dm_crtc_state->stream && modeset_required(state, NULL, dm_crtc_state->stream))) { WARN_ON(1); @@ -3537,6 +4487,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, if (!dm_crtc_state->stream) return 0; + /* + * We want at least one hardware plane enabled to use + * the stream with a cursor enabled. + */
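Before the check just below runs, count_crtc_active_planes() above has already tallied the real planes; the commit is rejected when only the "fake" cursor plane would remain, since the cursor is scanned out by a hardware plane and needs at least one enabled non-cursor plane with a framebuffer. A trivial standalone illustration of the counting rule (plain C, simplified stand-in state; not driver code):

#include <stdbool.h>
#include <stdio.h>

struct fake_plane {        /* stand-in for a plane in the new CRTC state */
	bool is_cursor;
	bool has_fb;       /* a framebuffer is attached */
};

static int count_active_planes(const struct fake_plane *p, int n)
{
	int active = 0;

	for (int i = 0; i < n; i++) {
		if (p[i].is_cursor)     /* cursor planes are "fake" */
			continue;
		if (p[i].has_fb)        /* need an fb to count as enabled */
			active++;
	}
	return active;
}

int main(void)
{
	struct fake_plane cursor_only[] = { { true, true } };
	struct fake_plane with_primary[] = { { true, true }, { false, true } };

	/* Cursor with no real planes would be rejected (-EINVAL above). */
	printf("cursor only:  %d active\n", count_active_planes(cursor_only, 1));
	printf("with primary: %d active\n", count_active_planes(with_primary, 2));
	return 0;
}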
+ if (state->enable && state->active && + does_crtc_have_active_cursor(state) && + dm_crtc_state->active_planes == 0) + return -EINVAL; + if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) return 0; @@ -3583,11 +4542,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane) amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); - if (amdgpu_state) { - plane->state = &amdgpu_state->base; - plane->state->plane = plane; - plane->state->rotation = DRM_MODE_ROTATE_0; - } + if (amdgpu_state) + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); } static struct drm_plane_state * @@ -3637,10 +4593,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct amdgpu_device *adev; struct amdgpu_bo *rbo; - uint64_t chroma_addr = 0; struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; - uint64_t tiling_flags, dcc_address; - unsigned int awidth; + struct list_head list; + struct ttm_validate_buffer tv; + struct ww_acquire_ctx ticket; + uint64_t tiling_flags; uint32_t domain; int r; @@ -3656,12 +4613,20 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, obj = new_state->fb->obj[0]; rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); - r = amdgpu_bo_reserve(rbo, false); - if (unlikely(r != 0)) + INIT_LIST_HEAD(&list); + + tv.bo = &rbo->tbo; + tv.num_shared = 1; + list_add(&tv.head, &list); + + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); + if (r) { + dev_err(adev->dev, "failed to reserve bo (%d)\n", r); return r; + } if (plane->type != DRM_PLANE_TYPE_CURSOR) - domain = amdgpu_display_supported_domains(adev); + domain = amdgpu_display_supported_domains(adev, rbo->flags); else domain = AMDGPU_GEM_DOMAIN_VRAM; @@ -3669,21 +4634,21 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, if (unlikely(r != 0)) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to pin framebuffer with error %d\n", r); - amdgpu_bo_unreserve(rbo); + ttm_eu_backoff_reservation(&ticket, &list); return r; } r = amdgpu_ttm_alloc_gart(&rbo->tbo); if (unlikely(r != 0)) { amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("%p bind failed\n", rbo); return r; } amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); - amdgpu_bo_unreserve(rbo); + ttm_eu_backoff_reservation(&ticket, &list); afb->address = amdgpu_bo_gpu_offset(rbo); @@ -3693,29 +4658,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; - if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - plane_state->address.grph.addr.low_part = lower_32_bits(afb->address); - plane_state->address.grph.addr.high_part = upper_32_bits(afb->address); - - dcc_address = - get_dcc_address(afb->address, tiling_flags); - plane_state->address.grph.meta_addr.low_part = - lower_32_bits(dcc_address); - plane_state->address.grph.meta_addr.high_part = - upper_32_bits(dcc_address); - } else { - awidth = ALIGN(new_state->fb->width, 64); - plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; - plane_state->address.video_progressive.luma_addr.low_part - = lower_32_bits(afb->address); - plane_state->address.video_progressive.luma_addr.high_part - = upper_32_bits(afb->address); - chroma_addr = afb->address + (u64)awidth * new_state->fb->height; - plane_state->address.video_progressive.chroma_addr.low_part - = lower_32_bits(chroma_addr); - 
plane_state->address.video_progressive.chroma_addr.high_part - = upper_32_bits(chroma_addr); - } + fill_plane_buffer_attributes( + adev, afb, plane_state->format, plane_state->rotation, + tiling_flags, &plane_state->tiling_info, + &plane_state->plane_size, &plane_state->dcc, + &plane_state->address); } return 0; @@ -3747,13 +4694,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane, { struct amdgpu_device *adev = plane->dev->dev_private; struct dc *dc = adev->dm.dc; - struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct dm_plane_state *dm_plane_state; + struct dc_scaling_info scaling_info; + int ret; + + dm_plane_state = to_dm_plane_state(state); if (!dm_plane_state->dc_state) return 0; - if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) - return -EINVAL; + ret = fill_dc_scaling_info(state, &scaling_info); + if (ret) + return ret; if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) return 0; @@ -3764,20 +4716,10 @@ static int dm_plane_atomic_check(struct drm_plane *plane, static int dm_plane_atomic_async_check(struct drm_plane *plane, struct drm_plane_state *new_plane_state) { - struct drm_plane_state *old_plane_state = - drm_atomic_get_old_plane_state(new_plane_state->state, plane); - /* Only support async updates on cursor planes. */ if (plane->type != DRM_PLANE_TYPE_CURSOR) return -EINVAL; - /* - * DRM calls prepare_fb and cleanup_fb on new_plane_state for - * async commits so don't allow fb changes. - */ - if (old_plane_state->fb != new_plane_state->fb) - return -EINVAL; - return 0; } @@ -3787,8 +4729,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane, struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(new_state->state, plane); - if (plane->state->fb != new_state->fb) - drm_atomic_set_fb_for_plane(plane->state, new_state->fb); + swap(plane->state->fb, new_state->fb); plane->state->src_x = new_state->src_x; plane->state->src_y = new_state->src_y; @@ -3826,64 +4767,115 @@ static const uint32_t rgb_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, }; -static const uint32_t yuv_formats[] = { - DRM_FORMAT_NV12, - DRM_FORMAT_NV21, +static const uint32_t overlay_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565 }; static const u32 cursor_formats[] = { DRM_FORMAT_ARGB8888 }; -static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - unsigned long possible_crtcs) +static int get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { - int res = -EPERM; + int i, num_formats = 0; + + /* + * TODO: Query support for each group of formats directly from + * DC plane caps. This will require adding more formats to the + * caps list. 
+ */ switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - rgb_formats, - ARRAY_SIZE(rgb_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = rgb_formats[i]; + } + + if (plane_cap && plane_cap->pixel_format_support.nv12) + formats[num_formats++] = DRM_FORMAT_NV12; break; + case DRM_PLANE_TYPE_OVERLAY: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - yuv_formats, - ARRAY_SIZE(yuv_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = overlay_formats[i]; + } break; + case DRM_PLANE_TYPE_CURSOR: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - cursor_formats, - ARRAY_SIZE(cursor_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = cursor_formats[i]; + } break; } + return num_formats; +} + +static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap) +{ + uint32_t formats[32]; + int num_formats; + int res = -EPERM; + + num_formats = get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); + + res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs, + &dm_plane_funcs, formats, num_formats, + NULL, plane->type, NULL); + if (res) + return res; + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + plane_cap && plane_cap->per_pixel_alpha) { + unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI); + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, blend_caps); + } + + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + plane_cap && plane_cap->pixel_format_support.nv12) { + /* This only affects YUV formats. 
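*/

The color properties created just below feed fill_plane_color_attributes() further up. For BT.601 and BT.709, the encoding/range combinations map onto DC color spaces roughly as in this standalone sketch (plain C, stand-in enums and a hypothetical dc_color_space() helper; BT.2020, which the driver accepts only as full range, is omitted for brevity):

#include <stdio.h>

enum enc { BT601, BT709 };
enum rng { LIMITED, FULL };

static const char *dc_color_space(enum enc e, enum rng r)
{
	if (e == BT601)
		return r == FULL ? "YCBCR601" : "YCBCR601_LIMITED";
	return r == FULL ? "YCBCR709" : "YCBCR709_LIMITED";
}

int main(void)
{
	/* The defaults chosen below: BT709 + limited range. */
	printf("default: %s\n", dc_color_space(BT709, LIMITED));
	return 0;
}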
+ drm_plane_create_color_properties( + plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + } + drm_plane_helper_add(plane, &dm_plane_helper_funcs); /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); - - return res; + return 0; } static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, @@ -3900,7 +4892,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, goto fail; cursor_plane->type = DRM_PLANE_TYPE_CURSOR; - res = amdgpu_dm_plane_init(dm, cursor_plane, 0); + res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); if (!acrtc) @@ -3970,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st) static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) { - return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]); + struct drm_encoder *encoder; + + /* There is only one encoder per connector */ + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; } static void amdgpu_dm_get_native_mode(struct drm_connector *connector) @@ -4097,6 +5095,15 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_connector->num_modes = drm_add_edid_modes(connector, edid); + /* Sort the probed modes before calling + * amdgpu_dm_get_native_mode(), since the EDID can have + * more than one preferred mode. Modes later in the + * probed mode list can carry a higher preferred + * resolution; for example, 3840x2160 in the base EDID + * preferred timing and 4096x2160 as the preferred + * resolution in a later DID extension block. + */ + drm_mode_sort(&connector->probed_modes); amdgpu_dm_get_native_mode(connector); } else { amdgpu_dm_connector->num_modes = 0; @@ -4132,6 +5139,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, { struct amdgpu_device *adev = dm->ddev->dev_private; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (aconnector->base.funcs->reset) + aconnector->base.funcs->reset(&aconnector->base); + aconnector->connector_id = link_index; aconnector->dc_link = link; aconnector->base.interlace_allowed = false; @@ -4139,6 +5153,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.stereo_allowed = false; aconnector->base.dpms = DRM_MODE_DPMS_OFF; aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ + aconnector->audio_inst = -1; mutex_init(&aconnector->hpd_lock); /* @@ -4176,9 +5191,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_vborder_property, 0); - drm_object_attach_property(&aconnector->base.base, - adev->mode_info.max_bpc_property, - 0); + + drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); + + /* This defaults to the max in the range, but we want 8bpc. */
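The 8..16 range registered above interacts with convert_color_depth_from_display_info() further up, and the assignments just after this note pin the default back to 8: the sink's EDID bpc is capped by the user's max_requested_bpc and then rounded down to an even value. A quick standalone sketch of that clamp (plain C, hypothetical effective_bpc() helper; not driver code):

#include <stdint.h>
#include <stdio.h>

static uint8_t effective_bpc(uint8_t edid_bpc, uint8_t max_requested_bpc)
{
	uint8_t bpc = edid_bpc ? edid_bpc : 8;  /* assume 8 if unspecified */

	if (bpc > max_requested_bpc)            /* cap at the user request */
		bpc = max_requested_bpc;
	return bpc - (bpc & 1);                 /* round down to even */
}

int main(void)
{
	printf("%d\n", effective_bpc(10, 8));   /* user caps a 10 bpc panel: 8 */
	printf("%d\n", effective_bpc(0, 16));   /* unspecified EDID bpc: 8 */
	printf("%d\n", effective_bpc(12, 11));  /* odd cap rounds down: 10 */
	return 0;
}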
+ aconnector->base.state->max_bpc = 8; + aconnector->base.state->max_requested_bpc = 8; if (connector_type == DRM_MODE_CONNECTOR_eDP && dc_is_dmcu_initialized(adev->dm.dc)) { @@ -4189,8 +5207,16 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, if (connector_type == DRM_MODE_CONNECTOR_HDMIA || connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) { + drm_object_attach_property( + &aconnector->base.base, + dm->ddev->mode_config.hdr_output_metadata_property, 0); + drm_connector_attach_vrr_capable_property( &aconnector->base); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + drm_connector_attach_content_protection_property(&aconnector->base, false); +#endif } } @@ -4314,9 +5340,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &aconnector->base, &amdgpu_dm_connector_helper_funcs); - if (aconnector->base.funcs->reset) - aconnector->base.funcs->reset(&aconnector->base); - amdgpu_dm_connector_init_helper( dm, aconnector, @@ -4329,11 +5352,9 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_register(&aconnector->base); #if defined(CONFIG_DEBUG_FS) - res = connector_debugfs_init(aconnector); - if (res) { - DRM_ERROR("Failed to create debugfs for connector"); - goto out_free; - } + connector_debugfs_init(aconnector); + aconnector->debugfs_dpcd_address = 0; + aconnector->debugfs_dpcd_size = 0; #endif if (connector_type == DRM_MODE_CONNECTOR_DisplayPort @@ -4438,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +static bool is_content_protection_different(struct drm_connector_state *state, + const struct drm_connector_state *old_state, + const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + /* CP is being re-enabled, ignore this */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Check if something is connected or enabled; otherwise we would start hdcp + * with nothing connected/enabled: hot-plug, headless s3, dpms + */ + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && + aconnector->dc_sink != NULL) + return true; + + if (old_state->content_protection == state->content_protection) + return false; + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + return true; + + return false; +} + +static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) + hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); + else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); + +} +#endif
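For reference, the content-protection transitions handled by is_content_protection_different() above can be summarized as a small decision table. A standalone sketch (plain C, hypothetical cp_needs_update() helper; the real function also rewrites the state in place, which this simplification mirrors only partially):

#include <stdbool.h>
#include <stdio.h>

enum cp { UNDESIRED, DESIRED, ENABLED };

static bool cp_needs_update(enum cp old_cp, enum cp new_cp, bool sink_present)
{
	/* CP is being re-enabled: treat as still enabled, no action. */
	if (old_cp == ENABLED && new_cp == DESIRED)
		return false;

	/* S3 resume: ENABLED restored on top of UNDESIRED, retry it. */
	if (old_cp == UNDESIRED && new_cp == ENABLED)
		new_cp = DESIRED;

	/* Only start HDCP when something is actually connected and on. */
	if (new_cp == DESIRED && sink_present)
		return true;

	if (old_cp == new_cp)
		return false;

	return new_cp == UNDESIRED;  /* explicit disable */
}

int main(void)
{
	printf("%d\n", cp_needs_update(UNDESIRED, DESIRED, true));  /* 1 */
	printf("%d\n", cp_needs_update(ENABLED, DESIRED, true));    /* 0 */
	printf("%d\n", cp_needs_update(ENABLED, UNDESIRED, true));  /* 1 */
	return 0;
}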
static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -4455,12 +5523,12 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, int x, y; int xorigin = 0, yorigin = 0; - if (!crtc || !plane->state->fb) { - position->enable = false; - position->x = 0; - position->y = 0; + position->enable = false; + position->x = 0; + position->y = 0; + + if (!crtc || !plane->state->fb) return 0; - } if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { @@ -4473,9 +5541,17 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, x = plane->state->crtc_x; y = plane->state->crtc_y; - /* avivo cursor are offset into the total surface */ - x += crtc->primary->state->src_x >> 16; - y += crtc->primary->state->src_y >> 16; + + if (x <= -amdgpu_crtc->max_cursor_width || + y <= -amdgpu_crtc->max_cursor_height) + return 0; + + if (crtc->primary->state) { + /* avivo cursors are offset into the total surface */ + x += crtc->primary->state->src_x >> 16; + y += crtc->primary->state->src_y >> 16; + } + if (x < 0) { xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); x = 0; @@ -4533,6 +5609,7 @@ static void handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; + memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; @@ -4581,9 +5658,10 @@ static void update_freesync_state_on_stream( struct dc_plane_state *surface, u32 flip_timestamp_in_us) { - struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; + struct mod_vrr_params vrr_params; struct dc_info_packet vrr_infopacket = {0}; - struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; if (!new_stream) return; @@ -4596,19 +5674,8 @@ static void update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; - if (new_crtc_state->vrr_supported && - config.min_refresh_in_uhz && - config.max_refresh_in_uhz) { - config.state = new_crtc_state->base.vrr_enabled ? - VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; - } else { - config.state = VRR_STATE_UNSUPPORTED; - } - - mod_freesync_build_vrr_params(dm->freesync_module, - new_stream, - &config, &vrr_params); + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; if (surface) { mod_freesync_handle_preflip( dm->freesync_module, surface, new_stream, flip_timestamp_in_us, &vrr_params); + + if (adev->family < AMDGPU_FAMILY_AI && + amdgpu_dm_vrr_active(new_crtc_state)) { + mod_freesync_handle_v_update(dm->freesync_module, + new_stream, &vrr_params); + + /* Need to call this before the frame ends. */
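For context on what the dc_stream_adjust_vmin_vmax() call just below is changing: with a fixed pixel clock and h_total, VRR varies the refresh rate by stretching v_total between a minimum and maximum. A back-of-the-envelope sketch of that relationship (plain C, hypothetical v_total_for_refresh() helper; illustrative math only, not the freesync module's code):

#include <stdint.h>
#include <stdio.h>

/* v_total = pixel_clock / (h_total * refresh), refresh given in Hz * 100. */
static uint32_t v_total_for_refresh(uint32_t pix_clk_khz, uint32_t h_total,
				    uint32_t refresh_hz_x100)
{
	return (uint32_t)((uint64_t)pix_clk_khz * 100000 /
			  ((uint64_t)h_total * refresh_hz_x100));
}

int main(void)
{
	/* 1080p@60: 148.5 MHz pixel clock, h_total 2200 -> v_total 1125. */
	printf("60 Hz: v_total=%u\n",
	       (unsigned)v_total_for_refresh(148500, 2200, 6000));
	/* Stretched to 40 Hz for a VRR vmax bound. */
	printf("40 Hz: v_total=%u\n",
	       (unsigned)v_total_for_refresh(148500, 2200, 4000));
	return 0;
}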
+ dc_stream_adjust_vmin_vmax(dm->dc, + new_crtc_state->stream, + &vrr_params.adjust); + } } mod_freesync_build_vrr_infopacket( @@ -4648,6 +5726,100 @@ static void update_freesync_state_on_stream( new_crtc_state->base.crtc->base.id, (int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} + +static void pre_update_freesync_state_on_stream( + struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state) +{ + struct dc_stream_state *new_stream = new_crtc_state->stream; + struct mod_vrr_params vrr_params; + struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; + + if (!new_stream) + return; + + /* + * TODO: Determine why min/max totals and vrefresh can be 0 here. + * For now it's sufficient to just guard against these conditions. + */ + if (!new_stream->timing.h_total || !new_stream->timing.v_total) + return; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; + + if (new_crtc_state->vrr_supported && + config.min_refresh_in_uhz && + config.max_refresh_in_uhz) { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } else { + config.state = VRR_STATE_UNSUPPORTED; + } + + mod_freesync_build_vrr_params(dm->freesync_module, + new_stream, + &config, &vrr_params); + + new_crtc_state->freesync_timing_changed |= + (memcmp(&new_crtc_state->vrr_params.adjust, + &vrr_params.adjust, + sizeof(vrr_params.adjust)) != 0); + + new_crtc_state->vrr_params = vrr_params; + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} + +static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + bool old_vrr_active = amdgpu_dm_vrr_active(old_state); + bool new_vrr_active = amdgpu_dm_vrr_active(new_state); + + if (!old_vrr_active && new_vrr_active) { + /* Transition VRR inactive -> active: + * While VRR is active, we must not disable vblank irq, as a + * re-enable after a disable would compute bogus vblank/pflip + * timestamps if the disable happened inside the display front-porch. + * + * We also need vupdate irq for the actual core vblank handling + * at end of vblank. + */ + dm_set_vupdate_irq(new_state->base.crtc, true); + drm_crtc_vblank_get(new_state->base.crtc); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + __func__, new_state->base.crtc->base.id); + } else if (old_vrr_active && !new_vrr_active) { + /* Transition VRR active -> inactive: + * Allow vblank irq disable again for fixed refresh rate. + */ + dm_set_vupdate_irq(new_state->base.crtc, false); + drm_crtc_vblank_put(new_state->base.crtc); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + __func__, new_state->base.crtc->base.id); + } +} + +static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + int i; + + /* + * TODO: Make this per-stream so we don't issue redundant updates for + * commits with multiple streams. 
+ */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) + if (plane->type == DRM_PLANE_TYPE_CURSOR) + handle_cursor_update(plane, old_plane_state); } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, @@ -4655,9 +5827,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, - bool *wait_for_vblank) + bool wait_for_vblank) { - uint32_t i, r; + uint32_t i; uint64_t timestamp_ns; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -4667,42 +5839,44 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); - int flip_count = 0, planes_count = 0, vpos, hpos; + int planes_count = 0, vpos, hpos; + long r; unsigned long flags; struct amdgpu_bo *abo; - uint64_t tiling_flags, dcc_address; - uint32_t target, target_vblank; - uint64_t last_flip_vblank; - bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; - - struct { - struct dc_surface_update surface_updates[MAX_SURFACES]; - struct dc_flip_addrs flip_addrs[MAX_SURFACES]; - struct dc_stream_update stream_update; - } *flip; - + uint64_t tiling_flags; + uint32_t target_vblank, last_flip_vblank; + bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); + bool pflip_present = false; + bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; - } *full; + } *bundle; - flip = kzalloc(sizeof(*flip), GFP_KERNEL); - full = kzalloc(sizeof(*full), GFP_KERNEL); + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - if (!flip || !full) { - dm_error("Failed to allocate update bundles\n"); + if (!bundle) { + dm_error("Failed to allocate update bundle\n"); goto cleanup; } + /* + * Disable the cursor first if we're disabling all the planes. + * It'll remain on the screen after the planes are re-enabled + * if we don't. 
+ */ + if (acrtc_state->active_planes == 0) + amdgpu_dm_commit_cursors(state); + /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; - struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); - bool pflip_needed; + bool plane_needs_flip; struct dc_plane_state *dc_plane; struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); @@ -4717,122 +5891,104 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (!new_crtc_state->active) continue; - pflip_needed = old_plane_state->fb && - old_plane_state->fb != new_plane_state->fb; - dc_plane = dm_new_plane_state->dc_state; - if (pflip_needed) { - /* - * Assume even ONE crtc with immediate flip means - * entire can't wait for VBLANK - * TODO Check if it's correct - */ - if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) - *wait_for_vblank = false; + if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) + swizzle = false; - /* - * TODO This might fail and hence better not used, wait - * explicitly on fences instead - * and in general should be called for - * blocking commit to as per framework helpers - */ - abo = gem_to_amdgpu_bo(fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) - DRM_ERROR("failed to reserve buffer before flip\n"); - - /* - * Wait for all fences on this FB. Do limited wait to avoid - * deadlock during GPU reset when this fence will not signal - * but we hold reservation lock for the BO. - */ - r = reservation_object_wait_timeout_rcu(abo->tbo.resv, - true, false, - msecs_to_jiffies(5000)); - if (unlikely(r == 0)) - DRM_ERROR("Waiting for fences timed out."); + bundle->surface_updates[planes_count].surface = dc_plane; + if (new_pcrtc_state->color_mgmt_changed) { + bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + } + fill_dc_scaling_info(new_plane_state, + &bundle->scaling_infos[planes_count]); + bundle->surface_updates[planes_count].scaling_info = + &bundle->scaling_infos[planes_count]; - amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + plane_needs_flip = old_plane_state->fb && new_plane_state->fb; - amdgpu_bo_unreserve(abo); + pflip_present = pflip_present || plane_needs_flip; - flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address); - flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address); + if (!plane_needs_flip) { + planes_count += 1; + continue; + } - dcc_address = get_dcc_address(afb->address, tiling_flags); - flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address); - flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address); + abo = gem_to_amdgpu_bo(fb->obj[0]); - flip->flip_addrs[flip_count].flip_immediate = - (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; + /* + * Wait for all fences on this FB. Do limited wait to avoid + * deadlock during GPU reset when this fence will not signal + * but we hold reservation lock for the BO. 
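+ */

Before the bounded fence wait below runs, the loop above has already classified the update: a page flip only makes sense when a plane had a framebuffer and still has one, while enable (NULL to fb) and disable (fb to NULL) transitions take the full-update path, and pflip_present accumulates across planes. A trivial standalone illustration of that rule (plain C; the helper name mirrors the local variable but is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* A flip needs both an old and a new framebuffer to be present. */
static bool plane_needs_flip(const void *old_fb, const void *new_fb)
{
	return old_fb && new_fb;
}

int main(void)
{
	int fb_a, fb_b;

	printf("flip:    %d\n", plane_needs_flip(&fb_a, &fb_b)); /* 1 */
	printf("enable:  %d\n", plane_needs_flip(NULL, &fb_a));  /* 0 */
	printf("disable: %d\n", plane_needs_flip(&fb_a, NULL));  /* 0 */
	return 0;
}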
+ r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, + false, + msecs_to_jiffies(5000)); + if (unlikely(r <= 0)) + DRM_ERROR("Waiting for fences timed out!"); - timestamp_ns = ktime_get_ns(); - flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); - flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count]; - flip->surface_updates[flip_count].surface = dc_plane; + /* + * TODO: This might fail and hence is better not used; wait + * explicitly on fences instead, and in general this should be + * done for blocking commits, as per the framework helpers. + */ + r = amdgpu_bo_reserve(abo, true); + if (unlikely(r != 0)) + DRM_ERROR("failed to reserve buffer before flip\n"); - if (!flip->surface_updates[flip_count].surface) { - DRM_ERROR("No surface for CRTC: id=%d\n", - acrtc_attach->crtc_id); - continue; - } + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); - if (plane == pcrtc->primary) - update_freesync_state_on_stream( - dm, - acrtc_state, - acrtc_state->stream, - dc_plane, - flip->flip_addrs[flip_count].flip_timestamp_in_us); + amdgpu_bo_unreserve(abo); - DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n", - __func__, - flip->flip_addrs[flip_count].address.grph.addr.high_part, - flip->flip_addrs[flip_count].address.grph.addr.low_part); + fill_dc_plane_info_and_addr( + dm->adev, new_plane_state, tiling_flags, + &bundle->plane_infos[planes_count], + &bundle->flip_addrs[planes_count].address); - flip_count += 1; - } + bundle->surface_updates[planes_count].plane_info = + &bundle->plane_infos[planes_count]; - full->surface_updates[planes_count].surface = dc_plane; - if (new_pcrtc_state->color_mgmt_changed) { - full->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + /* + * Only allow immediate flips for fast updates that don't + * change FB pitch, DCC state, rotation or mirroring. 
+ */ + bundle->flip_addrs[planes_count].flip_immediate = + crtc->state->async_flip && + acrtc_state->update_type == UPDATE_TYPE_FAST; + + timestamp_ns = ktime_get_ns(); + bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); + bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; + bundle->surface_updates[planes_count].surface = dc_plane; + + if (!bundle->surface_updates[planes_count].surface) { + DRM_ERROR("No surface for CRTC: id=%d\n", + acrtc_attach->crtc_id); + continue; } + if (plane == pcrtc->primary) + update_freesync_state_on_stream( + dm, + acrtc_state, + acrtc_state->stream, + dc_plane, + bundle->flip_addrs[planes_count].flip_timestamp_in_us); - full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality; - full->scaling_infos[planes_count].src_rect = dc_plane->src_rect; - full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect; - full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect; - full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count]; - - - full->plane_infos[planes_count].color_space = dc_plane->color_space; - full->plane_infos[planes_count].format = dc_plane->format; - full->plane_infos[planes_count].plane_size = dc_plane->plane_size; - full->plane_infos[planes_count].rotation = dc_plane->rotation; - full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror; - full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format; - full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info; - full->plane_infos[planes_count].visible = dc_plane->visible; - full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha; - full->plane_infos[planes_count].dcc = dc_plane->dcc; - full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count]; + DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n", + __func__, + bundle->flip_addrs[planes_count].address.grph.addr.high_part, + bundle->flip_addrs[planes_count].address.grph.addr.low_part); planes_count += 1; } - /* - * TODO: For proper atomic behaviour, we should be calling into DC once with - * all the changes. However, DC refuses to do pageflips and non-pageflip - * changes in the same call. Change DC to respect atomic behaviour, - * hopefully eliminating dc_*_update structs in their entirety. - */ - if (flip_count) { + if (pflip_present) { if (!vrr_active) { /* Use old throttling in non-vrr fixed refresh rate mode * to keep flip scheduling based on target vblank counts @@ -4840,7 +5996,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * clients using the GLX_OML_sync_control extension or * DRI3/Present extension with defined target_msc. 
*/ - last_flip_vblank = drm_crtc_vblank_count(pcrtc); + last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id); } else { /* For variable refresh rate mode only: @@ -4856,11 +6012,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - target = (uint32_t)last_flip_vblank + *wait_for_vblank; - - /* Prepare wait for target vblank early - before the fence-waits */ - target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) + - amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id); + target_vblank = last_flip_vblank + wait_for_vblank; /* * Wait until we're out of the vertical blank period before the one @@ -4889,56 +6041,220 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } if (acrtc_state->stream) { - - if (acrtc_state->freesync_timing_changed) - flip->stream_update.adjust = - &acrtc_state->stream->adjust; - if (acrtc_state->freesync_vrr_info_changed) - flip->stream_update.vrr_infopacket = + bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } - - mutex_lock(&dm->dc_lock); - dc_commit_updates_for_stream(dm->dc, - flip->surface_updates, - flip_count, - acrtc_state->stream, - &flip->stream_update, - dc_state); - mutex_unlock(&dm->dc_lock); } - if (planes_count) { + /* Update the planes if changed or disable if we don't have any. */ + if ((planes_count || acrtc_state->active_planes == 0) && + acrtc_state->stream) { + bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { - full->stream_update.src = acrtc_state->stream->src; - full->stream_update.dst = acrtc_state->stream->dst; + bundle->stream_update.src = acrtc_state->stream->src; + bundle->stream_update.dst = acrtc_state->stream->dst; } - if (new_pcrtc_state->color_mgmt_changed) - full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func; + if (new_pcrtc_state->color_mgmt_changed) { + /* + * TODO: This isn't fully correct since we've actually + * already modified the stream in place. + */ + bundle->stream_update.gamut_remap = + &acrtc_state->stream->gamut_remap_matrix; + bundle->stream_update.output_csc_transform = + &acrtc_state->stream->csc_color_matrix; + bundle->stream_update.out_transfer_func = + acrtc_state->stream->out_transfer_func; + } acrtc_state->stream->abm_level = acrtc_state->abm_level; if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) - full->stream_update.abm_level = &acrtc_state->abm_level; + bundle->stream_update.abm_level = &acrtc_state->abm_level; + /* + * If FreeSync state on the stream has changed then we need to + * re-adjust the min/max bounds now that DC doesn't handle this + * as part of commit. 
+ */ + if (amdgpu_dm_vrr_active(dm_old_crtc_state) != + amdgpu_dm_vrr_active(acrtc_state)) { + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + dc_stream_adjust_vmin_vmax( + dm->dc, acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + dc_commit_updates_for_stream(dm->dc, - full->surface_updates, + bundle->surface_updates, planes_count, acrtc_state->stream, - &full->stream_update, + &bundle->stream_update, dc_state); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->psr_version && + !acrtc_state->stream->link->psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_feature_enabled && + !acrtc_state->stream->link->psr_allow_active && + swizzle) { + amdgpu_dm_psr_enable(acrtc_state->stream); + } + mutex_unlock(&dm->dc_lock); } - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - if (plane->type == DRM_PLANE_TYPE_CURSOR) - handle_cursor_update(plane, old_plane_state); + /* + * Update cursor state *after* programming all the planes. + * This avoids redundant programming in the case where we're going + * to be disabling a single plane - those pipes are being disabled. + */ + if (acrtc_state->active_planes) + amdgpu_dm_commit_cursors(state); cleanup: - kfree(flip); - kfree(full); + kfree(bundle); +} + +static void amdgpu_dm_commit_audio(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct drm_crtc_state *new_crtc_state; + struct dm_crtc_state *new_dm_crtc_state; + const struct dc_stream_status *status; + int i, inst; + + /* Notify device removals. */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + if (old_con_state->crtc != new_con_state->crtc) { + /* CRTC changes require notification. */ + goto notify; + } + + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_con_state->crtc); + + if (!new_crtc_state) + continue; + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + notify: + aconnector = to_amdgpu_dm_connector(connector); + + mutex_lock(&adev->dm.audio_lock); + inst = aconnector->audio_inst; + aconnector->audio_inst = -1; + mutex_unlock(&adev->dm.audio_lock); + + amdgpu_dm_audio_eld_notify(adev, inst); + } + + /* Notify audio device additions. 
*/ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_con_state->crtc); + + if (!new_crtc_state) + continue; + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); + if (!new_dm_crtc_state->stream) + continue; + + status = dc_stream_get_status(new_dm_crtc_state->stream); + if (!status) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + + mutex_lock(&adev->dm.audio_lock); + inst = status->audio_inst; + aconnector->audio_inst = inst; + mutex_unlock(&adev->dm.audio_lock); + + amdgpu_dm_audio_eld_notify(adev, inst); + } +} + +/* + * Enable interrupts on CRTCs that are newly active, have undergone + * a modeset, or have active planes again. + * + * Done in two passes, based on the for_modeset flag: + * Pass 1: For CRTCs going through modeset + * Pass 2: For CRTCs going from 0 to n active planes + * + * Interrupts can only be enabled after the planes are programmed, + * so this requires a two-pass approach since we don't want to + * just defer the interrupts until after commit planes every time. + */ +static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, + struct drm_atomic_state *state, + bool for_modeset) +{ + struct amdgpu_device *adev = dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + int i; +#ifdef CONFIG_DEBUG_FS + enum amdgpu_dm_pipe_crc_source source; +#endif + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + struct dm_crtc_state *dm_old_crtc_state = + to_dm_crtc_state(old_crtc_state); + bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state); + bool run_pass; + + run_pass = (for_modeset && modeset) || + (!for_modeset && !modeset && + !dm_old_crtc_state->interrupts_enabled); + + if (!run_pass) + continue; + + if (!dm_new_crtc_state->interrupts_enabled) + continue; + + manage_dm_interrupts(adev, acrtc, true); + +#ifdef CONFIG_DEBUG_FS + /* The stream has changed so CRC capture needs to be re-enabled. */ + source = dm_new_crtc_state->crc_src; + if (amdgpu_dm_is_valid_crc_source(source)) { + amdgpu_dm_crtc_configure_crc_source( + crtc, dm_new_crtc_state, + dm_new_crtc_state->crc_src); + } +#endif + } } /* @@ -4952,8 +6268,7 @@ cleanup: static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, struct dc_stream_state *stream_state) { - stream_state->mode_changed = - crtc_state->mode_changed || crtc_state->active_changed; + stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } static int amdgpu_dm_atomic_commit(struct drm_device *dev, @@ -4966,33 +6281,29 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, int i; /* - * We evade vblanks and pflips on crtc that - * should be changed. We do it here to flush & disable - * interrupts before drm_swap_state is called in drm_atomic_helper_commit - * it will update crtc->dm_crtc_state->stream pointer which is used in - * the ISRs. + * We evade vblank and pflip interrupts on CRTCs that are undergoing + * a modeset, are being disabled, or have no active planes. + * + * It's done in atomic commit rather than commit tail for now since + * some of these interrupt handlers access the current CRTC state and + * potentially the stream pointer itself. 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
-		if (drm_atomic_crtc_needs_modeset(new_crtc_state)
-		    && dm_old_crtc_state->stream) {
-			/*
-			 * If the stream is removed and CRC capture was
-			 * enabled on the CRTC the extra vblank reference
-			 * needs to be dropped since CRC capture will be
-			 * disabled.
-			 */
-			if (!dm_new_crtc_state->stream
-			    && dm_new_crtc_state->crc_enabled) {
-				drm_crtc_vblank_put(crtc);
-				dm_new_crtc_state->crc_enabled = false;
-			}
-
+		if (dm_old_crtc_state->interrupts_enabled &&
+		    (!dm_new_crtc_state->interrupts_enabled ||
+		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
 			manage_dm_interrupts(adev, acrtc, false);
-		}
 	}
 	/*
 	 * Add check here for SoC's that support hardware cursor plane, to
@@ -5036,7 +6347,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dc_state = dm_state->context;
 	} else {
 		/* No state changes, retain current state. */
-		dc_state_temp = dc_create_state();
+		dc_state_temp = dc_create_state(dm->dc);
 		ASSERT(dc_state_temp);
 		dc_state = dc_state_temp;
 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
@@ -5106,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			crtc->hwmode = new_crtc_state->mode;
 		} else if (modereset_required(new_crtc_state)) {
 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-			/* i.e.
reset mode */ - if (dm_old_crtc_state->stream) + if (dm_old_crtc_state->stream) { + if (dm_old_crtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(dm_old_crtc_state->stream); + remove_stream(adev, acrtc, dm_old_crtc_state->stream); + } } } /* for_each_crtc_in_state() */ @@ -5139,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + + if (acrtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + continue; + } + + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); + } +#endif /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -5147,7 +6485,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct dc_surface_update dummy_updates[MAX_SURFACES]; struct dc_stream_update stream_update; + struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; + bool abm_changed, hdr_changed, scaling_changed; memset(&dummy_updates, 0, sizeof(dummy_updates)); memset(&stream_update, 0, sizeof(stream_update)); @@ -5164,24 +6504,38 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) && - (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level)) + scaling_changed = is_scaling_state_different(dm_new_con_state, + dm_old_con_state); + + abm_changed = dm_new_crtc_state->abm_level != + dm_old_crtc_state->abm_level; + + hdr_changed = + is_hdr_metadata_different(old_con_state, new_con_state); + + if (!scaling_changed && !abm_changed && !hdr_changed) continue; - if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) { + stream_update.stream = dm_new_crtc_state->stream; + if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; } - if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) { + if (abm_changed) { dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; stream_update.abm_level = &dm_new_crtc_state->abm_level; } + if (hdr_changed) { + fill_hdr_info_packet(new_con_state, &hdr_packet); + stream_update.hdr_static_metadata = &hdr_packet; + } + status = 
dc_stream_get_status(dm_new_crtc_state->stream); WARN_ON(!status); WARN_ON(!status->plane_count); @@ -5205,45 +6559,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_unlock(&dm->dc_lock); } + /* Count number of newly disabled CRTCs for dropping PM refs later. */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - /* - * loop to enable interrupts on newly arrived crtc - */ - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - bool modeset_needed; - + new_crtc_state, i) { if (old_crtc_state->active && !new_crtc_state->active) crtc_disable_count++; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - modeset_needed = modeset_required( - new_crtc_state, - dm_new_crtc_state->stream, - dm_old_crtc_state->stream); - - if (dm_new_crtc_state->stream == NULL || !modeset_needed) - continue; - manage_dm_interrupts(adev, acrtc, true); + /* Update freesync active state. */ + pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); -#ifdef CONFIG_DEBUG_FS - /* The stream has changed so CRC capture needs to re-enabled. */ - if (dm_new_crtc_state->crc_enabled) - amdgpu_dm_crtc_set_crc_source(crtc, "auto"); -#endif + /* Handle vrr on->off / off->on transitions */ + amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, + dm_new_crtc_state); } + /* Enable interrupts for CRTCs going through a modeset. */ + amdgpu_dm_enable_crtc_interrupts(dev, state, true); + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) + if (new_crtc_state->async_flip) + wait_for_vblank = false; + /* update planes when needed per crtc*/ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) amdgpu_dm_commit_planes(state, dc_state, dev, - dm, crtc, &wait_for_vblank); + dm, crtc, wait_for_vblank); } + /* Enable interrupts for CRTCs going from 0 to n active planes. */ + amdgpu_dm_enable_crtc_interrupts(dev, state, false); + + /* Update audio instances for each connector. */ + amdgpu_dm_commit_audio(dev, state); /* * send vblank event on all events not handled in flip and @@ -5483,21 +6836,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; - struct drm_plane_state *new_plane_state = NULL; new_stream = NULL; dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); - - new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); - - if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { - ret = -EINVAL; - goto fail; - } - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ @@ -5540,7 +6884,22 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; - if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + ret = fill_hdr_info_packet(drm_new_conn_state, + &new_stream->hdr_static_metadata); + if (ret) + goto fail; + + /* + * If we already removed the old stream from the context + * (and set the new stream to NULL) then we can't reuse + * the old stream even if the stream and scaling are unchanged. 
+	 * We'll hit the BUG_ON and get a black screen.
+	 *
+	 * TODO: Refactor this function to allow this check to work
+	 * in all conditions.
+	 */
+	if (dm_new_crtc_state->stream &&
+	    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 	    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 		new_crtc_state->mode_changed = false;
 		DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
@@ -5660,16 +7019,18 @@ skip_modeset:
 		update_stream_scaling_settings(
 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
 
+		/* ABM settings */
+		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
 		/*
 		 * Color management settings. We also update color properties
 		 * when a modeset is needed, to ensure it gets reprogrammed.
 		 */
 		if (dm_new_crtc_state->base.color_mgmt_changed ||
 		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
-			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
+			ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
 			if (ret)
 				goto fail;
-			amdgpu_dm_set_ctm(dm_new_crtc_state);
 		}
 
 		/* Update Freesync settings. */
@@ -5684,6 +7045,73 @@ fail:
 	return ret;
 }
 
+static bool should_reset_plane(struct drm_atomic_state *state,
+			       struct drm_plane *plane,
+			       struct drm_plane_state *old_plane_state,
+			       struct drm_plane_state *new_plane_state)
+{
+	struct drm_plane *other;
+	struct drm_plane_state *old_other_state, *new_other_state;
+	struct drm_crtc_state *new_crtc_state;
+	int i;
+
+	/*
+	 * TODO: Remove this hack once the checks below are sufficient
+	 * to determine when we need to reset all the planes on
+	 * the stream.
+	 */
+	if (state->allow_modeset)
+		return true;
+
+	/* Exit early if we know that we're adding or removing the plane. */
+	if (old_plane_state->crtc != new_plane_state->crtc)
+		return true;
+
+	/* old_crtc == new_crtc == NULL, plane not in context. */
+	if (!new_plane_state->crtc)
+		return false;
+
+	new_crtc_state =
+		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+	if (!new_crtc_state)
+		return true;
+
+	/* CRTC Degamma changes currently require us to recreate planes. */
+	if (new_crtc_state->color_mgmt_changed)
+		return true;
+
+	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+		return true;
+
+	/*
+	 * If there are any new primary or overlay planes being added or
+	 * removed then the z-order can potentially change. To ensure
+	 * correct z-order and pipe acquisition the current DC architecture
+	 * requires us to remove and recreate all existing planes.
+	 *
+	 * TODO: Come up with a more elegant solution for this.
+	 */
+	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+		if (other->type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		if (old_other_state->crtc != new_plane_state->crtc &&
+		    new_other_state->crtc != new_plane_state->crtc)
+			continue;
+
+		if (old_other_state->crtc != new_other_state->crtc)
+			return true;
+
+		/* TODO: Remove this once we can handle fast format changes.
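A minimal standalone version of the format guard referenced above (sketch; helper name hypothetical):

static bool dm_plane_format_changed(const struct drm_plane_state *old_state,
				    const struct drm_plane_state *new_state)
{
	/* Both ends need a framebuffer for a format change to exist. */
	return old_state->fb && new_state->fb &&
	       old_state->fb->format != new_state->fb->format;
}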
*/ + if (old_other_state->fb && new_other_state->fb && + old_other_state->fb->format != new_other_state->fb->format) + return true; + } + + return false; +} + static int dm_update_plane_state(struct dc *dc, struct drm_atomic_state *state, struct drm_plane *plane, @@ -5698,8 +7126,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; - /* TODO return page_flip_needed() function */ - bool pflip_needed = !state->allow_modeset; + bool needs_reset; int ret = 0; @@ -5712,10 +7139,12 @@ static int dm_update_plane_state(struct dc *dc, if (plane->type == DRM_PLANE_TYPE_CURSOR) return 0; + needs_reset = should_reset_plane(state, plane, old_plane_state, + new_plane_state); + /* Remove any changed/removed planes */ if (!enable) { - if (pflip_needed && - plane->type != DRM_PLANE_TYPE_OVERLAY) + if (!needs_reset) return 0; if (!old_plane_crtc) @@ -5766,7 +7195,7 @@ static int dm_update_plane_state(struct dc *dc, if (!dm_new_crtc_state->stream) return 0; - if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY) + if (!needs_reset) return 0; WARN_ON(dm_new_plane_state->dc_state); @@ -5778,7 +7207,7 @@ static int dm_update_plane_state(struct dc *dc, DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", plane->base.id, new_plane_crtc->base.id); - ret = fill_plane_attributes( + ret = fill_dc_plane_attributes( new_plane_crtc->dev->dev_private, dc_new_plane_state, new_plane_state, @@ -5826,10 +7255,11 @@ static int dm_update_plane_state(struct dc *dc, } static int -dm_determine_update_type_for_commit(struct dc *dc, +dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, enum surface_update_type *out_type) { + struct dc *dc = dm->dc; struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; int i, j, num_plane, ret = 0; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -5843,21 +7273,22 @@ dm_determine_update_type_for_commit(struct dc *dc, struct dc_stream_status *status = NULL; struct dc_surface_update *updates; - struct dc_plane_state *surface; enum surface_update_type update_type = UPDATE_TYPE_FAST; updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); - surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL); - if (!updates || !surface) { - DRM_ERROR("Plane or surface update failed to allocate"); + if (!updates) { + DRM_ERROR("Failed to allocate plane updates\n"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dc_stream_update stream_update = { 0 }; + struct dc_scaling_info scaling_info; + struct dc_stream_update stream_update; + + memset(&stream_update, 0, sizeof(stream_update)); new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -5872,6 +7303,12 @@ dm_determine_update_type_for_commit(struct dc *dc, continue; for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { + const struct amdgpu_framebuffer *amdgpu_fb = + to_amdgpu_framebuffer(new_plane_state->fb); + struct dc_plane_info plane_info; + struct dc_flip_addrs flip_addr; + uint64_t tiling_flags; + new_plane_crtc = new_plane_state->crtc; old_plane_crtc = old_plane_state->crtc; new_dm_plane_state = to_dm_plane_state(new_plane_state); @@ -5885,23 +7322,12 @@ 
dm_determine_update_type_for_commit(struct dc *dc, goto cleanup; } - if (!state->allow_modeset) - continue; - if (crtc != new_plane_crtc) continue; - updates[num_plane].surface = &surface[num_plane]; + updates[num_plane].surface = new_dm_plane_state->dc_state; if (new_crtc_state->mode_changed) { - updates[num_plane].surface->src_rect = - new_dm_plane_state->dc_state->src_rect; - updates[num_plane].surface->dst_rect = - new_dm_plane_state->dc_state->dst_rect; - updates[num_plane].surface->rotation = - new_dm_plane_state->dc_state->rotation; - updates[num_plane].surface->in_transfer_func = - new_dm_plane_state->dc_state->in_transfer_func; stream_update.dst = new_dm_crtc_state->stream->dst; stream_update.src = new_dm_crtc_state->stream->src; } @@ -5913,10 +7339,37 @@ dm_determine_update_type_for_commit(struct dc *dc, new_dm_plane_state->dc_state->in_transfer_func; stream_update.gamut_remap = &new_dm_crtc_state->stream->gamut_remap_matrix; + stream_update.output_csc_transform = + &new_dm_crtc_state->stream->csc_color_matrix; stream_update.out_transfer_func = new_dm_crtc_state->stream->out_transfer_func; } + ret = fill_dc_scaling_info(new_plane_state, + &scaling_info); + if (ret) + goto cleanup; + + updates[num_plane].scaling_info = &scaling_info; + + if (amdgpu_fb) { + ret = get_fb_info(amdgpu_fb, &tiling_flags); + if (ret) + goto cleanup; + + memset(&flip_addr, 0, sizeof(flip_addr)); + + ret = fill_dc_plane_info_and_addr( + dm->adev, new_plane_state, tiling_flags, + &plane_info, + &flip_addr.address); + if (ret) + goto cleanup; + + updates[num_plane].plane_info = &plane_info; + updates[num_plane].flip_addr = &flip_addr; + } + num_plane++; } @@ -5935,9 +7388,15 @@ dm_determine_update_type_for_commit(struct dc *dc, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - + stream_update.stream = new_dm_crtc_state->stream; + /* + * TODO: DC modifies the surface during this call so we need + * to lock here - find a way to do this without locking. + */ + mutex_lock(&dm->dc_lock); update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, &stream_update, status); + mutex_unlock(&dm->dc_lock); if (update_type > UPDATE_TYPE_MED) { update_type = UPDATE_TYPE_FULL; @@ -5947,7 +7406,6 @@ dm_determine_update_type_for_commit(struct dc *dc, cleanup: kfree(updates); - kfree(surface); *out_type = update_type; return ret; @@ -6108,6 +7566,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + if (state->legacy_cursor_update) { + /* + * This is a fast cursor update coming from the plane update + * helper, check if it can be done asynchronously for better + * performance. + */ + state->async_update = + !drm_atomic_helper_async_check(dev, state); + + /* + * Skip the remaining global validation if this is an async + * update. Cursor updates can be done without affecting + * state or bandwidth calcs and this avoids the performance + * penalty of locking the private state object and + * allocating a new dc_state. + */ + if (state->async_update) + return 0; + } + /* Check scaling and underscan changes*/ /* TODO Removed scaling changes validation due to inability to commit * new stream into context w\o causing full reset. 
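Sketch (illustration; helper name hypothetical) of the cursor fast path added above: a legacy cursor update that passes the async check skips the heavyweight global validation entirely:

static bool dm_try_cursor_fast_path(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	if (!state->legacy_cursor_update)
		return false;

	/* drm_atomic_helper_async_check() returns 0 when async is possible. */
	state->async_update = !drm_atomic_helper_async_check(dev, state);

	return state->async_update;
}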
Need to @@ -6131,7 +7609,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } - ret = dm_determine_update_type_for_commit(dc, state, &update_type); + ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type); if (ret) goto fail; @@ -6146,9 +7624,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); - else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST) - WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST"); - if (overall_update_type > UPDATE_TYPE_FAST) { ret = dm_atomic_get_state(state, &dm_state); @@ -6159,17 +7634,41 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; - if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; } - } else if (state->legacy_cursor_update) { + } else { /* - * This is a fast cursor update coming from the plane update - * helper, check if it can be done asynchronously for better - * performance. + * The commit is a fast update. Fast updates shouldn't change + * the DC context, affect global validation, and can have their + * commit work done in parallel with other commits not touching + * the same resource. If we have a new DC context as part of + * the DM atomic state from validation we need to free it and + * retain the existing one instead. */ - state->async_update = !drm_atomic_helper_async_check(dev, state); + struct dm_atomic_state *new_dm_state, *old_dm_state; + + new_dm_state = dm_atomic_get_new_state(state); + old_dm_state = dm_atomic_get_old_state(state); + + if (new_dm_state && old_dm_state) { + if (new_dm_state->context) + dc_release_state(new_dm_state->context); + + new_dm_state->context = old_dm_state->context; + + if (old_dm_state->context) + dc_retain_state(old_dm_state->context); + } + } + + /* Store the overall update type for use later in atomic check. */ + for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->update_type = (int)overall_update_type; } /* Must be success */ @@ -6300,3 +7799,92 @@ update: freesync_capable); } +static void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return; + if (link->type == dc_connection_none) + return; + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + dpcd_data, sizeof(dpcd_data))) { + link->psr_feature_enabled = dpcd_data[0] ? 
true:false; + DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + } +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + + if (psr_config.psr_version > 0) { + psr_config.psr_exit_link_training_required = 0x1; + psr_config.psr_frame_capture_indication_req = 0; + psr_config.psr_rfb_setup_time = 0x37; + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; + psr_config.allow_smu_optimizations = 0x0; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + struct dc_static_screen_events triggers = {0}; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + triggers.cursor_update = true; + triggers.overlay_update = true; + triggers.surface_update = true; + + dc_stream_set_static_screen_events(link->ctx->dc, + &stream, 1, + &triggers); + + return dc_link_set_psr_allow_active(link, true, false); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, false, true); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index fbd161ddc3f4..77c5166e6b08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -26,8 +26,11 @@ #ifndef __AMDGPU_DM_H__ #define __AMDGPU_DM_H__ -#include <drm/drmP.h> #include <drm/drm_atomic.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_plane.h> /* * This file contains the definition for amdgpu_display_manager @@ -47,6 +50,7 @@ #include "irq_types.h" #include "signal_types.h" +#include "amdgpu_dm_crc.h" /* Forward declarations */ struct amdgpu_device; @@ -104,6 +108,12 @@ struct amdgpu_dm_backlight_caps { * @display_indexes_num: Max number of display streams supported * @irq_handler_list_table_lock: Synchronizes access to IRQ tables * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @cached_state: Caches device atomic state for suspend/resume * @compressor: Frame buffer compression buffer. 
See &struct dm_comressor_info */ @@ -124,7 +134,7 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** - * @atomic_obj + * @atomic_obj: * * In combination with &dm_atomic_state it helps manage * global atomic state that doesn't map cleanly into existing @@ -132,8 +142,6 @@ struct amdgpu_display_manager { */ struct drm_private_obj atomic_obj; - struct drm_modeset_lock atomic_obj_lock; - /** * @dc_lock: * @@ -143,6 +151,28 @@ struct amdgpu_display_manager { struct mutex dc_lock; /** + * @audio_lock: + * + * Guards access to audio instance changes. + */ + struct mutex audio_lock; + + /** + * @audio_component: + * + * Used to notify ELD changes to sound driver. + */ + struct drm_audio_component *audio_component; + + /** + * @audio_registered: + * + * True if the audio component has been registered + * successfully, false otherwise. + */ + bool audio_registered; + + /** * @irq_handler_list_low_tab: * * Low priority IRQ handler table. @@ -184,6 +214,15 @@ struct amdgpu_display_manager { struct common_irq_params vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; + /** + * @vupdate_params: + * + * Vertical update IRQ parameters, passed to registered handlers when + * triggered. + */ + struct common_irq_params + vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1]; + spinlock_t irq_handler_list_table_lock; struct backlight_device *backlight_dev; @@ -192,6 +231,9 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct hdcp_workqueue *hdcp_workqueue; +#endif struct drm_atomic_state *cached_state; @@ -199,6 +241,15 @@ struct amdgpu_display_manager { const struct firmware *fw_dmcu; uint32_t dmcu_fw_version; +#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + /** + * @soc_bounding_box: + * + * gpu_info FW provided soc bounding box struct or 0 if not + * available in FW + */ + const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; +#endif }; struct amdgpu_dm_connector { @@ -237,9 +288,17 @@ struct amdgpu_dm_connector { int max_vfreq ; int pixel_clock_mhz; + /* Audio instance - protected by audio_lock. 
*/ + int audio_inst; + struct mutex hpd_lock; bool fake_enable; +#ifdef CONFIG_DEBUG_FS + uint32_t debugfs_dpcd_address; + uint32_t debugfs_dpcd_size; +#endif + bool force_yuv420_output; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -260,8 +319,15 @@ struct dm_crtc_state { struct drm_crtc_state base; struct dc_stream_state *stream; + bool cm_has_degamma; + bool cm_is_degamma_srgb; + + int update_type; + int active_planes; + bool interrupts_enabled; + int crc_skip_count; - bool crc_enabled; + enum amdgpu_dm_pipe_crc_source crc_src; bool freesync_timing_changed; bool freesync_vrr_info_changed; @@ -290,7 +356,6 @@ struct dm_connector_state { enum amdgpu_rmx_type scaling; uint8_t underscan_vborder; uint8_t underscan_hborder; - uint8_t max_bpc; bool underscan_enable; bool freesync_capable; uint8_t abm_level; @@ -329,28 +394,14 @@ void dm_restore_drm_connector_state(struct drm_device *dev, void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid); -/* amdgpu_dm_crc.c */ -#ifdef CONFIG_DEBUG_FS -int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); -int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, - const char *src_name, - size_t *values_cnt); -void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); -#else -#define amdgpu_dm_crtc_set_crc_source NULL -#define amdgpu_dm_crtc_verify_crc_source NULL -#define amdgpu_dm_crtc_handle_crc_irq(x) -#endif - #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 void amdgpu_dm_init_color_mod(void); -int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state, - struct dc_plane_state *dc_plane_state); -void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc); -int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc); +int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); +int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct dc_plane_state *dc_plane_state); extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index 216e48cec716..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -27,6 +27,47 @@ #include "amdgpu_dm.h" #include "dc.h" #include "modules/color/color_gamma.h" +#include "basics/conversion.h" + +/* + * The DC interface to HW gives us the following color management blocks + * per pipe (surface): + * + * - Input gamma LUT (de-normalized) + * - Input CSC (normalized) + * - Surface degamma LUT (normalized) + * - Surface CSC (normalized) + * - Surface regamma LUT (normalized) + * - Output CSC (normalized) + * + * But these aren't a direct mapping to DRM color properties. The current DRM + * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware + * is essentially giving: + * + * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM + * + * The input gamma LUT block isn't really applicable here since it operates + * on the actual input data itself rather than the HW fp representation. The + * input and output CSC blocks are technically available to use as part of + * the DC interface but are typically used internally by DC for conversions + * between color spaces. 
These could be blended together with user
+ * adjustments in the future but for now these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't actually
+ * support any CRTC props with correct blending with multiple planes - but we
+ * can still support CRTC color management properties in DM in most single
+ * plane cases correctly with clever management of the DC interface.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when their
+ * respective property is set to NULL. A linear DGM/RGM LUT should also
+ * be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following
+ * configuration is assumed to be the default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
+ * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ */
 
 #define MAX_DRM_LUT_VALUE 0xFFFF
 
@@ -41,6 +82,13 @@ void amdgpu_dm_init_color_mod(void)
 	setup_x_points_distribution();
 }
 
+/* Extracts the DRM lut and lut size from a blob. */
+static const struct drm_color_lut *
+__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
+{
+	*size = blob ? drm_color_lut_size(blob) : 0;
+	return blob ? (struct drm_color_lut *)blob->data : NULL;
+}
 
 /*
  * Return true if the given lut is a linear mapping of values, i.e. it acts
@@ -50,7 +98,7 @@ void amdgpu_dm_init_color_mod(void)
  * f(a) = (0xFF00/(MAX_COLOR_LUT_ENTRIES - 1)) * a; for integer a in
  * [0, MAX_COLOR_LUT_ENTRIES)
 */
-static bool __is_lut_linear(struct drm_color_lut *lut, uint32_t size)
+static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
 {
 	int i;
 	uint32_t expected;
@@ -75,9 +123,8 @@ static bool __is_lut_linear(struct drm_color_lut *lut, uint32_t size)
  * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
 * of the lut - whether or not it's legacy.
 */
-static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
-				  struct dc_gamma *gamma,
-				  bool is_legacy)
+static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
+				  struct dc_gamma *gamma, bool is_legacy)
 {
 	uint32_t r, g, b;
 	int i;
@@ -107,98 +154,16 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
 	}
 }
 
-/**
- * amdgpu_dm_set_regamma_lut: Set regamma lut for the given CRTC.
- * @crtc: amdgpu_dm crtc state
- *
- * Update the underlying dc_stream_state's output transfer function (OTF) in
- * preparation for hardware commit. If no lut is specified by user, we default
- * to SRGB.
- *
- * RETURNS:
- * 0 on success, -ENOMEM if memory cannot be allocated to calculate the OTF.
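Worked example (illustration, simplified): a degamma blob counts as "linear" when every entry sits on the identity ramp with all channels equal; the in-kernel check additionally tolerates rounding error. The struct is redeclared here so the snippet stands alone:

#include <stdbool.h>
#include <stdint.h>

struct lut_entry { uint16_t red, green, blue, reserved; }; /* shape of drm_color_lut */

static bool is_identity_ramp(const struct lut_entry *lut, uint32_t size)
{
	for (uint32_t i = 0; i < size; i++) {
		uint32_t expected = i * 0xFFFFu / (size - 1);

		if (lut[i].red != lut[i].green || lut[i].green != lut[i].blue)
			return false;
		if (lut[i].red != expected)
			return false;
	}
	return true;
}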
- */ -int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) -{ - struct drm_property_blob *blob = crtc->base.gamma_lut; - struct dc_stream_state *stream = crtc->stream; - struct amdgpu_device *adev = (struct amdgpu_device *) - crtc->base.state->dev->dev_private; - struct drm_color_lut *lut; - uint32_t lut_size; - struct dc_gamma *gamma; - enum dc_transfer_func_type old_type = stream->out_transfer_func->type; - - bool ret; - - if (!blob) { - /* By default, use the SRGB predefined curve.*/ - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - return 0; - } - - lut = (struct drm_color_lut *)blob->data; - lut_size = blob->length / sizeof(struct drm_color_lut); - - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; - - gamma->num_entries = lut_size; - if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES) - gamma->type = GAMMA_RGB_256; - else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES) - gamma->type = GAMMA_CS_TFM_1D; - else { - /* Invalid lut size */ - dc_gamma_release(&gamma); - return -EINVAL; - } - - /* Convert drm_lut into dc_gamma */ - __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256); - - /* Call color module to translate into something DC understands. Namely - * a transfer function. - */ - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - ret = mod_color_calculate_regamma_params(stream->out_transfer_func, - gamma, true, adev->asic_type <= CHIP_RAVEN, NULL); - dc_gamma_release(&gamma); - if (!ret) { - stream->out_transfer_func->type = old_type; - DRM_ERROR("Out of memory when calculating regamma params\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * amdgpu_dm_set_ctm: Set the color transform matrix for the given CRTC. - * @crtc: amdgpu_dm crtc state - * - * Update the underlying dc_stream_state's gamut remap matrix in preparation - * for hardware commit. If no matrix is specified by user, gamut remap will be - * disabled. +/* + * Converts a DRM CTM to a DC CSC float matrix. + * The matrix needs to be a 3x4 (12 entry) matrix. */ -void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc) +static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, + struct fixed31_32 *matrix) { - - struct drm_property_blob *blob = crtc->base.ctm; - struct dc_stream_state *stream = crtc->stream; - struct drm_color_ctm *ctm; int64_t val; int i; - if (!blob) { - stream->gamut_remap_matrix.enable_remap = false; - return; - } - - stream->gamut_remap_matrix.enable_remap = true; - ctm = (struct drm_color_ctm *)blob->data; /* * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating * with homogeneous coordinates, augment the matrix with 0's. @@ -210,83 +175,308 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc) for (i = 0; i < 12; i++) { /* Skip 4th element */ if (i % 4 == 3) { - stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero; + matrix[i] = dc_fixpt_zero; continue; } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ - val = ctm->matrix[i - (i/4)]; + val = ctm->matrix[i - (i / 4)]; /* If negative, convert to 2's complement. */ if (val & (1ULL << 63)) val = -(val & ~(1ULL << 63)); - stream->gamut_remap_matrix.matrix[i].value = val; + matrix[i].value = val; } } +/* Calculates the legacy transfer function - only for sRGB input space. */ +static int __set_legacy_tf(struct dc_transfer_func *func, + const struct drm_color_lut *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + bool res; -/** - * amdgpu_dm_set_degamma_lut: Set degamma lut for the given CRTC. 
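Worked example (standalone, compile-and-run): DRM supplies CTM coefficients in S31.32 sign-magnitude form, and the conversion above turns the sign bit into a two's-complement negative for DC's fixed31_32 type:

#include <assert.h>
#include <stdint.h>

static int64_t ctm_coeff_to_s31_32(uint64_t sign_mag)
{
	int64_t val = (int64_t)(sign_mag & ~(1ULL << 63));

	return (sign_mag & (1ULL << 63)) ? -val : val;
}

int main(void)
{
	assert(ctm_coeff_to_s31_32(1ULL << 32) == (1LL << 32));	/*  1.0 */
	assert(ctm_coeff_to_s31_32((1ULL << 63) | (1ULL << 31)) ==
	       -(1LL << 31));					/* -0.5 */
	return 0;
}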
- * @crtc: amdgpu_dm crtc state
- *
- * Update the underlying dc_stream_state's input transfer function (ITF) in
- * preparation for hardware commit. If no lut is specified by user, we default
- * to SRGB degamma.
- *
- * We support degamma bypass, predefined SRGB, and custom degamma
- *
- * RETURNS:
- * 0 on success
- * -EINVAL if crtc_state has a degamma_lut of invalid size
- * -ENOMEM if gamma allocation fails
- */
-int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
-			      struct dc_plane_state *dc_plane_state)
+	ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
+
+	gamma = dc_create_gamma();
+	if (!gamma)
+		return -ENOMEM;
+
+	gamma->type = GAMMA_RGB_256;
+	gamma->num_entries = lut_size;
+	__drm_lut_to_dc_gamma(lut, gamma, true);
+
+	res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
+						 NULL);
+
+	dc_gamma_release(&gamma);
+
+	return res ? 0 : -ENOMEM;
+}
+
+/* Calculates the output transfer function based on expected input space. */
+static int __set_output_tf(struct dc_transfer_func *func,
+			   const struct drm_color_lut *lut, uint32_t lut_size,
+			   bool has_rom)
 {
-	struct drm_property_blob *blob = crtc_state->degamma_lut;
-	struct drm_color_lut *lut;
-	uint32_t lut_size;
-	struct dc_gamma *gamma;
-	bool ret;
-
-	if (!blob) {
-		/* Default to SRGB */
-		dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
-		dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-		return 0;
-	}
+	struct dc_gamma *gamma = NULL;
+	bool res;
 
-	lut = (struct drm_color_lut *)blob->data;
-	if (__is_lut_linear(lut, MAX_COLOR_LUT_ENTRIES)) {
-		dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
-		dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-		return 0;
-	}
+	ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
 
 	gamma = dc_create_gamma();
 	if (!gamma)
 		return -ENOMEM;
 
-	lut_size = blob->length / sizeof(struct drm_color_lut);
 	gamma->num_entries = lut_size;
-	if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+	__drm_lut_to_dc_gamma(lut, gamma, false);
+
+	if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+		/*
+		 * Color module doesn't like calculating regamma params
+		 * on top of a linear input. But degamma params can be used
+		 * instead to simulate this.
+		 */
 		gamma->type = GAMMA_CUSTOM;
-	else {
-		dc_gamma_release(&gamma);
-		return -EINVAL;
+		res = mod_color_calculate_degamma_params(func, gamma, true);
+	} else {
+		/*
+		 * Assume sRGB. The actual mapping will depend on whether the
+		 * input was legacy or not.
+		 */
+		gamma->type = GAMMA_CS_TFM_1D;
+		res = mod_color_calculate_regamma_params(func, gamma, false,
+							 has_rom, NULL);
 	}
+
+	dc_gamma_release(&gamma);
+
+	return res ? 0 : -ENOMEM;
+}
+
+/* Calculates the input transfer function based on expected input space. */
+static int __set_input_tf(struct dc_transfer_func *func,
+			  const struct drm_color_lut *lut, uint32_t lut_size)
+{
+	struct dc_gamma *gamma = NULL;
+	bool res;
+
+	gamma = dc_create_gamma();
+	if (!gamma)
+		return -ENOMEM;
+
+	gamma->type = GAMMA_CUSTOM;
+	gamma->num_entries = lut_size;
+	__drm_lut_to_dc_gamma(lut, gamma, false);
 
-	dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
-	ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+	res = mod_color_calculate_degamma_params(func, gamma, true);
 
 	dc_gamma_release(&gamma);
-	if (!ret) {
-		dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
-		DRM_ERROR("Out of memory when calculating degamma params\n");
-		return -ENOMEM;
-	}
+
+	return res ? 0 : -ENOMEM;
+}
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns 0 on success.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+	struct dc_stream_state *stream = crtc->stream;
+	struct amdgpu_device *adev =
+		(struct amdgpu_device *)crtc->base.state->dev->dev_private;
+	bool has_rom = adev->asic_type <= CHIP_RAVEN;
+	struct drm_color_ctm *ctm = NULL;
+	const struct drm_color_lut *degamma_lut, *regamma_lut;
+	uint32_t degamma_size, regamma_size;
+	bool has_regamma, has_degamma;
+	bool is_legacy;
+	int r;
+
+	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
+	if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
+		return -EINVAL;
+
+	regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
+	if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
+	    regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
+		return -EINVAL;
+
+	has_degamma =
+		degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
+
+	has_regamma =
+		regamma_lut && !__is_lut_linear(regamma_lut, regamma_size);
+
+	is_legacy = regamma_size == MAX_COLOR_LEGACY_LUT_ENTRIES;
+
+	/* Reset all adjustments. */
+	crtc->cm_has_degamma = false;
+	crtc->cm_is_degamma_srgb = false;
+
+	/* Setup regamma and degamma. */
+	if (is_legacy) {
+		/*
+		 * Legacy regamma forces us to use the sRGB RGM as a base.
+		 * This also means we can't use linear DGM since DGM needs
+		 * to use sRGB as a base as well, resulting in incorrect CRTC
+		 * DGM and CRTC CTM.
+		 *
+		 * TODO: Just map this to the standard regamma interface
+		 * instead since this isn't really right. One of the cases
+		 * where this setup currently fails is trying to do an
+		 * inverse color ramp in legacy userspace.
+		 */
+		crtc->cm_is_degamma_srgb = true;
+		stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+		stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+
+		r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
+				    regamma_size, has_rom);
+		if (r)
+			return r;
+	} else if (has_regamma) {
+		/* CRTC RGM goes into RGM LUT. */
+		stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+		stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+
+		r = __set_output_tf(stream->out_transfer_func, regamma_lut,
+				    regamma_size, has_rom);
+		if (r)
+			return r;
+	} else {
+		/*
+		 * No CRTC RGM means we can just put the block into bypass
+		 * since we don't have any plane level adjustments using it.
+		 */
+		stream->out_transfer_func->type = TF_TYPE_BYPASS;
+		stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+	}
+
+	/*
+	 * CRTC DGM goes into DGM LUT. It would be nice to place it
+	 * into the RGM since it's a more featured block but we'd
+	 * have to place the CTM in the OCSC in that case.
+	 */
+	crtc->cm_has_degamma = has_degamma;
+
+	/* Setup CRTC CTM. */
+	if (crtc->base.ctm) {
+		ctm = (struct drm_color_ctm *)crtc->base.ctm->data;
+
+		/*
+		 * Gamut remapping must be used for gamma correction
+		 * since it comes before the regamma correction.
+		 *
+		 * OCSC could be used for gamma correction, but we'd need to
+		 * blend the adjustments together with the required output
+		 * conversion matrix - so just use the gamut remap block
+		 * for now.
+		 */
+		__drm_ctm_to_dc_matrix(ctm, stream->gamut_remap_matrix.matrix);
+
+		stream->gamut_remap_matrix.enable_remap = true;
+		stream->csc_color_matrix.enable_adjustment = false;
+	} else {
+		/* Bypass CTM. */
+		stream->gamut_remap_matrix.enable_remap = false;
+		stream->csc_color_matrix.enable_adjustment = false;
 	}
 
 	return 0;
 }
 
+/**
+ * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
+ * @crtc: amdgpu_dm crtc state
+ * @dc_plane_state: target DC surface
+ *
+ * Update the underlying dc_stream_state's input transfer function (ITF) in
+ * preparation for hardware commit. The transfer function used depends on
+ * the preparation done on the stream for color management.
+ *
+ * Returns 0 on success.
+ */
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+				      struct dc_plane_state *dc_plane_state)
+{
+	const struct drm_color_lut *degamma_lut;
+	uint32_t degamma_size;
+	int r;
+
+	if (crtc->cm_has_degamma) {
+		degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
+						 &degamma_size);
+		ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
+
+		dc_plane_state->in_transfer_func->type =
+			TF_TYPE_DISTRIBUTED_POINTS;
+
+		/*
+		 * This case isn't fully correct, but also fairly
+		 * uncommon. This is userspace trying to use a
+		 * legacy gamma LUT + atomic degamma LUT
+		 * at the same time.
+		 *
+		 * Legacy gamma requires the input to be in linear
+		 * space, so that means we need to apply an sRGB
+		 * degamma. But color module also doesn't support
+		 * a user ramp in this case so the degamma will
+		 * be lost.
+		 *
+		 * Even if we did support it, it's still not right:
+		 *
+		 * Input -> CRTC DGM -> sRGB DGM -> CRTC CTM ->
+		 * sRGB RGM -> CRTC RGM -> Output
+		 *
+		 * The CSC will be done in the wrong space since
+		 * we're applying an sRGB DGM on top of the CRTC
+		 * DGM.
+		 *
+		 * TODO: Don't use the legacy gamma interface and just
+		 * map these to the atomic one instead.
+		 */
+		if (crtc->cm_is_degamma_srgb)
+			dc_plane_state->in_transfer_func->tf =
+				TRANSFER_FUNCTION_SRGB;
+		else
+			dc_plane_state->in_transfer_func->tf =
+				TRANSFER_FUNCTION_LINEAR;
+
+		r = __set_input_tf(dc_plane_state->in_transfer_func,
+				   degamma_lut, degamma_size);
+		if (r)
+			return r;
+	} else if (crtc->cm_is_degamma_srgb) {
+		/*
+		 * For legacy gamma support we need the regamma input
+		 * in linear space. Assume that the input is sRGB.
+		 */
+		dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
+		dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+	} else {
+		/* ...Otherwise we can just bypass the DGM block.
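Sketch (the call-site shape is illustrative) of how the two hooks cooperate: the CRTC pass must run first because the plane pass keys off the cm_* flags it records:

static int example_apply_color_mgmt(struct dm_crtc_state *dm_crtc_state,
				    struct dc_plane_state *dc_plane_state)
{
	int ret;

	/* Programs the stream OTF/CTM and records cm_has_degamma etc. */
	ret = amdgpu_dm_update_crtc_color_mgmt(dm_crtc_state);
	if (ret)
		return ret;

	/* Chooses the per-plane ITF based on the flags recorded above. */
	return amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
}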
*/ + dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a10e3a50d9ef..eaad9099bc0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -24,28 +24,63 @@ */ #include <drm/drm_crtc.h> +#include <drm/drm_vblank.h> #include "amdgpu.h" #include "amdgpu_dm.h" #include "dc.h" -enum amdgpu_dm_pipe_crc_source { - AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0, - AMDGPU_DM_PIPE_CRC_SOURCE_AUTO, - AMDGPU_DM_PIPE_CRC_SOURCE_MAX, - AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, +static const char *const pipe_crc_sources[] = { + "none", + "crtc", + "crtc dither", + "dprx", + "dprx dither", + "auto", }; static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) { if (!source || !strcmp(source, "none")) return AMDGPU_DM_PIPE_CRC_SOURCE_NONE; - if (!strcmp(source, "auto")) - return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO; + if (!strcmp(source, "auto") || !strcmp(source, "crtc")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC; + if (!strcmp(source, "dprx")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX; + if (!strcmp(source, "crtc dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER; + if (!strcmp(source, "dprx dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER; return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; } +static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER); +} + +static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER); +} + +static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE); +} + +const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, + size_t *count) +{ + *count = ARRAY_SIZE(pipe_crc_sources); + return pipe_crc_sources; +} + int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt) @@ -62,14 +97,57 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, return 0; } -int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, + struct dm_crtc_state *dm_crtc_state, + enum amdgpu_dm_pipe_crc_source source) { struct amdgpu_device *adev = crtc->dev->dev_private; - struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state); - struct dc_stream_state *stream_state = crtc_state->stream; - bool enable; + struct dc_stream_state *stream_state = dm_crtc_state->stream; + bool enable = amdgpu_dm_is_valid_crc_source(source); + int ret = 0; + /* Configuration will be deferred to stream enable. */ + if (!stream_state) + return 0; + + mutex_lock(&adev->dm.dc_lock); + + /* Enable CRTC CRC generation if necessary. 
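Expected behaviour of the parsing and dither helpers above, written as checks (a sketch; "control" and "data" refer to the standard DRM CRC debugfs files under /sys/kernel/debug/dri/<card>/crtc-<n>/crc/):

static void crc_source_examples(void)
{
	/* "auto" and "crtc" both select CRTC/OTG CRC generation. */
	WARN_ON(dm_parse_crc_source("auto") != AMDGPU_DM_PIPE_CRC_SOURCE_CRTC);
	WARN_ON(dm_parse_crc_source("crtc") != AMDGPU_DM_PIPE_CRC_SOURCE_CRTC);

	/* A NULL or "none" source disables capture and restores dithering. */
	WARN_ON(dm_parse_crc_source(NULL) != AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
	WARN_ON(!dm_need_crc_dither(AMDGPU_DM_PIPE_CRC_SOURCE_NONE));

	/* Only real capture sources count as valid. */
	WARN_ON(amdgpu_dm_is_valid_crc_source(AMDGPU_DM_PIPE_CRC_SOURCE_NONE));
	WARN_ON(!amdgpu_dm_is_valid_crc_source(AMDGPU_DM_PIPE_CRC_SOURCE_DPRX));
}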
*/ + if (dm_is_crc_source_crtc(source)) { + if (!dc_stream_configure_crc(stream_state->ctx->dc, + stream_state, enable, enable)) { + ret = -EINVAL; + goto unlock; + } + } + + /* Configure dithering */ + if (!dm_need_crc_dither(source)) { + dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { + dc_stream_set_dither_option(stream_state, + DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } + +unlock: + mutex_unlock(&adev->dm.dc_lock); + + return ret; +} + +int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); + struct drm_crtc_commit *commit; + struct dm_crtc_state *crtc_state; + struct drm_dp_aux *aux = NULL; + bool enable = false; + bool enabled = false; + int ret = 0; if (source < 0) { DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n", @@ -77,41 +155,124 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) return -EINVAL; } - if (!stream_state) { - DRM_ERROR("No stream state for CRTC%d\n", crtc->index); - return -EINVAL; - } + ret = drm_modeset_lock(&crtc->mutex, NULL); + if (ret) + return ret; - enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO); + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); - mutex_lock(&adev->dm.dc_lock); - if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, - enable, enable)) { - mutex_unlock(&adev->dm.dc_lock); - return -EINVAL; + if (commit) { + /* + * Need to wait for all outstanding programming to complete + * in commit tail since it can modify CRC related fields and + * hardware state. Since we're holding the CRTC lock we're + * guaranteed that no other commit work can be queued off + * before we modify the state below. + */ + ret = wait_for_completion_interruptible_timeout( + &commit->hw_done, 10 * HZ); + if (ret) + goto cleanup; } - /* When enabling CRC, we should also disable dithering. */ - dc_stream_set_dither_option(stream_state, - enable ? 
DITHER_OPTION_TRUN8 - : DITHER_OPTION_DEFAULT); + enable = amdgpu_dm_is_valid_crc_source(source); + crtc_state = to_dm_crtc_state(crtc->state); - mutex_unlock(&adev->dm.dc_lock); + /* + * USER REQ SRC | CURRENT SRC | BEHAVIOR + * ----------------------------- + * None | None | Do nothing + * None | CRTC | Disable CRTC CRC, set default to dither + * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither + * None | CRTC DITHER | Disable CRTC CRC + * None | DPRX DITHER | Disable DPRX CRC, need 'aux' + * CRTC | XXXX | Enable CRTC CRC, no dither + * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither + * CRTC DITHER | XXXX | Enable CRTC CRC, set dither + * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither + */ + if (dm_is_crc_source_dprx(source) || + (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && + dm_is_crc_source_dprx(crtc_state->crc_src))) { + struct amdgpu_dm_connector *aconn = NULL; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(crtc->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (!connector->state || connector->state->crtc != crtc) + continue; + + aconn = to_amdgpu_dm_connector(connector); + break; + } + drm_connector_list_iter_end(&conn_iter); + + if (!aconn) { + DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index); + ret = -EINVAL; + goto cleanup; + } + + aux = &aconn->dm_dp_aux.aux; + + if (!aux) { + DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); + ret = -EINVAL; + goto cleanup; + } + } + + if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { + ret = -EINVAL; + goto cleanup; + } /* * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. */ - if (!crtc_state->crc_enabled && enable) - drm_crtc_vblank_get(crtc); - else if (crtc_state->crc_enabled && !enable) + enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src); + if (!enabled && enable) { + ret = drm_crtc_vblank_get(crtc); + if (ret) + goto cleanup; + + if (dm_is_crc_source_dprx(source)) { + if (drm_dp_start_crc(aux, crtc)) { + DRM_DEBUG_DRIVER("dp start crc failed\n"); + ret = -EINVAL; + goto cleanup; + } + } + } else if (enabled && !enable) { drm_crtc_vblank_put(crtc); + if (dm_is_crc_source_dprx(source)) { + if (drm_dp_stop_crc(aux)) { + DRM_DEBUG_DRIVER("dp stop crc failed\n"); + ret = -EINVAL; + goto cleanup; + } + } + } - crtc_state->crc_enabled = enable; + crtc_state->crc_src = source; /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; - return 0; + +cleanup: + if (commit) + drm_crtc_commit_put(commit); + + drm_modeset_unlock(&crtc->mutex); + + return ret; } /** @@ -134,7 +295,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) stream_state = crtc_state->stream; /* Early return if CRC capture is not enabled. 
*/ - if (!crtc_state->crc_enabled) + if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src)) return; /* @@ -148,10 +309,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) return; } - if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, - &crcs[0], &crcs[1], &crcs[2])) - return; + if (dm_is_crc_source_crtc(crtc_state->crc_src)) { + if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, + &crcs[0], &crcs[1], &crcs[2])) + return; - drm_crtc_add_crc_entry(crtc, true, - drm_crtc_accurate_vblank_count(crtc), crcs); + drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); + } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h new file mode 100644 index 000000000000..f7d731797d3f --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -0,0 +1,67 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ +#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ + +struct drm_crtc; +struct dm_crtc_state; + +enum amdgpu_dm_pipe_crc_source { + AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0, + AMDGPU_DM_PIPE_CRC_SOURCE_CRTC, + AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER, + AMDGPU_DM_PIPE_CRC_SOURCE_DPRX, + AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER, + AMDGPU_DM_PIPE_CRC_SOURCE_MAX, + AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, +}; + +static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source) +{ + return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) && + (source < AMDGPU_DM_PIPE_CRC_SOURCE_MAX); +} + +/* amdgpu_dm_crc.c */ +#ifdef CONFIG_DEBUG_FS +int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, + struct dm_crtc_state *dm_crtc_state, + enum amdgpu_dm_pipe_crc_source source); +int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); +int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, + size_t *values_cnt); +const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, + size_t *count); +void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); +#else +#define amdgpu_dm_crtc_set_crc_source NULL +#define amdgpu_dm_crtc_verify_crc_source NULL +#define amdgpu_dm_crtc_get_crc_sources NULL +#define amdgpu_dm_crtc_handle_crc_irq(x) +#endif + +#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 4a55cde027cf..bdb37e611015 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -23,12 +23,15 @@ * */ -#include <linux/debugfs.h> +#include <linux/uaccess.h> + +#include <drm/drm_debugfs.h> #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_debugfs.h" +#include "dm_helpers.h" /* function description * get/ set DP configuration: lane_count, link_rate, spread_spectrum @@ -672,6 +675,71 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us } /* + * Returns the current and maximum output bpc for the connector. 
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
+ */
+static int output_bpc_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct drm_device *dev = connector->dev;
+	struct drm_crtc *crtc = NULL;
+	struct dm_crtc_state *dm_crtc_state = NULL;
+	int res = -ENODEV;
+	unsigned int bpc;
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	if (connector->state == NULL)
+		goto unlock;
+
+	crtc = connector->state->crtc;
+	if (crtc == NULL)
+		goto unlock;
+
+	drm_modeset_lock(&crtc->mutex, NULL);
+	if (crtc->state == NULL)
+		goto unlock;
+
+	dm_crtc_state = to_dm_crtc_state(crtc->state);
+	if (dm_crtc_state->stream == NULL)
+		goto unlock;
+
+	switch (dm_crtc_state->stream->timing.display_color_depth) {
+	case COLOR_DEPTH_666:
+		bpc = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bpc = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bpc = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bpc = 12;
+		break;
+	case COLOR_DEPTH_161616:
+		bpc = 16;
+		break;
+	default:
+		goto unlock;
+	}
+
+	seq_printf(m, "Current: %u\n", bpc);
+	seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
+	res = 0;
+
+unlock:
+	if (crtc)
+		drm_modeset_unlock(&crtc->mutex);
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return res;
+}
+
+/*
  * Returns the min and max vrr vfreq through the connector's debugfs file.
  * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
  */
@@ -688,6 +756,130 @@ static int vrr_range_show(struct seq_file *m, void *data)
 	return 0;
 }
+
+/* function description
+ *
+ * generic SDP message access for testing
+ *
+ * debugfs sdp_message is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * SDP header
+ * Hb0 : Secondary-Data Packet ID
+ * Hb1 : Secondary-Data Packet type
+ * Hb2 : Secondary-Data-packet-specific header, Byte 0
+ * Hb3 : Secondary-Data-packet-specific header, Byte 1
+ *
+ * to send a custom SDP message: write 4 bytes of SDP header followed by
+ * 32 bytes of raw data
+ */
+static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	int r;
+	uint8_t data[36];
+	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+	struct dm_crtc_state *acrtc_state;
+	uint32_t write_size = 36;
+
+	if (connector->base.status != connector_status_connected)
+		return -ENODEV;
+
+	if (size == 0)
+		return 0;
+
+	acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
+
+	r = copy_from_user(data, buf, write_size);
+
+	write_size -= r;
+
+	dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
+
+	return write_size;
+}
+
+static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	int r;
+	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+	if (size < sizeof(connector->debugfs_dpcd_address))
+		return 0;
+
+	r = copy_from_user(&connector->debugfs_dpcd_address,
+			buf, sizeof(connector->debugfs_dpcd_address));
+
+	return size - r;
+}
+
+static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	int r;
+	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+	if (size < sizeof(connector->debugfs_dpcd_size))
+		return 0;
+
+	r = copy_from_user(&connector->debugfs_dpcd_size,
+			buf, sizeof(connector->debugfs_dpcd_size));
+
+	if (connector->debugfs_dpcd_size > 256)
+		connector->debugfs_dpcd_size = 0;
+
+	return size - r;
+}
+
+static ssize_t dp_dpcd_data_write(struct file
*f, const char __user *buf, + size_t size, loff_t *pos) +{ + int r; + char *data; + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + uint32_t write_size = connector->debugfs_dpcd_size; + + if (size < write_size) + return 0; + + data = kzalloc(write_size, GFP_KERNEL); + if (!data) + return 0; + + r = copy_from_user(data, buf, write_size); + + dm_helpers_dp_write_dpcd(link->ctx, link, + connector->debugfs_dpcd_address, data, write_size - r); + kfree(data); + return write_size - r; +} + +static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + int r; + char *data; + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + uint32_t read_size = connector->debugfs_dpcd_size; + + if (size < read_size) + return 0; + + data = kzalloc(read_size, GFP_KERNEL); + if (!data) + return 0; + + dm_helpers_dp_read_dpcd(link->ctx, link, + connector->debugfs_dpcd_address, data, read_size); + + r = copy_to_user(buf, data, read_size); + + kfree(data); + return read_size - r; +} + +DEFINE_SHOW_ATTRIBUTE(output_bpc); DEFINE_SHOW_ATTRIBUTE(vrr_range); static const struct file_operations dp_link_settings_debugfs_fops = { @@ -710,6 +902,31 @@ static const struct file_operations dp_phy_test_pattern_fops = { .llseek = default_llseek }; +static const struct file_operations sdp_message_fops = { + .owner = THIS_MODULE, + .write = dp_sdp_message_debugfs_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dpcd_address_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_dpcd_address_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dpcd_size_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_dpcd_size_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dpcd_data_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dpcd_data_read, + .write = dp_dpcd_data_write, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -717,28 +934,79 @@ static const struct { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, {"test_pattern", &dp_phy_test_pattern_fops}, - {"vrr_range", &vrr_range_fops} + {"output_bpc", &output_bpc_fops}, + {"vrr_range", &vrr_range_fops}, + {"sdp_message", &sdp_message_fops}, + {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops}, + {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops}, + {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; -int connector_debugfs_init(struct amdgpu_dm_connector *connector) +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint32_t psr_state = 0; + + dc_link_get_psr_state(link, &psr_state); + + *val = psr_state; + + return 0; +} + + 
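The aux_dpcd_address, aux_dpcd_size and aux_dpcd_data entries registered in the table above form a small stateful protocol: userspace first writes a DPCD address and a transfer size as raw native-endian integers (matching the sizeof() checks in the write handlers), then reads or writes the data file, which carries the transfer over the link's AUX channel via dm_helpers_dp_read_dpcd()/dm_helpers_dp_write_dpcd(). A hedged userspace sketch of a DPCD read follows; the card index and connector directory name are assumptions about the running system, not part of the patch:

    /* Hypothetical userspace sketch (not part of this patch): read the
     * first 14 bytes of DPCD receiver capability (address 0x0) through
     * the aux_dpcd_* debugfs files created above. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int write_u32(const char *path, uint32_t val)
    {
    	int fd = open(path, O_WRONLY);

    	if (fd < 0)
    		return -1;
    	if (write(fd, &val, sizeof(val)) != sizeof(val)) {
    		close(fd);
    		return -1;
    	}
    	return close(fd);
    }

    int main(void)
    {
    	uint8_t dpcd[14];
    	int fd, i;

    	/* "DP-1" and dri/0 are assumptions; pick the right connector dir. */
    	if (write_u32("/sys/kernel/debug/dri/0/DP-1/aux_dpcd_address", 0x0) ||
    	    write_u32("/sys/kernel/debug/dri/0/DP-1/aux_dpcd_size", sizeof(dpcd)))
    		return 1;

    	fd = open("/sys/kernel/debug/dri/0/DP-1/aux_dpcd_data", O_RDONLY);
    	if (fd < 0 || read(fd, dpcd, sizeof(dpcd)) != sizeof(dpcd))
    		return 1;
    	close(fd);

    	for (i = 0; i < (int)sizeof(dpcd); i++)
    		printf("%02x ", dpcd[i]);
    	printf("\n");
    	return 0;
    }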
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
+void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 {
 	int i;
-	struct dentry *ent, *dir = connector->base.debugfs_entry;
+	struct dentry *dir = connector->base.debugfs_entry;
 
 	if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 	    connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
 		for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
-			ent = debugfs_create_file(dp_debugfs_entries[i].name,
-						  0644,
-						  dir,
-						  connector,
-						  dp_debugfs_entries[i].fops);
-			if (IS_ERR(ent))
-				return PTR_ERR(ent);
+			debugfs_create_file(dp_debugfs_entries[i].name,
+					    0644, dir, connector,
+					    dp_debugfs_entries[i].fops);
 		}
 	}
+	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+		debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+
+	debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+				   &force_yuv420_output_fops);
 
-	return 0;
 }
 
 /*
@@ -837,11 +1105,64 @@ static int target_backlight_read(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int mst_topo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct amdgpu_dm_connector *aconnector;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		aconnector = to_amdgpu_dm_connector(connector);
+
+		seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
+		drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+
 static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
 	{"amdgpu_current_backlight_pwm", &current_backlight_read},
 	{"amdgpu_target_backlight_pwm", &target_backlight_read},
+	{"amdgpu_mst_topology", &mst_topo},
 };
 
+/*
+ * Sets the DC visual confirm debug option from the given string.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = data;
+
+	adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
+
+	return 0;
+}
+
+/*
+ * Reads the DC visual confirm debug option value into the given buffer.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + */ +static int visual_confirm_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.visual_confirm; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, + visual_confirm_set, "%llu\n"); + int dtn_debugfs_init(struct amdgpu_device *adev) { static const struct file_operations dtn_log_fops = { @@ -852,7 +1173,7 @@ int dtn_debugfs_init(struct amdgpu_device *adev) }; struct drm_minor *minor = adev->ddev->primary; - struct dentry *ent, *root = minor->debugfs_root; + struct dentry *root = minor->debugfs_root; int ret; ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list, @@ -860,12 +1181,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev) if (ret) return ret; - ent = debugfs_create_file( - "amdgpu_dm_dtn_log", - 0644, - root, - adev, - &dtn_log_fops); + debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, + &dtn_log_fops); - return PTR_ERR_OR_ZERO(ent); + debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, + &visual_confirm_fops); + + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index bdef1587b0a0..5e5b2b2afa31 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -29,7 +29,7 @@ #include "amdgpu.h" #include "amdgpu_dm.h" -int connector_debugfs_init(struct amdgpu_dm_connector *connector); +void connector_debugfs_init(struct amdgpu_dm_connector *connector); int dtn_debugfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 000000000000..77181ddf6c8e --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,346 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/drm_hdcp.h> + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + +} + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + 
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + + + + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + + +} +static void event_property_update(struct work_struct *work) +{ + + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct drm_device *dev = hdcp_work->aconnector->base.dev; + long ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + + if (aconnector->base.state->commit) { + ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); + hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + + if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + + + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + + mutex_lock(&hdcp_work->mutex); + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + + if (query.encryption_status != hdcp_work->encryption_status) { + hdcp_work->encryption_status = query.encryption_status; + schedule_work(&hdcp_work->property_update_work); + } + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + + +void hdcp_destroy(struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + kfree(hdcp_work); + +} + +static void update_config(void *handle, struct cp_psp_stream_config 
*config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->stream_enc_inst; + link->dig_be = config->link_enc_inst; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->adjust.hdcp2.disable = 1; + +} + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + int i = 0; + + if (hdcp_work == NULL) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->handle = hdcp_work; + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work); + + return NULL; + + + +} + + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 000000000000..d3ba505d0696 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_ +#define AMDGPU_DM_AMDGPU_DM_HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp.h" +#include "dc.h" +#include "dm_cp_psp.h" + +struct mod_hdcp; +struct mod_hdcp_link; +struct mod_hdcp_display; +struct cp_psp; + +struct hdcp_workqueue { + struct work_struct cpirq_work; + struct work_struct property_update_work; + struct delayed_work callback_dwork; + struct delayed_work watchdog_timer_dwork; + struct delayed_work property_validate_dwork; + struct amdgpu_dm_connector *aconnector; + struct mutex mutex; + + struct mod_hdcp hdcp; + struct mod_hdcp_output output; + struct mod_hdcp_display display; + struct mod_hdcp_link link; + + enum mod_hdcp_encryption_status encryption_status; + uint8_t max_link; +}; + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, + struct amdgpu_dm_connector *aconnector); +void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index b39766bd2840..11e5784aa62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -28,7 +28,6 @@ #include <linux/version.h> #include <linux/i2c.h> -#include <drm/drmP.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> @@ -98,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count <= 0) { - DRM_INFO("SADs count is: %d, don't need to read it\n", - sad_count); + if (sad_count < 0) + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return result; - } edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; for (i = 0; i < edid_caps->audio_mode_count; ++i) { @@ -264,7 +262,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( } /* - * poll pending down reply before clear payload allocation table + * poll pending down reply */ void dm_helpers_dp_mst_poll_pending_down_reply( struct dc_context *ctx, @@ -283,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message. 
*/ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { @@ -294,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) - return false; + return ACT_FAILED; mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) - return false; + return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) - return false; + return ACT_FAILED; - return true; + return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( @@ -542,6 +540,18 @@ bool dm_helpers_submit_i2c( return result; } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dm_helpers_dp_write_dsc_enable( + struct dc_context *ctx, + const struct dc_stream_state *stream, + bool enable +) +{ + uint8_t enable_dsc = enable ? 1 : 0; + + return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1); +} +#endif bool dm_helpers_is_dp_sink_present(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index cd10f77cdeb0..64445c4cc4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -23,8 +23,6 @@ * */ -#include <drm/drmP.h> - #include "dm_services_types.h" #include "dc.h" @@ -279,8 +277,6 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, return DAL_INVALID_IRQ_HANDLER_IDX; } - memset(handler_data, 0, sizeof(*handler_data)); - init_handler_common_data(handler_data, ih, handler_args, &adev->dm); irq_source = int_params->irq_source; @@ -674,11 +670,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, __func__); } +static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VUPDATE, + __func__); +} + static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { .set = amdgpu_dm_set_crtc_irq_state, .process = amdgpu_dm_irq_handler, }; +static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { + .set = amdgpu_dm_set_vupdate_irq_state, + .process = amdgpu_dm_irq_handler, +}; + static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { .set = amdgpu_dm_set_pflip_irq_state, .process = amdgpu_dm_irq_handler, @@ -695,6 +710,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dm_crtc_irq_funcs; + adev->vupdate_irq.num_types = adev->mode_info.num_crtc; + adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; + adev->pageflip_irq.num_types = adev->mode_info.num_crtc; adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; @@ -714,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); @@ -733,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) true); } } + drm_connector_list_iter_end(&iter); } 
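For reference, the hpd_init/hpd_fini conversion above replaces a bare list_for_each_entry() over mode_config.connector_list with the drm_connector_list_iter helpers, which take the connector list lock and manage connector references internally, so the walk stays safe while MST connectors are created or destroyed concurrently. Reduced to its skeleton, the pattern is:

    /* Generic drm_connector_list_iter skeleton; dev is any struct
     * drm_device *.  begin/end must always be paired. */
    struct drm_connector *connector;
    struct drm_connector_list_iter iter;

    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
    	/* connector stays valid (referenced) for this iteration only */
    }
    drm_connector_list_iter_end(&iter);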
/** @@ -747,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; @@ -761,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) false); } } + drm_connector_list_iter_end(&iter); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index c4ea3a91f17a..2bf8534c18fb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,7 +36,9 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" - +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -84,6 +86,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, { ssize_t result = 0; struct aux_payload payload; + enum aux_channel_operation_result operation_result; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -97,42 +100,40 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; payload.defer_delay = 0; - result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload); + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, + &operation_result); if (payload.write) result = msg->size; - if (result < 0) /* DC doesn't know about kernel error codes */ - result = -EIO; + if (result < 0) + switch (operation_result) { + case AUX_CHANNEL_OPERATION_SUCCEEDED: + break; + case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: + result = -EIO; + break; + case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: + result = -EBUSY; + break; + case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: + result = -ETIMEDOUT; + break; + } return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; - if (amdgpu_dm_connector->edid) { - kfree(amdgpu_dm_connector->edid); - amdgpu_dm_connector->edid = NULL; - } + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); @@ -141,15 +142,42 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) kfree(amdgpu_dm_connector); } +static int +amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + struct drm_dp_mst_port *port = amdgpu_dm_connector->port; + +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); + 
amdgpu_dm_connector->debugfs_dpcd_address = 0; + amdgpu_dm_connector->debugfs_dpcd_size = 0; +#endif + + return drm_dp_mst_connector_late_register(connector, port); +} + +static void +amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + struct drm_dp_mst_port *port = amdgpu_dm_connector->port; + + drm_dp_mst_connector_early_unregister(connector, port); +} + static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_set_property = amdgpu_dm_connector_atomic_set_property, - .atomic_get_property = amdgpu_dm_connector_atomic_get_property + .atomic_get_property = amdgpu_dm_connector_atomic_get_property, + .late_register = amdgpu_dm_mst_connector_late_register, + .early_unregister = amdgpu_dm_mst_connector_early_unregister, }; static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -208,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +static struct drm_encoder * +dm_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *connector_state) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + return &to_amdgpu_dm_connector(connector)->mst_encoder->base; +} - return &amdgpu_dm_connector->mst_encoder->base; +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = dm_mst_best_encoder, + .atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -373,13 +413,17 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector) { aconnector->dm_dp_aux.aux.name = "dmdc"; - aconnector->dm_dp_aux.aux.dev = dm->adev->dev; + aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev; aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, - aconnector->base.name, dm->adev->dev); + &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index a114954d6a5b..55a520a63712 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -24,7 +24,6 @@ #include <linux/string.h> 
#include <linux/acpi.h> -#include <drm/drmP.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "dm_services.h" @@ -33,6 +32,7 @@ #include "amdgpu_dm_irq.h" #include "amdgpu_pm.h" #include "dm_pp_smu.h" +#include "amdgpu_smu.h" bool dm_pp_apply_display_requirements( @@ -40,6 +40,7 @@ bool dm_pp_apply_display_requirements( const struct dm_pp_display_configuration *pp_display_cfg) { struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; int i; if (adev->pm.dpm_enabled) { @@ -105,6 +106,9 @@ bool dm_pp_apply_display_requirements( adev->powerplay.pp_funcs->display_configuration_change( adev->powerplay.pp_handle, &adev->pm.pm_display_cfg); + else + smu_display_configuration_change(smu, + &adev->pm.pm_display_cfg); amdgpu_pm_compute_clocks(adev); } @@ -144,6 +148,36 @@ static void get_default_clock_levels( } } +static enum smu_clk_type dc_to_smu_clock_type( + enum dm_pp_clock_type dm_pp_clk_type) +{ + enum smu_clk_type smu_clk_type = SMU_CLK_COUNT; + + switch (dm_pp_clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + smu_clk_type = SMU_DISPCLK; + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + smu_clk_type = SMU_GFXCLK; + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + smu_clk_type = SMU_MCLK; + break; + case DM_PP_CLOCK_TYPE_DCEFCLK: + smu_clk_type = SMU_DCEFCLK; + break; + case DM_PP_CLOCK_TYPE_SOCCLK: + smu_clk_type = SMU_SOCCLK; + break; + default: + DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", + dm_pp_clk_type); + break; + } + + return smu_clk_type; +} + static enum amd_pp_clock_type dc_to_pp_clock_type( enum dm_pp_clock_type dm_pp_clk_type) { @@ -287,7 +321,8 @@ static void pp_to_dc_clock_levels_with_voltage( DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < clk_level_info->num_levels; i++) { - DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); + DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz, + pp_clks->data[i].voltage_in_mv); clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv; } @@ -308,6 +343,12 @@ bool dm_pp_get_clock_levels_by_type( if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { /* Error in pplib. Provide default values. 
*/ + return true; + } + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { + if (smu_get_clock_by_type(&adev->smu, + dc_to_pp_clock_type(clk_type), + &pp_clks)) { get_default_clock_levels(clk_type, dc_clks); return true; } @@ -324,6 +365,13 @@ bool dm_pp_get_clock_levels_by_type( validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) { + if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { + DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); + validation_clks.engine_max_clock = 72000; + validation_clks.memory_max_clock = 80000; + validation_clks.level = 0; + } } DRM_INFO("DM_PPLIB: Validation clocks:\n"); @@ -374,14 +422,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency( void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_latency pp_clks = { 0 }; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret; + + if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) { + ret = pp_funcs->get_clock_by_type_with_latency(pp_handle, + dc_to_pp_clock_type(clk_type), + &pp_clks); + if (ret) + return false; + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) { + if (smu_get_clock_by_type_with_latency(&adev->smu, + dc_to_smu_clock_type(clk_type), + &pp_clks)) + return false; + } - if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency) - return false; - - if (pp_funcs->get_clock_by_type_with_latency(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clks)) - return false; pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); @@ -397,14 +452,20 @@ bool dm_pp_get_clock_levels_by_type_with_voltage( void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_voltage pp_clk_info = {0}; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage) - return false; - - if (pp_funcs->get_clock_by_type_with_voltage(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clk_info)) - return false; + int ret; + + if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) { + ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle, + dc_to_pp_clock_type(clk_type), + &pp_clk_info); + if (ret) + return false; + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) { + if (smu_get_clock_by_type_with_voltage(&adev->smu, + dc_to_pp_clock_type(clk_type), + &pp_clk_info)) + return false; + } pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); @@ -445,6 +506,10 @@ bool dm_pp_apply_clock_for_voltage_request( ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); + else if (adev->smu.ppt_funcs && + adev->smu.ppt_funcs->display_clock_voltage_request) + ret = smu_display_clock_voltage_request(&adev->smu, + &pp_clock_request); if (ret) return false; return true; @@ -462,6 +527,8 @@ bool dm_pp_get_static_clocks( ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); + else if (adev->smu.ppt_funcs) + ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); if (ret) return false; @@ -472,27 +539,6 @@ bool dm_pp_get_static_clocks( return true; } -void pp_rv_set_display_requirement(struct pp_smu *pp, - struct pp_smu_display_requirement_rv *req) -{ - const struct dc_context *ctx = pp->dm; - struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = 
adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - struct pp_display_clock_request clock = {0}; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return; - - clock.clock_type = amd_pp_dcf_clock; - clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000; - pp_funcs->display_clock_voltage_request(pp_handle, &clock); - - clock.clock_type = amd_pp_f_clock; - clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000; - pp_funcs->display_clock_voltage_request(pp_handle, &clock); -} - void pp_rv_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { @@ -508,9 +554,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; - if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges) - return; - for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { if (ranges->reader_wm_sets[i].wm_inst > 3) wm_dce_clocks[i].wm_set_id = WM_SET_A; @@ -543,7 +586,12 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); + if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) + pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, + &wm_with_clock_ranges); + else + smu_set_watermarks_for_clock_ranges(&adev->smu, + &wm_with_clock_ranges); } void pp_rv_set_pme_wa_enable(struct pp_smu *pp) @@ -553,10 +601,10 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe) - return; - - pp_funcs->notify_smu_enable_pwe(pp_handle); + if (pp_funcs && pp_funcs->notify_smu_enable_pwe) + pp_funcs->notify_smu_enable_pwe(pp_handle); + else if (adev->smu.ppt_funcs) + smu_notify_smu_enable_pwe(&adev->smu); } void pp_rv_set_active_display_count(struct pp_smu *pp, int count) @@ -611,17 +659,377 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz); } -void dm_pp_get_funcs_rv( - struct dc_context *ctx, - struct pp_smu_funcs_rv *funcs) +enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = + wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; + } + + for (i 
= 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; + } + + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */ + if (smu_set_azalia_d3_pme(smu)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ + if (smu_set_display_count(smu, count)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ + if (smu_set_deep_sleep_dcefclk(smu, mhz)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( + struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct pp_display_clock_request clock_req; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + if (smu_display_clock_voltage_request(smu, &clock_req)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct pp_display_clock_request clock_req; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + clock_req.clock_type = amd_pp_mem_clock; + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + if (smu_display_clock_voltage_request(smu, &clock_req)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_pstate_handshake_support( + struct pp_smu *pp, BOOLEAN pstate_handshake_supported) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct 
smu_context *smu = &adev->smu; + + if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, + enum pp_smu_nv_clock_id clock_id, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct pp_display_clock_request clock_req; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + switch (clock_id) { + case PP_SMU_NV_DISPCLK: + clock_req.clock_type = amd_pp_disp_clock; + break; + case PP_SMU_NV_PHYCLK: + clock_req.clock_type = amd_pp_phy_clock; + break; + case PP_SMU_NV_PIXELCLK: + clock_req.clock_type = amd_pp_pixel_clock; + break; + default: + break; + } + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + if (smu_display_clock_voltage_request(smu, &clock_req)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( + struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, + unsigned int *clock_values_in_khz, unsigned int *num_states) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_uclk_dpm_states) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_uclk_dpm_states(smu, + clock_values_in_khz, num_states)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 +enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_dpm_clock_table) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_dpm_clock_table(smu, clock_table)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) { - funcs->pp_smu.dm = ctx; - funcs->set_display_requirement = pp_rv_set_display_requirement; - funcs->set_wm_ranges = pp_rv_set_wm_ranges; - funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable; - funcs->set_display_count = pp_rv_set_active_display_count; - funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk; - funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq; - funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq; + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + 
wm_with_clock_ranges.wm_dmif_clocks_ranges;
+	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+			wm_with_clock_ranges.wm_mcif_clocks_ranges;
+	int32_t i;
+
+	if (!smu->ppt_funcs)
+		return PP_SMU_RESULT_UNSUPPORTED;
+
+	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+		if (ranges->reader_wm_sets[i].wm_inst > 3)
+			wm_dce_clocks[i].wm_set_id = WM_SET_A;
+		else
+			wm_dce_clocks[i].wm_set_id =
+					ranges->reader_wm_sets[i].wm_inst;
+
+		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+			ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+			ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+			ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+			ranges->reader_wm_sets[i].max_fill_clk_mhz;
+	}
+
+	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+		if (ranges->writer_wm_sets[i].wm_inst > 3)
+			wm_soc_clocks[i].wm_set_id = WM_SET_A;
+		else
+			wm_soc_clocks[i].wm_set_id =
+					ranges->writer_wm_sets[i].wm_inst;
+		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+			ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+			ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+			ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+			ranges->writer_wm_sets[i].max_drain_clk_mhz;
+	}
+
+	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+	return PP_SMU_RESULT_OK;
 }
+#endif
+void dm_pp_get_funcs(
+		struct dc_context *ctx,
+		struct pp_smu_funcs *funcs)
+{
+	switch (ctx->dce_version) {
+	case DCN_VERSION_1_0:
+	case DCN_VERSION_1_01:
+		funcs->ctx.ver = PP_SMU_VER_RV;
+		funcs->rv_funcs.pp_smu.dm = ctx;
+		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+		funcs->rv_funcs.set_display_count =
+				pp_rv_set_active_display_count;
+		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
+				pp_rv_set_min_deep_sleep_dcfclk;
+		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
+				pp_rv_set_hard_min_dcefclk_by_freq;
+		funcs->rv_funcs.set_hard_min_fclk_by_freq =
+				pp_rv_set_hard_min_fclk_by_freq;
+		break;
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+	case DCN_VERSION_2_0:
+		funcs->ctx.ver = PP_SMU_VER_NV;
+		funcs->nv_funcs.pp_smu.dm = ctx;
+		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
+		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
+				pp_nv_set_hard_min_dcefclk_by_freq;
+		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
+				pp_nv_set_min_deep_sleep_dcfclk;
+		funcs->nv_funcs.set_voltage_by_freq =
+				pp_nv_set_voltage_by_freq;
+		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;
+
+		/* todo: set_pme_wa_enable causes a 4k@60Hz display to not light up */
+		funcs->nv_funcs.set_pme_wa_enable = NULL;
+		/* todo: debug warning message */
+		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
+		/* todo: compare data with the Windows driver */
+		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
+		/* todo: compare data with the Windows driver */
+		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
+		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
+		break;
+#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+	case DCN_VERSION_2_1:
+		funcs->ctx.ver = PP_SMU_VER_RN;
+		funcs->rn_funcs.pp_smu.dm = ctx;
+		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
+		break;
+#endif
+	default:
+		DRM_ERROR("SMU version is not supported!\n");
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index d915e8c8769b..022da5d45d4d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -26,7 +26,6 @@
 #include <linux/string.h>
 #include <linux/acpi.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "dm_services.h"
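To round out the pp_smu rework, this is roughly how a caller would consume dm_pp_get_funcs(): let the DM fill in a struct pp_smu_funcs, then dispatch on the version tag it set. The caller below is hypothetical (the real consumer lives in DC core, outside this diff) and only mirrors the structure and function names used above; error handling is elided:

    /* Hypothetical caller sketch, not part of this patch. */
    static void example_use_pp_smu(struct dc_context *ctx)
    {
    	struct pp_smu_funcs funcs = { 0 };

    	dm_pp_get_funcs(ctx, &funcs);

    	switch (funcs.ctx.ver) {
    	case PP_SMU_VER_RV:
    		/* Raven callbacks are plain void functions. */
    		if (funcs.rv_funcs.set_display_count)
    			funcs.rv_funcs.set_display_count(&funcs.rv_funcs.pp_smu, 1);
    		break;
    	case PP_SMU_VER_NV:
    		/* Navi callbacks report an enum pp_smu_status. */
    		if (funcs.nv_funcs.set_display_count &&
    		    funcs.nv_funcs.set_display_count(&funcs.nv_funcs.pp_smu,
    						     1) != PP_SMU_RESULT_OK)
    			DRM_ERROR("set_display_count failed\n");
    		break;
    	default:
    		break;
    	}
    }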