Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 17
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1564
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 171
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 484
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 829
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h | 122
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 443
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 36
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 463
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c | 3150
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h | 101
11 files changed, 7380 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile new file mode 100644 index 000000000000..698b1d4f83f5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -0,0 +1,17 @@ +# +# Makefile for the 'dm' sub-component of DAL. +# It provides the control and status of dm blocks. + + + +AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o + +ifneq ($(CONFIG_DRM_AMD_DC),) +AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o +endif + +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc + +AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) + +AMD_DISPLAY_FILES += $(AMDGPU_DM) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c new file mode 100644 index 000000000000..ae4ba7777839 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -0,0 +1,1564 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services_types.h" +#include "dc.h" + +#include "vid.h" +#include "amdgpu.h" +#include "atom.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_types.h" + +#include "amd_shared.h" +#include "amdgpu_dm_irq.h" +#include "dm_helpers.h" + +#include "ivsrcid/ivsrcid_vislands30.h" + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/version.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_dp_mst_helper.h> + +#include "modules/inc/mod_freesync.h" + +/* Debug facilities */ +#define AMDGPU_DM_NOT_IMPL(fmt, ...) 
\ + DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__) + +/* + * dm_vblank_get_counter + * + * @brief + * Get counter for number of vertical blanks + * + * @param + * struct amdgpu_device *adev - [in] desired amdgpu device + * int disp_idx - [in] which CRTC to get the counter from + * + * @return + * Counter for vertical blanks + */ +static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else { + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; + + if (NULL == acrtc->target) { + DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc); + return 0; + } + + return dc_target_get_vblank_counter(acrtc->target); + } +} + +static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + else { + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; + + if (NULL == acrtc->target) { + DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc); + return 0; + } + + return dc_target_get_scanoutpos(acrtc->target, vbl, position); + } + + return 0; +} + +static bool dm_is_idle(void *handle) +{ + /* XXX todo */ + return true; +} + +static int dm_wait_for_idle(void *handle) +{ + /* XXX todo */ + return 0; +} + +static bool dm_check_soft_reset(void *handle) +{ + return false; +} + +static int dm_soft_reset(void *handle) +{ + /* XXX todo */ + return 0; +} + +static struct amdgpu_crtc *get_crtc_by_otg_inst( + struct amdgpu_device *adev, + int otg_inst) +{ + struct drm_device *dev = adev->ddev; + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + + /* + * following if is check inherited from both functions where this one is + * used now. Need to be checked why it could happen. + */ + if (otg_inst == -1) { + WARN_ON(1); + return adev->mode_info.crtcs[0]; + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->otg_inst == otg_inst) + return amdgpu_crtc; + } + + return NULL; +} + +static void dm_pflip_high_irq(void *interrupt_params) +{ + struct amdgpu_flip_work *works; + struct amdgpu_crtc *amdgpu_crtc; + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + unsigned long flags; + + amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); + + /* IRQ could occur when in initial stage */ + /*TODO work and BO cleanup */ + if (amdgpu_crtc == NULL) { + DRM_DEBUG_DRIVER("CRTC is null, returning.\n"); + return; + } + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, + amdgpu_crtc); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return; + } + + /* page flip completed. 
clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup usersapce */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, + works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n", + __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); +} + +static void dm_crtc_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + uint8_t crtc_index = 0; + struct amdgpu_crtc *acrtc; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); + + if (acrtc) + crtc_index = acrtc->crtc_id; + + drm_handle_vblank(adev->ddev, crtc_index); +} + +static int dm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +/* Prototypes of private functions */ +static int dm_early_init(void* handle); + +static void hotplug_notify_work_func(struct work_struct *work) +{ + struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work); + struct drm_device *dev = dm->ddev; + + drm_kms_helper_hotplug_event(dev); +} + +/* Init display KMS + * + * Returns 0 on success + */ +int amdgpu_dm_init(struct amdgpu_device *adev) +{ + struct dc_init_data init_data; + adev->dm.ddev = adev->ddev; + adev->dm.adev = adev; + + DRM_INFO("DAL is enabled\n"); + /* Zero all the fields */ + memset(&init_data, 0, sizeof(init_data)); + + /* initialize DAL's lock (for SYNC context use) */ + spin_lock_init(&adev->dm.dal_lock); + + /* initialize DAL's mutex */ + mutex_init(&adev->dm.dal_mutex); + + if(amdgpu_dm_irq_init(adev)) { + DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); + goto error; + } + + init_data.asic_id.chip_family = adev->family; + + init_data.asic_id.pci_revision_id = adev->rev_id; + init_data.asic_id.hw_internal_rev = adev->external_rev_id; + + init_data.asic_id.vram_width = adev->mc.vram_width; + /* TODO: initialize init_data.asic_id.vram_type here!!!! */ + init_data.asic_id.atombios_base_address = + adev->mode_info.atom_context->bios; + + init_data.driver = adev; + + adev->dm.cgs_device = amdgpu_cgs_create_device(adev); + + if (!adev->dm.cgs_device) { + DRM_ERROR("amdgpu: failed to create cgs device.\n"); + goto error; + } + + init_data.cgs_device = adev->dm.cgs_device; + + adev->dm.dal = NULL; + + init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; + + /* Display Core create. */ + adev->dm.dc = dc_create(&init_data); + + if (!adev->dm.dc) + DRM_INFO("Display Core failed to initialize!\n"); + + INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func); + + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); + if (!adev->dm.freesync_module) { + DRM_ERROR( + "amdgpu: failed to initialize freesync_module.\n"); + } else + DRM_INFO("amdgpu: freesync_module init done %p.\n", + adev->dm.freesync_module); + + if (amdgpu_dm_initialize_drm_device(adev)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + + /* Update the actual used number of crtc */ + adev->mode_info.num_crtc = adev->dm.display_indexes_num; + + /* TODO: Add_display_info? 
*/ + + /* TODO use dynamic cursor width */ + adev->ddev->mode_config.cursor_width = 128; + adev->ddev->mode_config.cursor_height = 128; + + if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + + DRM_INFO("KMS initialized.\n"); + + return 0; +error: + amdgpu_dm_fini(adev); + + return -1; +} + +void amdgpu_dm_fini(struct amdgpu_device *adev) +{ + amdgpu_dm_destroy_drm_device(&adev->dm); + /* + * TODO: pageflip, vlank interrupt + * + * amdgpu_dm_irq_fini(adev); + */ + + if (adev->dm.cgs_device) { + amdgpu_cgs_destroy_device(adev->dm.cgs_device); + adev->dm.cgs_device = NULL; + } + if (adev->dm.freesync_module) { + mod_freesync_destroy(adev->dm.freesync_module); + adev->dm.freesync_module = NULL; + } + /* DC Destroy TODO: Replace destroy DAL */ + { + dc_destroy(&adev->dm.dc); + } + return; +} + +/* moved from amdgpu_dm_kms.c */ +void amdgpu_dm_destroy() +{ +} + +static int dm_sw_init(void *handle) +{ + return 0; +} + +static int dm_sw_fini(void *handle) +{ + return 0; +} + +static void detect_link_for_all_connectors(struct drm_device *dev) +{ + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_link->type == dc_connection_mst_branch) { + DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", + aconnector, aconnector->base.base.id); + + if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) { + DRM_ERROR("DM_MST: Failed to start MST\n"); + ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; + } + } + } + + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void s3_handle_mst(struct drm_device *dev, bool suspend) +{ + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_link->type == dc_connection_mst_branch && + !aconnector->mst_port) { + + if (suspend) + drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); + else + drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); + } + } + + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static int dm_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Create DAL display manager */ + amdgpu_dm_init(adev); + + amdgpu_dm_hpd_init(adev); + + detect_link_for_all_connectors(adev->ddev); + + return 0; +} + +static int dm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_dm_hpd_fini(adev); + + amdgpu_dm_irq_fini(adev); + + return 0; +} + +static int dm_suspend(void *handle) +{ + struct amdgpu_device *adev = handle; + struct amdgpu_display_manager *dm = &adev->dm; + int ret = 0; + struct drm_crtc *crtc; + + s3_handle_mst(adev->ddev, true); + + /* flash all pending vblank events and turn interrupt off + * before disabling CRTCs. 
They will be enabled back in + * dm_display_resume + */ + drm_modeset_lock_all(adev->ddev); + list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + if (acrtc->target) + drm_crtc_vblank_off(crtc); + } + drm_modeset_unlock_all(adev->ddev); + + amdgpu_dm_irq_suspend(adev); + + dc_set_power_state( + dm->dc, + DC_ACPI_CM_POWER_STATE_D3, + DC_VIDEO_POWER_SUSPEND); + + return ret; +} + +struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector( + struct drm_atomic_state *state, + struct drm_crtc *crtc, + bool from_state_var) +{ + uint32_t i; + struct drm_connector_state *conn_state; + struct drm_connector *connector; + struct drm_crtc *crtc_from_state; + + for_each_connector_in_state( + state, + connector, + conn_state, + i) { + crtc_from_state = + from_state_var ? + conn_state->crtc : + connector->state->crtc; + + if (crtc_from_state == crtc) + return to_amdgpu_connector(connector); + } + + return NULL; +} + +static int dm_display_resume(struct drm_device *ddev) +{ + int ret = 0; + struct drm_connector *connector; + + struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); + struct drm_plane *plane; + struct drm_crtc *crtc; + struct amdgpu_connector *aconnector; + struct drm_connector_state *conn_state; + + if (!state) + return ENOMEM; + + state->acquire_ctx = ddev->mode_config.acquire_ctx; + + /* Construct an atomic state to restore previous display setting */ + + /* + * Attach connectors to drm_atomic_state + * Should be done in the first place in order to make connectors + * available in state during crtc state processing. It is used for + * making decision if crtc should be disabled in case sink got + * disconnected. + * + * Connectors state crtc with NULL dc_sink should be cleared, because it + * will fail validation during commit + */ + list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + conn_state = drm_atomic_get_connector_state(state, connector); + + ret = PTR_ERR_OR_ZERO(conn_state); + if (ret) + goto err; + } + + /* Attach crtcs to drm_atomic_state*/ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + struct drm_crtc_state *crtc_state = + drm_atomic_get_crtc_state(state, crtc); + + ret = PTR_ERR_OR_ZERO(crtc_state); + if (ret) + goto err; + + /* force a restore */ + crtc_state->mode_changed = true; + } + + + /* Attach planes to drm_atomic_state */ + list_for_each_entry(plane, &ddev->mode_config.plane_list, head) { + + struct drm_crtc *crtc; + struct drm_gem_object *obj; + struct drm_framebuffer *fb; + struct amdgpu_framebuffer *afb; + struct amdgpu_bo *rbo; + int r; + struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane); + + ret = PTR_ERR_OR_ZERO(plane_state); + if (ret) + goto err; + + crtc = plane_state->crtc; + fb = plane_state->fb; + + if (!crtc || !crtc->state || !crtc->state->active) + continue; + + if (!fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + /* + * Pin back the front buffers, cursor buffer was already pinned + * back in amdgpu_resume_kms + */ + + afb = to_amdgpu_framebuffer(fb); + + obj = afb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL); + + amdgpu_bo_unreserve(rbo); + + if (unlikely(r != 0)) { + DRM_ERROR("Failed to pin framebuffer\n"); + return r; + } + + } + + + /* Call commit internally with the state we just constructed */ + ret = 
drm_atomic_commit(state); + if (!ret) + return 0; + +err: + DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_atomic_state_put(state); + + return ret; +} + +static int dm_resume(void *handle) +{ + struct amdgpu_device *adev = handle; + struct amdgpu_display_manager *dm = &adev->dm; + + /* power on hardware */ + dc_set_power_state( + dm->dc, + DC_ACPI_CM_POWER_STATE_D0, + DC_VIDEO_POWER_ON); + + return 0; +} + +int amdgpu_dm_display_resume(struct amdgpu_device *adev ) +{ + struct drm_device *ddev = adev->ddev; + struct amdgpu_display_manager *dm = &adev->dm; + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + int ret = 0; + struct drm_crtc *crtc; + + /* program HPD filter */ + dc_resume(dm->dc); + + /* On resume we need to rewrite the MSTM control bits to enamble MST*/ + s3_handle_mst(ddev, false); + + /* + * early enable HPD Rx IRQ, should be done before set mode as short + * pulse interrupts are used for MST + */ + amdgpu_dm_irq_resume_early(adev); + + drm_modeset_lock_all(ddev); + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + if (acrtc->target) + drm_crtc_vblank_on(crtc); + } + drm_modeset_unlock_all(ddev); + + /* Do detection*/ + list_for_each_entry(connector, + &ddev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + + /* + * this is the case when traversing through already created + * MST connectors, should be skipped + */ + if (aconnector->mst_port) + continue; + + dc_link_detect(aconnector->dc_link, false); + aconnector->dc_sink = NULL; + amdgpu_dm_update_connector_after_detect(aconnector); + } + + drm_modeset_lock_all(ddev); + ret = dm_display_resume(ddev); + drm_modeset_unlock_all(ddev); + + amdgpu_dm_irq_resume(adev); + + return ret; +} + +static const struct amd_ip_funcs amdgpu_dm_funcs = { + .name = "dm", + .early_init = dm_early_init, + .late_init = NULL, + .sw_init = dm_sw_init, + .sw_fini = dm_sw_fini, + .hw_init = dm_hw_init, + .hw_fini = dm_hw_fini, + .suspend = dm_suspend, + .resume = dm_resume, + .is_idle = dm_is_idle, + .wait_for_idle = dm_wait_for_idle, + .check_soft_reset = dm_check_soft_reset, + .soft_reset = dm_soft_reset, + .set_clockgating_state = dm_set_clockgating_state, + .set_powergating_state = dm_set_powergating_state, +}; + +const struct amdgpu_ip_block_version dm_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_dm_funcs, +}; + +/* TODO: it is temporary non-const, should fixed later */ +static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { + .atomic_check = amdgpu_dm_atomic_check, + .atomic_commit = amdgpu_dm_atomic_commit +}; + +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + const struct dc_sink *sink; + + /* MST handled by drm_mst framework */ + if (aconnector->mst_mgr.mst_state == true) + return; + + + sink = aconnector->dc_link->local_sink; + + /* Edid mgmt connector gets first update only in mode_valid hook and then + * the connector sink is set to either fake or physical sink depends on link status. 
+ * don't do it here if u are during boot + */ + if (aconnector->base.force != DRM_FORCE_UNSPECIFIED + && aconnector->dc_em_sink) { + + /* For S3 resume with headless use eml_sink to fake target + * because on resume connecotr->sink is set ti NULL + */ + mutex_lock(&dev->mode_config.mutex); + + if (sink) { + if (aconnector->dc_sink) + amdgpu_dm_remove_sink_from_freesync_module( + connector); + aconnector->dc_sink = sink; + amdgpu_dm_add_sink_to_freesync_module( + connector, aconnector->edid); + } else { + amdgpu_dm_remove_sink_from_freesync_module(connector); + if (!aconnector->dc_sink) + aconnector->dc_sink = aconnector->dc_em_sink; + } + + mutex_unlock(&dev->mode_config.mutex); + return; + } + + /* + * TODO: temporary guard to look for proper fix + * if this sink is MST sink, we should not do anything + */ + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + return; + + if (aconnector->dc_sink == sink) { + /* We got a DP short pulse (Link Loss, DP CTS, etc...). + * Do nothing!! */ + DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n", + aconnector->connector_id); + return; + } + + DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", + aconnector->connector_id, aconnector->dc_sink, sink); + + mutex_lock(&dev->mode_config.mutex); + + /* 1. Update status of the drm connector + * 2. Send an event and let userspace tell us what to do */ + if (sink) { + /* TODO: check if we still need the S3 mode update workaround. + * If yes, put it here. */ + if (aconnector->dc_sink) + amdgpu_dm_remove_sink_from_freesync_module( + connector); + + aconnector->dc_sink = sink; + if (sink->dc_edid.length == 0) + aconnector->edid = NULL; + else { + aconnector->edid = + (struct edid *) sink->dc_edid.raw_edid; + + + drm_mode_connector_update_edid_property(connector, + aconnector->edid); + } + amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid); + + } else { + amdgpu_dm_remove_sink_from_freesync_module(connector); + drm_mode_connector_update_edid_property(connector, NULL); + aconnector->num_modes = 0; + aconnector->dc_sink = NULL; + } + + mutex_unlock(&dev->mode_config.mutex); +} + +static void handle_hpd_irq(void *param) +{ + struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + + /* In case of failure or MST no need to update connector status or notify the OS + * since (for MST case) MST does this in it's own context. 
+ */ + mutex_lock(&aconnector->hpd_lock); + if (dc_link_detect(aconnector->dc_link, false)) { + amdgpu_dm_update_connector_after_detect(aconnector); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_hotplug_event(dev); + } + mutex_unlock(&aconnector->hpd_lock); + +} + +static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector) +{ + uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; + uint8_t dret; + bool new_irq_handled = false; + int dpcd_addr; + int dpcd_bytes_to_read; + + const int max_process_count = 30; + int process_count = 0; + + const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); + + if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { + dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; + /* DPCD 0x200 - 0x201 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT; + } else { + dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; + /* DPCD 0x2002 - 0x2005 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT_ESI; + } + + dret = drm_dp_dpcd_read( + &aconnector->dm_dp_aux.aux, + dpcd_addr, + esi, + dpcd_bytes_to_read); + + while (dret == dpcd_bytes_to_read && + process_count < max_process_count) { + uint8_t retry; + dret = 0; + + process_count++; + + DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) + /* handle HPD short pulse irq */ + if (aconnector->mst_mgr.mst_state) + drm_dp_mst_hpd_irq( + &aconnector->mst_mgr, + esi, + &new_irq_handled); +#endif + + if (new_irq_handled) { + /* ACK at DPCD to notify down stream */ + const int ack_dpcd_bytes_to_write = + dpcd_bytes_to_read - 1; + + for (retry = 0; retry < 3; retry++) { + uint8_t wret; + + wret = drm_dp_dpcd_write( + &aconnector->dm_dp_aux.aux, + dpcd_addr + 1, + &esi[1], + ack_dpcd_bytes_to_write); + if (wret == ack_dpcd_bytes_to_write) + break; + } + + /* check if there is new irq to be handle */ + dret = drm_dp_dpcd_read( + &aconnector->dm_dp_aux.aux, + dpcd_addr, + esi, + dpcd_bytes_to_read); + + new_irq_handled = false; + } else + break; + } + + if (process_count == max_process_count) + DRM_DEBUG_KMS("Loop exceeded max iterations\n"); +} + +static void handle_hpd_rx_irq(void *param) +{ + struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + const struct dc_link *dc_link = aconnector->dc_link; + bool is_mst_root_connector = aconnector->mst_mgr.mst_state; + + /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio + * conflict, after implement i2c helper, this mutex should be + * retired. + */ + if (aconnector->dc_link->type != dc_connection_mst_branch) + mutex_lock(&aconnector->hpd_lock); + + if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) && + !is_mst_root_connector) { + /* Downstream Port status changed. 
*/ + if (dc_link_detect(aconnector->dc_link, false)) { + amdgpu_dm_update_connector_after_detect(aconnector); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_hotplug_event(dev); + } + } + if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || + (dc_link->type == dc_connection_mst_branch)) + dm_handle_hpd_rx_irq(aconnector); + + if (aconnector->dc_link->type != dc_connection_mst_branch) + mutex_unlock(&aconnector->hpd_lock); +} + +static void register_hpd_handlers(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + struct amdgpu_connector *aconnector; + const struct dc_link *dc_link; + struct dc_interrupt_params int_params = {0}; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + + aconnector = to_amdgpu_connector(connector); + dc_link = aconnector->dc_link; + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_irq, + (void *) aconnector); + } + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + + /* Also register for DP short pulse (hpd_rx). */ + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd_rx; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_rx_irq, + (void *) aconnector); + } + } +} + +/* Register IRQ sources and initialize IRQ callbacks */ +static int dce110_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). + * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. 
*/ + + for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; + i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq); + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params); + } + + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; + i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, + &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} + +static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +{ + int r; + + adev->mode_info.mode_config_initialized = true; + + amdgpu_dm_mode_funcs.fb_create = + amdgpu_mode_funcs.fb_create; + amdgpu_dm_mode_funcs.output_poll_changed = + amdgpu_mode_funcs.output_poll_changed; + + adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + /* indicate support of immediate flip */ + adev->ddev->mode_config.async_page_flip = true; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + return 0; +} + +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ + defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + +static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) +{ + struct amdgpu_display_manager *dm = bl_get_data(bd); + + if (dc_link_set_backlight_level(dm->backlight_link, + bd->props.brightness, 0, 0)) + return 0; + else + return 1; +} + +static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) +{ + return bd->props.brightness; +} + +static const struct backlight_ops amdgpu_dm_backlight_ops = { + .get_brightness = amdgpu_dm_backlight_get_brightness, + .update_status = amdgpu_dm_backlight_update_status, +}; + +void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) +{ + char bl_name[16]; + struct backlight_properties props = { 0 }; + + props.max_brightness = AMDGPU_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + + snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", + dm->adev->ddev->primary->index); + + dm->backlight_dev = backlight_device_register(bl_name, + dm->adev->ddev->dev, + dm, + &amdgpu_dm_backlight_ops, + &props); + + if (NULL == dm->backlight_dev) + DRM_ERROR("DM: Backlight registration failed!\n"); + else + DRM_INFO("DM: Registered Backlight device: 
%s\n", bl_name); +} + +#endif + +/* In this architecture, the association + * connector -> encoder -> crtc + * id not really requried. The crtc and connector will hold the + * display_index as an abstraction to use with DAL component + * + * Returns 0 on success + */ +int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + uint32_t i; + struct amdgpu_connector *aconnector; + struct amdgpu_encoder *aencoder; + struct amdgpu_crtc *acrtc; + uint32_t link_cnt; + + link_cnt = dm->dc->caps.max_links; + + if (amdgpu_dm_mode_config_init(dm->adev)) { + DRM_ERROR("DM: Failed to initialize mode config\n"); + return -1; + } + + for (i = 0; i < dm->dc->caps.max_targets; i++) { + acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); + if (!acrtc) + goto fail; + + if (amdgpu_dm_crtc_init( + dm, + acrtc, + i)) { + DRM_ERROR("KMS: Failed to initialize crtc\n"); + kfree(acrtc); + goto fail; + } + } + + dm->display_indexes_num = dm->dc->caps.max_targets; + + /* loops over all connectors on the board */ + for (i = 0; i < link_cnt; i++) { + + if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { + DRM_ERROR( + "KMS: Cannot support more than %d display indexes\n", + AMDGPU_DM_MAX_DISPLAY_INDEX); + continue; + } + + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); + if (!aconnector) + goto fail; + + aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); + if (!aencoder) { + goto fail_free_connector; + } + + if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { + DRM_ERROR("KMS: Failed to initialize encoder\n"); + goto fail_free_encoder; + } + + if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { + DRM_ERROR("KMS: Failed to initialize connector\n"); + goto fail_free_connector; + } + + if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true)) + amdgpu_dm_update_connector_after_detect(aconnector); + } + + /* Software is initialized. Now we can register interrupt handlers. */ + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + if (dce110_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + return -1; + } + break; + default: + DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + return -1; + } + + drm_mode_config_reset(dm->ddev); + + return 0; +fail_free_encoder: + kfree(aencoder); +fail_free_connector: + kfree(aconnector); +fail: + return -1; +} + +void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) +{ + drm_mode_config_cleanup(dm->ddev); + return; +} + +/****************************************************************************** + * amdgpu_display_funcs functions + *****************************************************************************/ + +/** + * dm_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line buffer allocation. 
+ */ +static void dm_bandwidth_update(struct amdgpu_device *adev) +{ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); +} + +static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, + u8 level) +{ + /* TODO: translate amdgpu_encoder to display_index and call DAL */ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); +} + +static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) +{ + /* TODO: translate amdgpu_encoder to display_index and call DAL */ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); + return 0; +} + +/****************************************************************************** + * Page Flip functions + ******************************************************************************/ + +/** + * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered + * via DRM IOCTL, by user mode. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (surface address update). + */ +static void dm_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + struct amdgpu_crtc *acrtc; + struct dc_target *target; + struct dc_flip_addrs addr = { {0} }; + + /* + * TODO risk of concurrency issues + * + * This should guarded by the dal_mutex but we can't do this since the + * caller uses a spin_lock on event_lock. + * + * If we wait on the dal_mutex a second page flip interrupt might come, + * spin on the event_lock, disabling interrupts while it does so. At + * this point the core can no longer be pre-empted and return to the + * thread that waited on the dal_mutex and we're deadlocked. + * + * With multiple cores the same essentially happens but might just take + * a little longer to lock up all cores. + * + * The reason we should lock on dal_mutex is so that we can be sure + * nobody messes with acrtc->target after we read and check its value. + * + * We might be able to fix our concurrency issues with a work queue + * where we schedule all work items (mode_set, page_flip, etc.) and + * execute them one by one. Care needs to be taken to still deal with + * any potential concurrency issues arising from interrupt calls. + */ + + acrtc = adev->mode_info.crtcs[crtc_id]; + target = acrtc->target; + + /* + * Received a page flip call after the display has been reset. + * Just return in this case. Everything should be clean-up on reset. 
+ */ + + if (!target) { + WARN_ON(1); + return; + } + + addr.address.grph.addr.low_part = lower_32_bits(crtc_base); + addr.address.grph.addr.high_part = upper_32_bits(crtc_base); + addr.flip_immediate = async; + + DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n", + __func__, + addr.address.grph.addr.high_part, + addr.address.grph.addr.low_part); + + dc_flip_surface_addrs( + adev->dm.dc, + dc_target_get_status(target)->surfaces, + &addr, 1); +} + +static int amdgpu_notify_freesync(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct mod_freesync_params freesync_params; + uint8_t num_targets; + uint8_t i; + struct dc_target *target; + + struct amdgpu_device *adev = dev->dev_private; + int r = 0; + + /* Get freesync enable flag from DRM */ + + num_targets = dc_get_current_target_count(adev->dm.dc); + + for (i = 0; i < num_targets; i++) { + + target = dc_get_target_at_index(adev->dm.dc, i); + + mod_freesync_update_state(adev->dm.freesync_module, + target->streams, + target->stream_count, + &freesync_params); + } + + return r; +} + +#ifdef CONFIG_DRM_AMDGPU_CIK +static const struct amdgpu_display_funcs dm_dce_v8_0_display_funcs = { + .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ + .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ + .vblank_wait = NULL, + .backlight_set_level = + dm_set_backlight_level,/* called unconditionally */ + .backlight_get_level = + dm_get_backlight_level,/* called unconditionally */ + .hpd_sense = NULL,/* called unconditionally */ + .hpd_set_polarity = NULL, /* called unconditionally */ + .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ + .page_flip = dm_page_flip, /* called unconditionally */ + .page_flip_get_scanoutpos = + dm_crtc_get_scanoutpos,/* called unconditionally */ + .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ + .add_connector = NULL, /* VBIOS parsing. DAL does it. */ + .notify_freesync = amdgpu_notify_freesync, +}; +#endif + +static const struct amdgpu_display_funcs dm_dce_v10_0_display_funcs = { + .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ + .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ + .vblank_wait = NULL, + .backlight_set_level = + dm_set_backlight_level,/* called unconditionally */ + .backlight_get_level = + dm_get_backlight_level,/* called unconditionally */ + .hpd_sense = NULL,/* called unconditionally */ + .hpd_set_polarity = NULL, /* called unconditionally */ + .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ + .page_flip = dm_page_flip, /* called unconditionally */ + .page_flip_get_scanoutpos = + dm_crtc_get_scanoutpos,/* called unconditionally */ + .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ + .add_connector = NULL, /* VBIOS parsing. DAL does it. */ + .notify_freesync = amdgpu_notify_freesync, + +}; + +static const struct amdgpu_display_funcs dm_dce_v11_0_display_funcs = { + .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ + .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ + .vblank_wait = NULL, + .backlight_set_level = + dm_set_backlight_level,/* called unconditionally */ + .backlight_get_level = + dm_get_backlight_level,/* called unconditionally */ + .hpd_sense = NULL,/* called unconditionally */ + .hpd_set_polarity = NULL, /* called unconditionally */ + .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. 
*/ + .page_flip = dm_page_flip, /* called unconditionally */ + .page_flip_get_scanoutpos = + dm_crtc_get_scanoutpos,/* called unconditionally */ + .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ + .add_connector = NULL, /* VBIOS parsing. DAL does it. */ + .notify_freesync = amdgpu_notify_freesync, + +}; + +#if defined(CONFIG_DEBUG_KERNEL_DC) + +static ssize_t s3_debug_store( + struct device *device, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + int s3_state; + struct pci_dev *pdev = to_pci_dev(device); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; + + ret = kstrtoint(buf, 0, &s3_state); + + if (ret == 0) { + if (s3_state) { + dm_resume(adev); + amdgpu_dm_display_resume(adev); + drm_kms_helper_hotplug_event(adev->ddev); + } else + dm_suspend(adev); + } + + return ret == 0 ? count : 0; +} + +DEVICE_ATTR_WO(s3_debug); + +#endif + +static int dm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_dm_set_irq_funcs(adev); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; +#ifdef CONFIG_DRM_AMDGPU_CIK + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v8_0_display_funcs; +#endif + break; + case CHIP_FIJI: + case CHIP_TONGA: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v10_0_display_funcs; + break; + case CHIP_CARRIZO: + adev->mode_info.num_crtc = 3; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v11_0_display_funcs; + break; + case CHIP_STONEY: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v11_0_display_funcs; + break; + case CHIP_POLARIS11: + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v11_0_display_funcs; + break; + case CHIP_POLARIS10: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_dce_v11_0_display_funcs; + break; + default: + DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + return -EINVAL; + } + + /* Note: Do NOT change adev->audio_endpt_rreg and + * adev->audio_endpt_wreg because they are initialised in + * amdgpu_device_init() */ +#if defined(CONFIG_DEBUG_KERNEL_DC) + device_create_file( + adev->ddev->dev, + &dev_attr_s3_debug); +#endif + + return 0; +} + +bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm) +{ + /* TODO */ + return true; +} + +bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm) +{ + /* TODO */ + return true; +} + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h new file mode 100644 index 000000000000..1b54566f5da1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -0,0 +1,171 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_H__ +#define __AMDGPU_DM_H__ + +/* +#include "linux/switch.h" +*/ + +/* + * This file contains the definition for amdgpu_display_manager + * and its API for amdgpu driver's use. + * This component provides all the display related functionality + * and this is the only component that calls DAL API. + * The API contained here intended for amdgpu driver use. + * The API that is called directly from KMS framework is located + * in amdgpu_dm_kms.h file + */ + +#define AMDGPU_DM_MAX_DISPLAY_INDEX 31 +/* +#include "include/amdgpu_dal_power_if.h" +#include "amdgpu_dm_irq.h" +*/ + +#include "irq_types.h" +#include "signal_types.h" + +/* Forward declarations */ +struct amdgpu_device; +struct drm_device; +struct amdgpu_dm_irq_handler_data; + +struct amdgpu_dm_prev_state { + struct drm_framebuffer *fb; + int32_t x; + int32_t y; + struct drm_display_mode mode; +}; + +struct common_irq_params { + struct amdgpu_device *adev; + enum dc_irq_source irq_src; +}; + +struct irq_list_head { + struct list_head head; + /* In case this interrupt needs post-processing, 'work' will be queued*/ + struct work_struct work; +}; + +struct amdgpu_display_manager { + struct dal *dal; + struct dc *dc; + struct cgs_device *cgs_device; + /* lock to be used when DAL is called from SYNC IRQ context */ + spinlock_t dal_lock; + + struct amdgpu_device *adev; /*AMD base driver*/ + struct drm_device *ddev; /*DRM base driver*/ + u16 display_indexes_num; + + struct amdgpu_dm_prev_state prev_state; + + /* + * 'irq_source_handler_table' holds a list of handlers + * per (DAL) IRQ source. + * + * Each IRQ source may need to be handled at different contexts. + * By 'context' we mean, for example: + * - The ISR context, which is the direct interrupt handler. + * - The 'deferred' context - this is the post-processing of the + * interrupt, but at a lower priority. + * + * Note that handlers are called in the same order as they were + * registered (FIFO). 
+ */ + struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; + + struct common_irq_params + pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; + + struct common_irq_params + vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1]; + + /* this spin lock synchronizes access to 'irq_handler_list_table' */ + spinlock_t irq_handler_list_table_lock; + + /* Timer-related data. */ + struct list_head timer_handler_list; + struct workqueue_struct *timer_workqueue; + + /* Use dal_mutex for any activity which is NOT syncronized by + * DRM mode setting locks. + * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without* + * DRM mode setting locks being acquired. This is where dal_mutex + * is acquired before calling into DAL. */ + struct mutex dal_mutex; + + struct backlight_device *backlight_dev; + + const struct dc_link *backlight_link; + + struct work_struct mst_hotplug_work; + + struct mod_freesync *freesync_module; +}; + +/* basic init/fini API */ +int amdgpu_dm_init(struct amdgpu_device *adev); + +void amdgpu_dm_fini(struct amdgpu_device *adev); + +void amdgpu_dm_destroy(void); + +/* initializes drm_device display related structures, based on the information + * provided by DAL. The drm strcutures are: drm_crtc, drm_connector, + * drm_encoder, drm_mode_config + * + * Returns 0 on success + */ +int amdgpu_dm_initialize_drm_device( + struct amdgpu_device *adev); + +/* removes and deallocates the drm structures, created by the above function */ +void amdgpu_dm_destroy_drm_device( + struct amdgpu_display_manager *dm); + +/* Locking/Mutex */ +bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm); + +bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm); + +/* Register "Backlight device" accessible by user-mode. */ +void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm); + +extern const struct amdgpu_ip_block_version dm_ip_block; + +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_connector *aconnector); + +struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector( + struct drm_atomic_state *state, + struct drm_crtc *crtc, + bool from_state_var); + +#endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c new file mode 100644 index 000000000000..d4e01b51f949 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -0,0 +1,484 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/string.h> +#include <linux/acpi.h> +#include <linux/version.h> +#include <linux/i2c.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/amdgpu_drm.h> +#include <drm/drm_edid.h> + +#include "dm_services.h" +#include "amdgpu.h" +#include "dc.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" +#include "amdgpu_dm_types.h" + +#include "dm_helpers.h" + +/* dm_helpers_parse_edid_caps + * + * Parse edid caps + * + * @edid: [in] pointer to edid + * edid_caps: [in] pointer to edid caps + * @return + * void + * */ +enum dc_edid_status dm_helpers_parse_edid_caps( + struct dc_context *ctx, + const struct dc_edid *edid, + struct dc_edid_caps *edid_caps) +{ + struct edid *edid_buf = (struct edid *) edid->raw_edid; + struct cea_sad *sads; + int sad_count = -1; + int sadb_count = -1; + int i = 0; + int j = 0; + uint8_t *sadb = NULL; + + enum dc_edid_status result = EDID_OK; + + if (!edid_caps || !edid) + return EDID_BAD_INPUT; + + if (!drm_edid_is_valid(edid_buf)) + result = EDID_BAD_CHECKSUM; + + edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | + ((uint16_t) edid_buf->mfg_id[1])<<8; + edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | + ((uint16_t) edid_buf->prod_code[1])<<8; + edid_caps->serial_number = edid_buf->serial; + edid_caps->manufacture_week = edid_buf->mfg_week; + edid_caps->manufacture_year = edid_buf->mfg_year; + + /* One of the four detailed_timings stores the monitor name. It's + * stored in an array of length 13. */ + for (i = 0; i < 4; i++) { + if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) { + while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) { + if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n') + break; + + edid_caps->display_name[j] = + edid_buf->detailed_timings[i].data.other_data.data.str.str[j]; + j++; + } + } + } + + edid_caps->edid_hdmi = drm_detect_hdmi_monitor( + (struct edid *) edid->raw_edid); + + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); + if (sad_count <= 0) { + DRM_INFO("SADs count is: %d, don't need to read it\n", + sad_count); + return result; + } + + edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? 
sad_count : DC_MAX_AUDIO_DESC_COUNT; + for (i = 0; i < edid_caps->audio_mode_count; ++i) { + struct cea_sad *sad = &sads[i]; + + edid_caps->audio_modes[i].format_code = sad->format; + edid_caps->audio_modes[i].channel_count = sad->channels; + edid_caps->audio_modes[i].sample_rate = sad->freq; + edid_caps->audio_modes[i].sample_size = sad->byte2; + } + + sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); + + if (sadb_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); + sadb_count = 0; + } + + if (sadb_count) + edid_caps->speaker_flags = sadb[0]; + else + edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; + + kfree(sads); + kfree(sadb); + + return result; +} + +static struct amdgpu_connector *get_connector_for_sink( + struct drm_device *dev, + const struct dc_sink *sink) +{ + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_sink == sink) + return aconnector; + } + + return NULL; +} + +static struct amdgpu_connector *get_connector_for_link( + struct drm_device *dev, + const struct dc_link *link) +{ + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_link == link) + return aconnector; + } + + return NULL; +} + +static void get_payload_table( + struct amdgpu_connector *aconnector, + struct dp_mst_stream_allocation_table *proposed_table) +{ + int i; + struct drm_dp_mst_topology_mgr *mst_mgr = + &aconnector->mst_port->mst_mgr; + + mutex_lock(&mst_mgr->payload_lock); + + proposed_table->stream_count = 0; + + /* number of active streams */ + for (i = 0; i < mst_mgr->max_payloads; i++) { + if (mst_mgr->payloads[i].num_slots == 0) + break; /* end of vcp_id table */ + + ASSERT(mst_mgr->payloads[i].payload_state != + DP_PAYLOAD_DELETE_LOCAL); + + if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || + mst_mgr->payloads[i].payload_state == + DP_PAYLOAD_REMOTE) { + + struct dp_mst_stream_allocation *sa = + &proposed_table->stream_allocations[ + proposed_table->stream_count]; + + sa->slot_count = mst_mgr->payloads[i].num_slots; + sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; + proposed_table->stream_count++; + } + } + + mutex_unlock(&mst_mgr->payload_lock); +} + +/* + * Writes payload allocation table in immediate downstream device. 
+ */ +bool dm_helpers_dp_mst_write_payload_allocation_table( + struct dc_context *ctx, + const struct dc_stream *stream, + struct dp_mst_stream_allocation_table *proposed_table, + bool enable) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + int slots = 0; + bool ret; + int clock; + int bpp = 0; + int pbn = 0; + + aconnector = get_connector_for_sink(dev, stream->sink); + + if (!aconnector || !aconnector->mst_port) + return false; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + mst_port = aconnector->port; + + if (enable) { + clock = stream->timing.pix_clk_khz; + + switch (stream->timing.display_color_depth) { + + case COLOR_DEPTH_666: + bpp = 6; + break; + case COLOR_DEPTH_888: + bpp = 8; + break; + case COLOR_DEPTH_101010: + bpp = 10; + break; + case COLOR_DEPTH_121212: + bpp = 12; + break; + case COLOR_DEPTH_141414: + bpp = 14; + break; + case COLOR_DEPTH_161616: + bpp = 16; + break; + default: + ASSERT(bpp != 0); + break; + } + + bpp = bpp * 3; + + /* TODO need to know link rate */ + + pbn = drm_dp_calc_pbn_mode(clock, bpp); + + slots = drm_dp_find_vcpi_slots(mst_mgr, pbn); + ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots); + + if (!ret) + return false; + + } else { + drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); + } + + ret = drm_dp_update_payload_part1(mst_mgr); + + /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or + * AUX message. The sequence is slot 1-63 allocated sequence for each + * stream. AMD ASIC stream slot allocation should follow the same + * sequence. copy DRM MST allocation to dc */ + + get_payload_table(aconnector, proposed_table); + + if (ret) + return false; + + return true; +} + +/* + * Polls for ACT (allocation change trigger) handled and sends + * ALLOCATE_PAYLOAD message. 
+ */ +bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( + struct dc_context *ctx, + const struct dc_stream *stream) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + int ret; + + aconnector = get_connector_for_sink(dev, stream->sink); + + if (!aconnector || !aconnector->mst_port) + return false; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + ret = drm_dp_check_act_status(mst_mgr); + + if (ret) + return false; + + return true; +} + +bool dm_helpers_dp_mst_send_payload_allocation( + struct dc_context *ctx, + const struct dc_stream *stream, + bool enable) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + int ret; + + aconnector = get_connector_for_sink(dev, stream->sink); + + if (!aconnector || !aconnector->mst_port) + return false; + + mst_port = aconnector->port; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + ret = drm_dp_update_payload_part2(mst_mgr); + + if (ret) + return false; + + if (!enable) + drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); + + return true; +} + +bool dm_helpers_dp_mst_start_top_mgr( + struct dc_context *ctx, + const struct dc_link *link, + bool boot) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector = get_connector_for_link(dev, link); + + if (!aconnector) { + DRM_ERROR("Failed to found connector for link!"); + return false; + } + + if (boot) { + DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", + aconnector, aconnector->base.base.id); + return true; + } + + DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", + aconnector, aconnector->base.base.id); + + return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); +} + +void dm_helpers_dp_mst_stop_top_mgr( + struct dc_context *ctx, + const struct dc_link *link) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector = get_connector_for_link(dev, link); + + if (!aconnector) { + DRM_ERROR("Failed to found connector for link!"); + return; + } + + DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", + aconnector, aconnector->base.base.id); + + if (aconnector->mst_mgr.mst_state == true) + drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); +} + +bool dm_helpers_dp_read_dpcd( + struct dc_context *ctx, + const struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size) +{ + + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector = get_connector_for_link(dev, link); + + if (!aconnector) { + DRM_ERROR("Failed to found connector for link!"); + return false; + } + + return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, + data, size) > 0; +} + +bool dm_helpers_dp_write_dpcd( + struct dc_context *ctx, + const struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size) +{ + + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector = get_connector_for_link(dev, link); + + if (!aconnector) { + DRM_ERROR("Failed to found connector for link!"); + 
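/*
 * For illustration: a minimal sketch (hypothetical caller, not taken from DC)
 * of the order in which the three MST helpers above are meant to be driven
 * when enabling a stream: write the payload table (part 1), poll for the ACT,
 * then send the payload allocation (part 2).
 */
static bool example_enable_mst_stream(struct dc_context *ctx,
				      const struct dc_stream *stream)
{
	struct dp_mst_stream_allocation_table table = { 0 };

	/* Allocate a VCPI and program the payload table (part 1). */
	if (!dm_helpers_dp_mst_write_payload_allocation_table(ctx, stream,
							      &table, true))
		return false;

	/* Wait for the branch device to ack the allocation change. */
	if (!dm_helpers_dp_mst_poll_for_allocation_change_trigger(ctx, stream))
		return false;

	/* Kick off payload transmission (part 2). */
	return dm_helpers_dp_mst_send_payload_allocation(ctx, stream, true);
}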
return false; + } + + return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, + address, (uint8_t *)data, size) > 0; +} + +bool dm_helpers_submit_i2c( + struct dc_context *ctx, + const struct dc_link *link, + struct i2c_command *cmd) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector = get_connector_for_link(dev, link); + struct i2c_msg *msgs; + int i = 0; + int num = cmd->number_of_payloads; + bool result; + + if (!aconnector) { + DRM_ERROR("Failed to found connector for link!"); + return false; + } + + msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL); + + if (!msgs) + return false; + + for (i = 0; i < num; i++) { + msgs[i].flags = cmd->payloads[i].write ? I2C_M_RD : 0; + msgs[i].addr = cmd->payloads[i].address; + msgs[i].len = cmd->payloads[i].length; + msgs[i].buf = cmd->payloads[i].data; + } + + result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; + + kfree(msgs); + + return result; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c new file mode 100644 index 000000000000..20e074971bff --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -0,0 +1,829 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drmP.h> + +#include "dm_services_types.h" +#include "dc.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" + +/****************************************************************************** + * Private declarations. + *****************************************************************************/ + +struct handler_common_data { + struct list_head list; + interrupt_handler handler; + void *handler_arg; + + /* DM which this handler belongs to */ + struct amdgpu_display_manager *dm; +}; + +struct amdgpu_dm_irq_handler_data { + struct handler_common_data hcd; + /* DAL irq source which registered for this interrupt. 
*/ + enum dc_irq_source irq_source; +}; + +struct amdgpu_dm_timer_handler_data { + struct handler_common_data hcd; + struct delayed_work d_work; +}; + +#define DM_IRQ_TABLE_LOCK(adev, flags) \ + spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) + +#define DM_IRQ_TABLE_UNLOCK(adev, flags) \ + spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) + +/****************************************************************************** + * Private functions. + *****************************************************************************/ + +static void init_handler_common_data( + struct handler_common_data *hcd, + void (*ih)(void *), + void *args, + struct amdgpu_display_manager *dm) +{ + hcd->handler = ih; + hcd->handler_arg = args; + hcd->dm = dm; +} + +/** + * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. + * + * @work: work struct + */ +static void dm_irq_work_func(struct work_struct *work) +{ + struct list_head *entry; + struct irq_list_head *irq_list_head = + container_of(work, struct irq_list_head, work); + struct list_head *handler_list = &irq_list_head->head; + struct amdgpu_dm_irq_handler_data *handler_data; + + list_for_each(entry, handler_list) { + handler_data = + list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + hcd.list); + + DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", + handler_data->irq_source); + + DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n", + handler_data->irq_source); + + handler_data->hcd.handler(handler_data->hcd.handler_arg); + } + + /* Call a DAL subcomponent which registered for interrupt notification + * at INTERRUPT_LOW_IRQ_CONTEXT. + * (The most common use is HPD interrupt) */ +} + +/** + * Remove a handler and return a pointer to hander list from which the + * handler was removed. + */ +static struct list_head *remove_irq_handler( + struct amdgpu_device *adev, + void *ih, + const struct dc_interrupt_params *int_params) +{ + struct list_head *hnd_list; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; + unsigned long irq_table_flags; + bool handler_removed = false; + enum dc_irq_source irq_source; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + irq_source = int_params->irq_source; + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + break; + } + + list_for_each_safe(entry, tmp, hnd_list) { + + handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, + hcd.list); + + if (ih == handler) { + /* Found our handler. Remove it from the list. */ + list_del(&handler->hcd.list); + handler_removed = true; + break; + } + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (handler_removed == false) { + /* Not necessarily an error - caller may not + * know the context. */ + return NULL; + } + + kfree(handler); + + DRM_DEBUG_KMS( + "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", + ih, int_params->irq_source, int_params->int_context); + + return hnd_list; +} + +/* If 'handler_in == NULL' then remove ALL handlers. 
*/ +static void remove_timer_handler( + struct amdgpu_device *adev, + struct amdgpu_dm_timer_handler_data *handler_in) +{ + struct amdgpu_dm_timer_handler_data *handler_temp; + struct list_head *handler_list; + struct list_head *entry, *tmp; + unsigned long irq_table_flags; + bool handler_removed = false; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + handler_list = &adev->dm.timer_handler_list; + + list_for_each_safe(entry, tmp, handler_list) { + /* Note that list_for_each_safe() guarantees that + * handler_temp is NOT null. */ + handler_temp = list_entry(entry, + struct amdgpu_dm_timer_handler_data, hcd.list); + + if (handler_in == NULL || handler_in == handler_temp) { + list_del(&handler_temp->hcd.list); + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n", + handler_temp); + + if (handler_in == NULL) { + /* Since it is still in the queue, it must + * be cancelled. */ + cancel_delayed_work_sync(&handler_temp->d_work); + } + + kfree(handler_temp); + handler_removed = true; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + } + + if (handler_in == NULL) { + /* Remove ALL handlers. */ + continue; + } + + if (handler_in == handler_temp) { + /* Remove a SPECIFIC handler. + * Found our handler - we can stop here. */ + break; + } + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (handler_in != NULL && handler_removed == false) { + DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n", + handler_in); + } +} + +/** + * dm_timer_work_func - Handle a timer. + * + * @work: work struct + */ +static void dm_timer_work_func( + struct work_struct *work) +{ + struct amdgpu_dm_timer_handler_data *handler_data = + container_of(work, struct amdgpu_dm_timer_handler_data, + d_work.work); + + DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data); + + /* Call a DAL subcomponent which registered for timer notification. */ + handler_data->hcd.handler(handler_data->hcd.handler_arg); + + /* We support only "single shot" timers. That means we must delete + * the handler after it was called. */ + remove_timer_handler(handler_data->hcd.dm->adev, handler_data); +} + +static bool validate_irq_registration_params( + struct dc_interrupt_params *int_params, + void (*ih)(void *)) +{ + if (NULL == int_params || NULL == ih) { + DRM_ERROR("DM_IRQ: invalid input!\n"); + return false; + } + + if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) { + DRM_ERROR("DM_IRQ: invalid context: %d!\n", + int_params->int_context); + return false; + } + + if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) { + DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", + int_params->irq_source); + return false; + } + + return true; +} + +static bool validate_irq_unregistration_params( + enum dc_irq_source irq_source, + irq_handler_idx handler_idx) +{ + if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); + return false; + } + + if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) { + DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source); + return false; + } + + return true; +} +/****************************************************************************** + * Public functions. + * + * Note: caller is responsible for input validation. 
+ *****************************************************************************/ + +void *amdgpu_dm_irq_register_interrupt( + struct amdgpu_device *adev, + struct dc_interrupt_params *int_params, + void (*ih)(void *), + void *handler_args) +{ + struct list_head *hnd_list; + struct amdgpu_dm_irq_handler_data *handler_data; + unsigned long irq_table_flags; + enum dc_irq_source irq_source; + + if (false == validate_irq_registration_params(int_params, ih)) + return DAL_INVALID_IRQ_HANDLER_IDX; + + handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data) { + DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); + return DAL_INVALID_IRQ_HANDLER_IDX; + } + + memset(handler_data, 0, sizeof(*handler_data)); + + init_handler_common_data(&handler_data->hcd, ih, handler_args, + &adev->dm); + + irq_source = int_params->irq_source; + + handler_data->irq_source = irq_source; + + /* Lock the list, add the handler. */ + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + break; + } + + list_add_tail(&handler_data->hcd.list, hnd_list); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + /* This pointer will be stored by code which requested interrupt + * registration. + * The same pointer will be needed in order to unregister the + * interrupt. */ + + DRM_DEBUG_KMS( + "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", + handler_data, + irq_source, + int_params->int_context); + + return handler_data; +} + +void amdgpu_dm_irq_unregister_interrupt( + struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih) +{ + struct list_head *handler_list; + struct dc_interrupt_params int_params; + int i; + + if (false == validate_irq_unregistration_params(irq_source, ih)) + return; + + memset(&int_params, 0, sizeof(int_params)); + + int_params.irq_source = irq_source; + + for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { + + int_params.int_context = i; + + handler_list = remove_irq_handler(adev, ih, &int_params); + + if (handler_list != NULL) + break; + } + + if (handler_list == NULL) { + /* If we got here, it means we searched all irq contexts + * for this irq source, but the handler was not found. 
*/ + DRM_ERROR( + "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", + ih, irq_source); + } +} + +int amdgpu_dm_irq_init( + struct amdgpu_device *adev) +{ + int src; + struct irq_list_head *lh; + + DRM_DEBUG_KMS("DM_IRQ\n"); + + spin_lock_init(&adev->dm.irq_handler_list_table_lock); + + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + /* low context handler list init */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + INIT_LIST_HEAD(&lh->head); + INIT_WORK(&lh->work, dm_irq_work_func); + + /* high context handler init */ + INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); + } + + INIT_LIST_HEAD(&adev->dm.timer_handler_list); + + /* allocate and initialize the workqueue for DM timer */ + adev->dm.timer_workqueue = create_singlethread_workqueue( + "dm_timer_queue"); + if (adev->dm.timer_workqueue == NULL) { + DRM_ERROR("DM_IRQ: unable to create timer queue!\n"); + return -1; + } + + return 0; +} + +void amdgpu_dm_irq_register_timer( + struct amdgpu_device *adev, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args) +{ + unsigned long jf_delay; + struct list_head *handler_list; + struct amdgpu_dm_timer_handler_data *handler_data; + unsigned long irq_table_flags; + + handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data) { + DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n"); + return; + } + + memset(handler_data, 0, sizeof(*handler_data)); + + init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm); + + INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func); + + /* Lock the list, add the handler. */ + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + handler_list = &adev->dm.timer_handler_list; + + list_add_tail(&handler_data->hcd.list, handler_list); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + jf_delay = usecs_to_jiffies(int_params->micro_sec_interval); + + queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work, + jf_delay); + + DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%u\n", + handler_data, int_params->micro_sec_interval); + return; +} + +/* DM IRQ and timer resource release */ +void amdgpu_dm_irq_fini( + struct amdgpu_device *adev) +{ + int src; + struct irq_list_head *lh; + DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); + + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + + /* The handler was removed from the table, + * it means it is safe to flush all the 'work' + * (because no code can schedule a new one). */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + flush_work(&lh->work); + } + + /* Cancel ALL timers and release handlers (if any). */ + remove_timer_handler(adev, NULL); + /* Release the queue itself. 
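/*
 * For illustration: how a client is expected to use the registration API
 * above.  example_hpd_handler() and its argument are hypothetical; the handle
 * returned by amdgpu_dm_irq_register_interrupt() must be kept so the handler
 * can later be removed with amdgpu_dm_irq_unregister_interrupt().
 */
static void example_hpd_handler(void *arg)
{
	/* Runs from the low-IRQ-context work function, so it may sleep. */
}

static void *example_register_hpd_handler(struct amdgpu_device *adev,
					  void *arg)
{
	struct dc_interrupt_params int_params = { 0 };

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	return amdgpu_dm_irq_register_interrupt(adev, &int_params,
						example_hpd_handler, arg);
}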
*/ + destroy_workqueue(adev->dm.timer_workqueue); +} + +int amdgpu_dm_irq_suspend( + struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h; + struct list_head *hnd_list_l; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: suspend\n"); + + /* disable HW interrupt */ + for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) { + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; + if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) + dc_interrupt_set(adev->dm.dc, src, false); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + flush_work(&adev->dm.irq_handler_list_low_tab[src].work); + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + return 0; +} + +int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h, *hnd_list_l; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: early resume\n"); + + /* re-enable short pulse interrupts HW interrupt */ + for (src = DC_IRQ_SOURCE_HPD1RX; src < DC_IRQ_SOURCE_HPD6RX + 1; src++) { + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; + if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) + dc_interrupt_set(adev->dm.dc, src, true); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + return 0; +} + +int amdgpu_dm_irq_resume(struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h, *hnd_list_l; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: resume\n"); + + /* re-enable HW interrupt */ + for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) { + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; + if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) + dc_interrupt_set(adev->dm.dc, src, true); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + return 0; +} + +/** + * amdgpu_dm_irq_schedule_work - schedule all work items registered for the + * "irq_source". 
+ */ +static void amdgpu_dm_irq_schedule_work( + struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + unsigned long irq_table_flags; + struct work_struct *work = NULL; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head)) + work = &adev->dm.irq_handler_list_low_tab[irq_source].work; + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (work) { + if (!schedule_work(work)) + DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n", + irq_source); + } + +} + +/** amdgpu_dm_irq_immediate_work + * Callback high irq work immediately, don't send to work queue + */ +static void amdgpu_dm_irq_immediate_work( + struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + struct amdgpu_dm_irq_handler_data *handler_data; + struct list_head *entry; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + list_for_each( + entry, + &adev->dm.irq_handler_list_high_tab[irq_source]) { + + handler_data = + list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + hcd.list); + + /* Call a subcomponent which registered for immediate + * interrupt notification */ + handler_data->hcd.handler(handler_data->hcd.handler_arg); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); +} + +/* + * amdgpu_dm_irq_handler + * + * Generic IRQ handler, calls all registered high irq work immediately, and + * schedules work for low irq + */ +int amdgpu_dm_irq_handler( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + + enum dc_irq_source src = + dc_interrupt_to_irq_source( + adev->dm.dc, + entry->src_id, + entry->src_data[0]); + + dc_interrupt_ack(adev->dm.dc, src); + + /* Call high irq work immediately */ + amdgpu_dm_irq_immediate_work(adev, src); + /*Schedule low_irq work */ + amdgpu_dm_irq_schedule_work(adev, src); + + return 0; +} + +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +{ + switch (type) { + case AMDGPU_HPD_1: + return DC_IRQ_SOURCE_HPD1; + case AMDGPU_HPD_2: + return DC_IRQ_SOURCE_HPD2; + case AMDGPU_HPD_3: + return DC_IRQ_SOURCE_HPD3; + case AMDGPU_HPD_4: + return DC_IRQ_SOURCE_HPD4; + case AMDGPU_HPD_5: + return DC_IRQ_SOURCE_HPD5; + case AMDGPU_HPD_6: + return DC_IRQ_SOURCE_HPD6; + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); + bool st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, src, st); + return 0; +} + +static inline int dm_irq_state( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state, + const enum irq_type dal_irq_type, + const char *func) +{ + bool st; + enum dc_irq_source irq_source; + + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; + + if (!acrtc) { + DRM_ERROR( + "%s: crtc is NULL at id :%d\n", + func, + crtc_id); + return 0; + } + + irq_source = dal_irq_type + acrtc->otg_inst; + + st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, irq_source, st); + return 0; +} + +static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_PFLIP, + __func__); +} + +static int 
amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VUPDATE, + __func__); +} + +static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { + .set = amdgpu_dm_set_crtc_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { + .set = amdgpu_dm_set_pflip_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { + .set = amdgpu_dm_set_hpd_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dm_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dm_hpd_irq_funcs; +} + +/* + * amdgpu_dm_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ +void amdgpu_dm_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + + const struct dc_link *dc_link = amdgpu_connector->dc_link; + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd_rx, + true); + } + } +} + +/** + * amdgpu_dm_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. + */ +void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + const struct dc_link *dc_link = amdgpu_connector->dc_link; + + dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd_rx, + false); + } + } +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h new file mode 100644 index 000000000000..9339861c8897 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h @@ -0,0 +1,122 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
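/*
 * For illustration: dm_irq_state() above derives the per-CRTC DC interrupt
 * source by adding the CRTC's OTG instance to the base source of the
 * requested type, mirroring "irq_source = dal_irq_type + acrtc->otg_inst".
 */
static enum dc_irq_source example_vupdate_source(const struct amdgpu_crtc *acrtc)
{
	/* Assumes IRQ_TYPE_VUPDATE names the VUPDATE source for OTG instance 0. */
	return IRQ_TYPE_VUPDATE + acrtc->otg_inst;
}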
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_DM_IRQ_H__ +#define __AMDGPU_DM_IRQ_H__ + +#include "irq_types.h" /* DAL irq definitions */ + +/* + * Display Manager IRQ-related interfaces (for use by DAL). + */ + +/** + * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'. + * + * This function should be called exactly once - during DM initialization. + * + * Returns: + * 0 - success + * non-zero - error + */ +int amdgpu_dm_irq_init( + struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'. + * + * This function should be called exactly once - during DM destruction. + * + */ +void amdgpu_dm_irq_fini( + struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_register_interrupt - register irq handler for Display block. + * + * @adev: AMD DRM device + * @int_params: parameters for the irq + * @ih: pointer to the irq hander function + * @handler_args: arguments which will be passed to ih + * + * Returns: + * IRQ Handler Index on success. + * NULL on failure. + * + * Cannot be called from an interrupt handler. + */ +void *amdgpu_dm_irq_register_interrupt( + struct amdgpu_device *adev, + struct dc_interrupt_params *int_params, + void (*ih)(void *), + void *handler_args); + +/** + * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered + * by amdgpu_dm_irq_register_interrupt(). + * + * @adev: AMD DRM device. + * @ih_index: irq handler index which was returned by + * amdgpu_dm_irq_register_interrupt + */ +void amdgpu_dm_irq_unregister_interrupt( + struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih_index); + +void amdgpu_dm_irq_register_timer( + struct amdgpu_device *adev, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args); + +/** + * amdgpu_dm_irq_handler + * Generic IRQ handler, calls all registered high irq work immediately, and + * schedules work for low irq + */ +int amdgpu_dm_irq_handler( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev); + +void amdgpu_dm_hpd_init(struct amdgpu_device *adev); +void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend. + * + */ +int amdgpu_dm_irq_suspend(struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume. 
+ * amdgpu_dm_irq_resume - enable ASIC interrupt during resume. + * + */ +int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); +int amdgpu_dm_irq_resume(struct amdgpu_device *adev); + +#endif /* __AMDGPU_DM_IRQ_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c new file mode 100644 index 000000000000..e4d94d4c215d --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -0,0 +1,443 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/version.h> +#include <drm/drm_atomic_helper.h> +#include "dm_services.h" +#include "amdgpu.h" +#include "amdgpu_dm_types.h" +#include "amdgpu_dm_mst_types.h" + +#include "dc.h" +#include "dm_helpers.h" + +/* #define TRACE_DPCD */ + +#ifdef TRACE_DPCD +#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI) + +static inline char *side_band_msg_type_to_str(uint32_t address) +{ + static char str[10] = {0}; + + if (address < DP_SIDEBAND_MSG_UP_REP_BASE) + strcpy(str, "DOWN_REQ"); + else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE) + strcpy(str, "UP_REP"); + else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE) + strcpy(str, "DOWN_REP"); + else + strcpy(str, "UP_REQ"); + + return str; +} + +void log_dpcd(uint8_t type, + uint32_t address, + uint8_t *data, + uint32_t size, + bool res) +{ + DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n", + (type == DP_AUX_NATIVE_READ) || + (type == DP_AUX_I2C_READ) ? + "Read" : "Write", + address, + SIDE_BAND_MSG(address) ? + side_band_msg_type_to_str(address) : "Nop", + res ? 
"OK" : "Fail"); + + if (res) { + print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false); + } +} +#endif + +static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct pci_dev *pdev = to_pci_dev(aux->dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; + struct dc *dc = adev->dm.dc; + bool res; + + switch (msg->request) { + case DP_AUX_NATIVE_READ: + res = dc_read_dpcd( + dc, + TO_DM_AUX(aux)->link_index, + msg->address, + msg->buffer, + msg->size); + break; + case DP_AUX_NATIVE_WRITE: + res = dc_write_dpcd( + dc, + TO_DM_AUX(aux)->link_index, + msg->address, + msg->buffer, + msg->size); + break; + default: + return 0; + } + +#ifdef TRACE_DPCD + log_dpcd(msg->request, + msg->address, + msg->buffer, + msg->size, + res); +#endif + + return msg->size; +} + +static enum drm_connector_status +dm_dp_mst_detect(struct drm_connector *connector, bool force) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + struct amdgpu_connector *master = aconnector->mst_port; + + enum drm_connector_status status = + drm_dp_mst_detect_port( + connector, + &master->mst_mgr, + aconnector->port); + + /* + * we do not want to make this connector connected until we have edid on + * it + */ + if (status == connector_status_connected && + !aconnector->port->cached_edid) + status = connector_status_disconnected; + + return status; +} + +static void +dm_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder; + + drm_encoder_cleanup(&amdgpu_encoder->base); + kfree(amdgpu_encoder); + drm_connector_cleanup(connector); + kfree(amdgpu_connector); +} + +static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { + .detect = dm_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = dm_dp_mst_connector_destroy, + .reset = amdgpu_dm_connector_funcs_reset, + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_set_property = amdgpu_dm_connector_atomic_set_property +}; + +static int dm_dp_mst_get_modes(struct drm_connector *connector) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + int ret = 0; + + ret = drm_add_edid_modes(&aconnector->base, aconnector->edid); + + drm_edid_to_eld(&aconnector->base, aconnector->edid); + + return ret; +} + +static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + return &amdgpu_connector->mst_encoder->base; +} + +static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { + .get_modes = dm_dp_mst_get_modes, + .mode_valid = amdgpu_dm_connector_mode_valid, + .best_encoder = dm_mst_best_encoder, +}; + +static struct amdgpu_encoder * +dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder; + struct drm_encoder *encoder; + const struct drm_connector_helper_funcs *connector_funcs = + connector->base.helper_private; + struct drm_encoder *enc_master = + connector_funcs->best_encoder(&connector->base); + + DRM_DEBUG_KMS("enc master is %p\n", enc_master); + amdgpu_encoder = 
kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return NULL; + + encoder = &amdgpu_encoder->base; + encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); + + drm_encoder_init( + dev, + &amdgpu_encoder->base, + NULL, + DRM_MODE_ENCODER_DPMST, + NULL); + + drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); + + return amdgpu_encoder; +} + +static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + const char *pathprop) +{ + struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->mst_port == master + && !aconnector->port) { + DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + aconnector->port = port; + drm_mode_connector_set_path_property(connector, pathprop); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + return &aconnector->base; + } + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); + if (!aconnector) + return NULL; + + connector = &aconnector->base; + aconnector->port = port; + aconnector->mst_port = master; + + if (drm_connector_init( + dev, + connector, + &dm_dp_mst_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort)) { + kfree(aconnector); + return NULL; + } + drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); + + amdgpu_dm_connector_init_helper( + &adev->dm, + aconnector, + DRM_MODE_CONNECTOR_DisplayPort, + master->dc_link, + master->connector_id); + + aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); + + /* + * TODO: understand why this one is needed + */ + drm_object_attach_property( + &connector->base, + dev->mode_config.path_property, + 0); + drm_object_attach_property( + &connector->base, + dev->mode_config.tile_property, + 0); + + drm_mode_connector_set_path_property(connector, pathprop); + + /* + * Initialize connector state before adding the connectror to drm and + * framebuffer lists + */ + amdgpu_dm_connector_funcs_reset(connector); + + DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + DRM_DEBUG_KMS(":%d\n", connector->base.id); + + return connector; +} + +static void dm_dp_destroy_mst_connector( + struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + + DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + aconnector->port = NULL; + if (aconnector->dc_sink) { + amdgpu_dm_remove_sink_from_freesync_module(connector); + dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + } + if (aconnector->edid) { + kfree(aconnector->edid); + aconnector->edid = NULL; + } + + drm_mode_connector_update_edid_property( + &aconnector->base, + NULL); +} + +static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct amdgpu_connector *master = 
container_of(mgr, struct amdgpu_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; + struct drm_connector *connector; + struct amdgpu_connector *aconnector; + struct edid *edid; + + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->port && + aconnector->port->pdt != DP_PEER_DEVICE_NONE && + aconnector->port->pdt != DP_PEER_DEVICE_MST_BRANCHING && + !aconnector->dc_sink) { + /* + * This is plug in case, where port has been created but + * sink hasn't been created yet + */ + if (!aconnector->edid) { + struct dc_sink_init_data init_params = { + .link = aconnector->dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST}; + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); + + if (!edid) { + drm_mode_connector_update_edid_property( + &aconnector->base, + NULL); + continue; + } + + aconnector->edid = edid; + + aconnector->dc_sink = dc_link_add_remote_sink( + aconnector->dc_link, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, + &init_params); + if (aconnector->dc_sink) + amdgpu_dm_add_sink_to_freesync_module( + connector, + edid); + + dm_restore_drm_connector_state(connector->dev, connector); + } else + edid = aconnector->edid; + + DRM_DEBUG_KMS("edid retrieved %p\n", edid); + + drm_mode_connector_update_edid_property( + &aconnector->base, + aconnector->edid); + } + } + drm_modeset_unlock_all(dev); + + schedule_work(&adev->dm.mst_hotplug_work); +} + +static void dm_dp_mst_register_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = dev->dev_private; + int i; + + drm_modeset_lock_all(dev); + if (adev->mode_info.rfbdev) { + /*Do not add if already registered in past*/ + for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) { + if (adev->mode_info.rfbdev->helper.connector_info[i]->connector + == connector) { + drm_modeset_unlock_all(dev); + return; + } + } + + drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); + } + else + DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); + + drm_modeset_unlock_all(dev); + + drm_connector_register(connector); + +} + +static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { + .add_connector = dm_dp_add_mst_connector, + .destroy_connector = dm_dp_destroy_mst_connector, + .hotplug = dm_dp_mst_hotplug, + .register_connector = dm_dp_mst_register_connector +}; + +void amdgpu_dm_initialize_mst_connector( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector) +{ + aconnector->dm_dp_aux.aux.name = "dmdc"; + aconnector->dm_dp_aux.aux.dev = dm->adev->dev; + aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; + aconnector->dm_dp_aux.link_index = aconnector->connector_id; + + drm_dp_aux_register(&aconnector->dm_dp_aux.aux); + aconnector->mst_mgr.cbs = &dm_mst_cbs; + drm_dp_mst_topology_mgr_init( + &aconnector->mst_mgr, + dm->adev->ddev, + &aconnector->dm_dp_aux.aux, + 16, + 4, + aconnector->connector_id); +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h new file mode 100644 index 000000000000..6130d62ac65c --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -0,0 +1,36 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
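/*
 * For illustration: amdgpu_dm_initialize_mst_connector() above plugs
 * dm_dp_aux_transfer() into aconnector->dm_dp_aux.aux, so the generic DRM DP
 * helpers end up routed through DC.  A hypothetical caller could then read
 * the DPCD revision register like this:
 */
static bool example_read_dpcd_rev(struct amdgpu_connector *aconnector, u8 *rev)
{
	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, DP_DPCD_REV,
				rev, 1) == 1;
}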
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ +#define __DAL_AMDGPU_DM_MST_TYPES_H__ + +struct amdgpu_display_manager; +struct amdgpu_connector; + +void amdgpu_dm_initialize_mst_connector( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector); + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c new file mode 100644 index 000000000000..9d5125951acd --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -0,0 +1,463 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/string.h> +#include <linux/acpi.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/amdgpu_drm.h> +#include "dm_services.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" +#include "amdgpu_dm_types.h" +#include "amdgpu_pm.h" + +#define dm_alloc(size) kzalloc(size, GFP_KERNEL) +#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL) +#define dm_free(ptr) kfree(ptr) + +/****************************************************************************** + * IRQ Interfaces. 
+ *****************************************************************************/ + +void dal_register_timer_interrupt( + struct dc_context *ctx, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args) +{ + struct amdgpu_device *adev = ctx->driver_context; + + if (!adev || !int_params) { + DRM_ERROR("DM_IRQ: invalid input!\n"); + return; + } + + if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) { + /* only low irq ctx is supported. */ + DRM_ERROR("DM_IRQ: invalid context: %d!\n", + int_params->int_context); + return; + } + + amdgpu_dm_irq_register_timer(adev, int_params, ih, args); +} + +void dal_isr_acquire_lock(struct dc_context *ctx) +{ + /*TODO*/ +} + +void dal_isr_release_lock(struct dc_context *ctx) +{ + /*TODO*/ +} + +/****************************************************************************** + * End-of-IRQ Interfaces. + *****************************************************************************/ + +bool dm_get_platform_info(struct dc_context *ctx, + struct platform_info_params *params) +{ + /*TODO*/ + return false; +} + +bool dm_write_persistent_data(struct dc_context *ctx, + const struct dc_sink *sink, + const char *module_name, + const char *key_name, + void *params, + unsigned int size, + struct persistent_data_flag *flag) +{ + /*TODO implement*/ + return false; +} + +bool dm_read_persistent_data(struct dc_context *ctx, + const struct dc_sink *sink, + const char *module_name, + const char *key_name, + void *params, + unsigned int size, + struct persistent_data_flag *flag) +{ + /*TODO implement*/ + return false; +} + +void dm_delay_in_microseconds(struct dc_context *ctx, + unsigned int microSeconds) +{ + /*TODO implement*/ + return; +} + +/**** power component interfaces ****/ + +bool dm_pp_pre_dce_clock_change( + struct dc_context *ctx, + struct dm_pp_gpu_clock_range *requested_state, + struct dm_pp_gpu_clock_range *actual_state) +{ + /*TODO*/ + return false; +} + +bool dm_pp_apply_safe_state( + const struct dc_context *ctx) +{ + struct amdgpu_device *adev = ctx->driver_context; + + if (adev->pm.dpm_enabled) { + /* TODO: Does this require PreModeChange event to PPLIB? 
*/ + } + + return true; +} + +bool dm_pp_apply_display_requirements( + const struct dc_context *ctx, + const struct dm_pp_display_configuration *pp_display_cfg) +{ + struct amdgpu_device *adev = ctx->driver_context; + + if (adev->pm.dpm_enabled) { + + memset(&adev->pm.pm_display_cfg, 0, + sizeof(adev->pm.pm_display_cfg)); + + adev->pm.pm_display_cfg.cpu_cc6_disable = + pp_display_cfg->cpu_cc6_disable; + + adev->pm.pm_display_cfg.cpu_pstate_disable = + pp_display_cfg->cpu_pstate_disable; + + adev->pm.pm_display_cfg.cpu_pstate_separation_time = + pp_display_cfg->cpu_pstate_separation_time; + + adev->pm.pm_display_cfg.nb_pstate_switch_disable = + pp_display_cfg->nb_pstate_switch_disable; + + adev->pm.pm_display_cfg.num_display = + pp_display_cfg->display_count; + adev->pm.pm_display_cfg.num_path_including_non_display = + pp_display_cfg->display_count; + + adev->pm.pm_display_cfg.min_core_set_clock = + pp_display_cfg->min_engine_clock_khz/10; + adev->pm.pm_display_cfg.min_core_set_clock_in_sr = + pp_display_cfg->min_engine_clock_deep_sleep_khz/10; + adev->pm.pm_display_cfg.min_mem_set_clock = + pp_display_cfg->min_memory_clock_khz/10; + + adev->pm.pm_display_cfg.multi_monitor_in_sync = + pp_display_cfg->all_displays_in_sync; + adev->pm.pm_display_cfg.min_vblank_time = + pp_display_cfg->avail_mclk_switch_time_us; + + adev->pm.pm_display_cfg.display_clk = + pp_display_cfg->disp_clk_khz/10; + + adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; + + adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; + adev->pm.pm_display_cfg.line_time_in_us = + pp_display_cfg->line_time_in_us; + + adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh; + adev->pm.pm_display_cfg.crossfire_display_index = -1; + adev->pm.pm_display_cfg.min_bus_bandwidth = 0; + + /* TODO: complete implementation of + * amd_powerplay_display_configuration_change(). + * Follow example of: + * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c + * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */ + amd_powerplay_display_configuration_change( + adev->powerplay.pp_handle, + &adev->pm.pm_display_cfg); + + /* TODO: replace by a separate call to 'apply display cfg'? */ + amdgpu_pm_compute_clocks(adev); + } + + return true; +} + +bool dc_service_get_system_clocks_range( + const struct dc_context *ctx, + struct dm_pp_gpu_clock_range *sys_clks) +{ + struct amdgpu_device *adev = ctx->driver_context; + + /* Default values, in case PPLib is not compiled-in. 
*/ + sys_clks->mclk.max_khz = 800000; + sys_clks->mclk.min_khz = 800000; + + sys_clks->sclk.max_khz = 600000; + sys_clks->sclk.min_khz = 300000; + + if (adev->pm.dpm_enabled) { + sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false); + sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true); + + sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false); + sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true); + } + + return true; +} + +static void get_default_clock_levels( + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels *clks) +{ + uint32_t disp_clks_in_khz[6] = { + 300000, 400000, 496560, 626090, 685720, 757900 }; + uint32_t sclks_in_khz[6] = { + 300000, 360000, 423530, 514290, 626090, 720000 }; + uint32_t mclks_in_khz[2] = { 333000, 800000 }; + + switch (clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + clks->num_levels = 6; + memmove(clks->clocks_in_khz, disp_clks_in_khz, + sizeof(disp_clks_in_khz)); + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + clks->num_levels = 6; + memmove(clks->clocks_in_khz, sclks_in_khz, + sizeof(sclks_in_khz)); + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + clks->num_levels = 2; + memmove(clks->clocks_in_khz, mclks_in_khz, + sizeof(mclks_in_khz)); + break; + default: + clks->num_levels = 0; + break; + } +} + +static enum amd_pp_clock_type dc_to_pp_clock_type( + enum dm_pp_clock_type dm_pp_clk_type) +{ + enum amd_pp_clock_type amd_pp_clk_type = 0; + + switch (dm_pp_clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + amd_pp_clk_type = amd_pp_disp_clock; + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + amd_pp_clk_type = amd_pp_sys_clock; + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + amd_pp_clk_type = amd_pp_mem_clock; + break; + default: + DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", + dm_pp_clk_type); + break; + } + + return amd_pp_clk_type; +} + +static void pp_to_dc_clock_levels( + const struct amd_pp_clocks *pp_clks, + struct dm_pp_clock_levels *dc_clks, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->count, + DM_PP_MAX_CLOCK_LEVELS); + + dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + dc_clks->num_levels = pp_clks->count; + + DRM_INFO("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < dc_clks->num_levels; i++) { + DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); + /* translate 10kHz to kHz */ + dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10; + } +} + +bool dm_pp_get_clock_levels_by_type( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels *dc_clks) +{ + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + struct amd_pp_clocks pp_clks = { 0 }; + struct amd_pp_simple_clock_info validation_clks = { 0 }; + uint32_t i; + + if (amd_powerplay_get_clock_by_type(pp_handle, + dc_to_pp_clock_type(clk_type), &pp_clks)) { + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); + return true; + } + + pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); + + if (amd_powerplay_get_display_mode_validation_clocks(pp_handle, + &validation_clks)) { + /* Error in pplib. Provide default values. 
*/ + DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); + validation_clks.engine_max_clock = 72000; + validation_clks.memory_max_clock = 80000; + validation_clks.level = 0; + } + + DRM_INFO("DM_PPLIB: Validation clocks:\n"); + DRM_INFO("DM_PPLIB: engine_max_clock: %d\n", + validation_clks.engine_max_clock); + DRM_INFO("DM_PPLIB: memory_max_clock: %d\n", + validation_clks.memory_max_clock); + DRM_INFO("DM_PPLIB: level : %d\n", + validation_clks.level); + + /* Translate 10 kHz to kHz. */ + validation_clks.engine_max_clock *= 10; + validation_clks.memory_max_clock *= 10; + + /* Determine the highest non-boosted level from the Validation Clocks */ + if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) { + for (i = 0; i < dc_clks->num_levels; i++) { + if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { + /* This clock is higher the validation clock. + * Than means the previous one is the highest + * non-boosted one. */ + DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", + dc_clks->num_levels, i + 1); + dc_clks->num_levels = i; + break; + } + } + } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) { + for (i = 0; i < dc_clks->num_levels; i++) { + if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) { + DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n", + dc_clks->num_levels, i + 1); + dc_clks->num_levels = i; + break; + } + } + } + + return true; +} + +bool dm_pp_get_clock_levels_by_type_with_latency( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels_with_latency *clk_level_info) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_get_clock_levels_by_type_with_voltage( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels_with_voltage *clk_level_info) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_notify_wm_clock_changes( + const struct dc_context *ctx, + struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_apply_power_level_change_request( + const struct dc_context *ctx, + struct dm_pp_power_level_change_request *level_change_req) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_apply_clock_for_voltage_request( + const struct dc_context *ctx, + struct dm_pp_clock_for_voltage_req *clock_for_voltage_req) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_get_static_clocks( + const struct dc_context *ctx, + struct dm_pp_static_clock_info *static_clk_info) +{ + /* TODO: to be implemented */ + return false; +} + +/**** end of power component interfaces ****/ + +/* Calls to notification */ + +void dal_notify_setmode_complete(struct dc_context *ctx, + uint32_t h_total, + uint32_t v_total, + uint32_t h_active, + uint32_t v_active, + uint32_t pix_clk_in_khz) +{ + /*TODO*/ +} +/* End of calls to notification */ + +long dm_get_pid(void) +{ + return current->pid; +} + +long dm_get_tgid(void) +{ + return current->tgid; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c new file mode 100644 index 000000000000..c073f4558cf4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c @@ -0,0 +1,3150 @@ +/* + * Copyright 2012-13 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/types.h> +#include <linux/version.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_edid.h> + +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "dm_services_types.h" + +// We need to #undef FRAME_SIZE and DEPRECATED because they conflict +// with ptrace-abi.h's #define's of them. +#undef FRAME_SIZE +#undef DEPRECATED + +#include "dc.h" + +#include "amdgpu_dm_types.h" +#include "amdgpu_dm_mst_types.h" + +#include "modules/inc/mod_freesync.h" + +struct dm_connector_state { + struct drm_connector_state base; + + enum amdgpu_rmx_type scaling; + uint8_t underscan_vborder; + uint8_t underscan_hborder; + bool underscan_enable; +}; + +#define to_dm_connector_state(x)\ + container_of((x), struct dm_connector_state, base) + + +void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { + .destroy = amdgpu_dm_encoder_destroy, +}; + +static void dm_set_cursor( + struct amdgpu_crtc *amdgpu_crtc, + uint64_t gpu_addr, + uint32_t width, + uint32_t height) +{ + struct dc_cursor_attributes attributes; + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + attributes.address.high_part = upper_32_bits(gpu_addr); + attributes.address.low_part = lower_32_bits(gpu_addr); + attributes.width = width; + attributes.height = height; + attributes.x_hot = 0; + attributes.y_hot = 0; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + if (!dc_target_set_cursor_attributes( + amdgpu_crtc->target, + &attributes)) { + DRM_ERROR("DC failed to set cursor attributes\n"); + } +} + +static int dm_crtc_unpin_cursor_bo_old( + struct amdgpu_crtc *amdgpu_crtc) +{ + struct amdgpu_bo *robj; + int ret = 0; + + if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) { + robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + + ret = amdgpu_bo_reserve(robj, false); + + if (likely(ret == 0)) { + ret = amdgpu_bo_unpin(robj); + + if (unlikely(ret != 0)) { + DRM_ERROR( + "%s: unpin failed (ret=%d), bo %p\n", + __func__, + ret, + amdgpu_crtc->cursor_bo); + } + + amdgpu_bo_unreserve(robj); + } else { + DRM_ERROR( + "%s: reserve failed (ret=%d), bo %p\n", + __func__, + ret, + 
amdgpu_crtc->cursor_bo); + } + + drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + amdgpu_crtc->cursor_bo = NULL; + } + + return ret; +} + +static int dm_crtc_pin_cursor_bo_new( + struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + struct amdgpu_bo **ret_obj) +{ + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_bo *robj; + struct drm_gem_object *obj; + int ret = -EINVAL; + + if (NULL != crtc) { + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + uint64_t gpu_addr; + + amdgpu_crtc = to_amdgpu_crtc(crtc); + + obj = drm_gem_object_lookup(file_priv, handle); + + if (!obj) { + DRM_ERROR( + "Cannot find cursor object %x for crtc %d\n", + handle, + amdgpu_crtc->crtc_id); + goto release; + } + robj = gem_to_amdgpu_bo(obj); + + ret = amdgpu_bo_reserve(robj, false); + + if (unlikely(ret != 0)) { + drm_gem_object_unreference_unlocked(obj); + DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n", + ret, handle); + goto release; + } + + ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0, + adev->mc.visible_vram_size, + &gpu_addr); + + if (ret == 0) { + amdgpu_crtc->cursor_addr = gpu_addr; + *ret_obj = robj; + } + amdgpu_bo_unreserve(robj); + if (ret) + drm_gem_object_unreference_unlocked(obj); + + } +release: + + return ret; +} + +static int dm_crtc_cursor_set( + struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct amdgpu_bo *new_cursor_bo; + struct dc_cursor_position position; + + int ret; + + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + ret = EINVAL; + new_cursor_bo = NULL; + + DRM_DEBUG_KMS( + "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n", + __func__, + amdgpu_crtc->crtc_id, + handle, + width, + height, + amdgpu_crtc->cursor_bo); + + if (!handle) { + /* turn off cursor */ + position.enable = false; + position.x = 0; + position.y = 0; + position.hot_spot_enable = false; + + if (amdgpu_crtc->target) { + /*set cursor visible false*/ + dc_target_set_cursor_position( + amdgpu_crtc->target, + &position); + } + /*unpin old cursor buffer and update cache*/ + ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc); + goto release; + + } + + if ((width > amdgpu_crtc->max_cursor_width) || + (height > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR( + "%s: bad cursor width or height %d x %d\n", + __func__, + width, + height); + goto release; + } + /*try to pin new cursor bo*/ + ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo); + /*if map not successful then return an error*/ + if (ret) + goto release; + + /*program new cursor bo to hardware*/ + dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height); + + /*un map old, not used anymore cursor bo , + * return memory and mapping back */ + dm_crtc_unpin_cursor_bo_old(amdgpu_crtc); + + /*assign new cursor bo to our internal cache*/ + amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base; + +release: + return ret; + +} + +static int dm_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int xorigin = 0, yorigin = 0; + struct dc_cursor_position position; + + /* avivo cursor are offset into the total surface */ + x += crtc->primary->state->src_x >> 16; + y += crtc->primary->state->src_y >> 16; + + /* + * TODO: for cursor debugging unguard the following + */ +#if 0 + DRM_DEBUG_KMS( + "%s: x %d y %d c->x %d c->y %d\n", + __func__, + x, + y, + crtc->x, + crtc->y); +#endif + + if (x < 0) { + 
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + + position.enable = true; + position.x = x; + position.y = y; + + position.hot_spot_enable = true; + position.x_hotspot = xorigin; + position.y_hotspot = yorigin; + + if (amdgpu_crtc->target) { + if (!dc_target_set_cursor_position( + amdgpu_crtc->target, + &position)) { + DRM_ERROR("DC failed to set cursor position\n"); + return -EINVAL; + } + } + + return 0; +} + +static void dm_crtc_cursor_reset(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + DRM_DEBUG_KMS( + "%s: with cursor_bo %p\n", + __func__, + amdgpu_crtc->cursor_bo); + + if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) { + dm_set_cursor( + amdgpu_crtc, + amdgpu_crtc->cursor_addr, + amdgpu_crtc->cursor_width, + amdgpu_crtc->cursor_height); + } +} +static bool fill_rects_from_plane_state( + const struct drm_plane_state *state, + struct dc_surface *surface) +{ + surface->src_rect.x = state->src_x >> 16; + surface->src_rect.y = state->src_y >> 16; + /*we ignore for now mantissa and do not to deal with floating pixels :(*/ + surface->src_rect.width = state->src_w >> 16; + + if (surface->src_rect.width == 0) + return false; + + surface->src_rect.height = state->src_h >> 16; + if (surface->src_rect.height == 0) + return false; + + surface->dst_rect.x = state->crtc_x; + surface->dst_rect.y = state->crtc_y; + + if (state->crtc_w == 0) + return false; + + surface->dst_rect.width = state->crtc_w; + + if (state->crtc_h == 0) + return false; + + surface->dst_rect.height = state->crtc_h; + + surface->clip_rect = surface->dst_rect; + + switch (state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + surface->rotation = ROTATION_ANGLE_0; + break; + case DRM_MODE_ROTATE_90: + surface->rotation = ROTATION_ANGLE_90; + break; + case DRM_MODE_ROTATE_180: + surface->rotation = ROTATION_ANGLE_180; + break; + case DRM_MODE_ROTATE_270: + surface->rotation = ROTATION_ANGLE_270; + break; + default: + surface->rotation = ROTATION_ANGLE_0; + break; + } + + return true; +} +static bool get_fb_info( + const struct amdgpu_framebuffer *amdgpu_fb, + uint64_t *tiling_flags, + uint64_t *fb_location) +{ + struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + int r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)){ + DRM_ERROR("Unable to reserve buffer\n"); + return false; + } + + if (fb_location) + *fb_location = amdgpu_bo_gpu_offset(rbo); + + if (tiling_flags) + amdgpu_bo_get_tiling_flags(rbo, tiling_flags); + + amdgpu_bo_unreserve(rbo); + + return true; +} +static void fill_plane_attributes_from_fb( + struct dc_surface *surface, + const struct amdgpu_framebuffer *amdgpu_fb, bool addReq) +{ + uint64_t tiling_flags; + uint64_t fb_location = 0; + const struct drm_framebuffer *fb = &amdgpu_fb->base; + struct drm_format_name_buf format_name; + + get_fb_info( + amdgpu_fb, + &tiling_flags, + addReq == true ? 
&fb_location:NULL); + + surface->address.type = PLN_ADDR_TYPE_GRAPHICS; + surface->address.grph.addr.low_part = lower_32_bits(fb_location); + surface->address.grph.addr.high_part = upper_32_bits(fb_location); + + switch (fb->format->format) { + case DRM_FORMAT_C8: + surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; + break; + case DRM_FORMAT_RGB565: + surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; + break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; + break; + default: + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(fb->format->format, &format_name)); + return; + } + + memset(&surface->tiling_info, 0, sizeof(surface->tiling_info)); + + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) + { + unsigned bankw, bankh, mtaspect, tile_split, num_banks; + + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + + /* XXX fix me for VI */ + surface->tiling_info.gfx8.num_banks = num_banks; + surface->tiling_info.gfx8.array_mode = + DC_ARRAY_2D_TILED_THIN1; + surface->tiling_info.gfx8.tile_split = tile_split; + surface->tiling_info.gfx8.bank_width = bankw; + surface->tiling_info.gfx8.bank_height = bankh; + surface->tiling_info.gfx8.tile_aspect = mtaspect; + surface->tiling_info.gfx8.tile_mode = + DC_ADDR_SURF_MICRO_TILING_DISPLAY; + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) + == DC_ARRAY_1D_TILED_THIN1) { + surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; + } + + surface->tiling_info.gfx8.pipe_config = + AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + + surface->plane_size.grph.surface_size.x = 0; + surface->plane_size.grph.surface_size.y = 0; + surface->plane_size.grph.surface_size.width = fb->width; + surface->plane_size.grph.surface_size.height = fb->height; + surface->plane_size.grph.surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + surface->visible = true; + surface->scaling_quality.h_taps_c = 0; + surface->scaling_quality.v_taps_c = 0; + + /* TODO: unhardcode */ + surface->color_space = COLOR_SPACE_SRGB; + /* is this needed? is surface zeroed at allocation? 
*/ + surface->scaling_quality.h_taps = 0; + surface->scaling_quality.v_taps = 0; + surface->stereo_format = PLANE_STEREO_FORMAT_NONE; + +} + +#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256 + +static void fill_gamma_from_crtc( + const struct drm_crtc *crtc, + struct dc_surface *dc_surface) +{ + int i; + struct dc_gamma *gamma; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data; + + gamma = dc_create_gamma(); + + if (gamma == NULL) + return; + + for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) { + gamma->gamma_ramp_rgb256x3x16.red[i] = lut[i].red; + gamma->gamma_ramp_rgb256x3x16.green[i] = lut[i].green; + gamma->gamma_ramp_rgb256x3x16.blue[i] = lut[i].blue; + } + + gamma->type = GAMMA_RAMP_RBG256X3X16; + gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16); + + dc_surface->gamma_correction = gamma; +} + +static void fill_plane_attributes( + struct dc_surface *surface, + struct drm_plane_state *state, bool addrReq) +{ + const struct amdgpu_framebuffer *amdgpu_fb = + to_amdgpu_framebuffer(state->fb); + const struct drm_crtc *crtc = state->crtc; + + fill_rects_from_plane_state(state, surface); + fill_plane_attributes_from_fb( + surface, + amdgpu_fb, + addrReq); + + /* In case of gamma set, update gamma value */ + if (state->crtc->state->gamma_lut) { + fill_gamma_from_crtc(crtc, surface); + } +} + +/*****************************************************************************/ + +struct amdgpu_connector *aconnector_from_drm_crtc_id( + const struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_connector *aconnector; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + + aconnector = to_amdgpu_connector(connector); + + if (aconnector->base.state->crtc != &acrtc->base) + continue; + + /* Found the connector */ + return aconnector; + } + + /* If we get here, not found. 
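 * No connector in the mode_config list currently has this CRTC assigned.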
*/ + return NULL; +} + +static void update_stream_scaling_settings( + const struct drm_display_mode *mode, + const struct dm_connector_state *dm_state, + const struct dc_stream *stream) +{ + struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private; + enum amdgpu_rmx_type rmx_type; + + struct rect src = { 0 }; /* viewport in target space*/ + struct rect dst = { 0 }; /* stream addressable area */ + + /* Full screen scaling by default */ + src.width = mode->hdisplay; + src.height = mode->vdisplay; + dst.width = stream->timing.h_addressable; + dst.height = stream->timing.v_addressable; + + rmx_type = dm_state->scaling; + if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { + if (src.width * dst.height < + src.height * dst.width) { + /* height needs less upscaling/more downscaling */ + dst.width = src.width * + dst.height / src.height; + } else { + /* width needs less upscaling/more downscaling */ + dst.height = src.height * + dst.width / src.width; + } + } else if (rmx_type == RMX_CENTER) { + dst = src; + } + + dst.x = (stream->timing.h_addressable - dst.width) / 2; + dst.y = (stream->timing.v_addressable - dst.height) / 2; + + if (dm_state->underscan_enable) { + dst.x += dm_state->underscan_hborder / 2; + dst.y += dm_state->underscan_vborder / 2; + dst.width -= dm_state->underscan_hborder; + dst.height -= dm_state->underscan_vborder; + } + + adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst); + + DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", + dst.x, dst.y, dst.width, dst.height); + +} + +static void dm_dc_surface_commit( + struct dc *dc, + struct drm_crtc *crtc) +{ + struct dc_surface *dc_surface; + const struct dc_surface *dc_surfaces[1]; + const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct dc_target *dc_target = acrtc->target; + + if (!dc_target) { + dm_error( + "%s: Failed to obtain target on crtc (%d)!\n", + __func__, + acrtc->crtc_id); + goto fail; + } + + dc_surface = dc_create_surface(dc); + + if (!dc_surface) { + dm_error( + "%s: Failed to create a surface!\n", + __func__); + goto fail; + } + + /* Surface programming */ + fill_plane_attributes(dc_surface, crtc->primary->state, true); + + dc_surfaces[0] = dc_surface; + + if (false == dc_commit_surfaces_to_target( + dc, + dc_surfaces, + 1, + dc_target)) { + dm_error( + "%s: Failed to attach surface!\n", + __func__); + } + + dc_surface_release(dc_surface); +fail: + return; +} + +static enum dc_color_depth convert_color_depth_from_display_info( + const struct drm_connector *connector) +{ + uint32_t bpc = connector->display_info.bpc; + + /* Limited color depth to 8bit + * TODO: Still need to handle deep color*/ + if (bpc > 8) + bpc = 8; + + switch (bpc) { + case 0: + /* Temporary Work around, DRM don't parse color depth for + * EDID revision before 1.4 + * TODO: Fix edid parsing + */ + return COLOR_DEPTH_888; + case 6: + return COLOR_DEPTH_666; + case 8: + return COLOR_DEPTH_888; + case 10: + return COLOR_DEPTH_101010; + case 12: + return COLOR_DEPTH_121212; + case 14: + return COLOR_DEPTH_141414; + case 16: + return COLOR_DEPTH_161616; + default: + return COLOR_DEPTH_UNDEFINED; + } +} + +static enum dc_aspect_ratio get_aspect_ratio( + const struct drm_display_mode *mode_in) +{ + int32_t width = mode_in->crtc_hdisplay * 9; + int32_t height = mode_in->crtc_vdisplay * 16; + if ((width - height) < 10 && (width - height) > -10) + return ASPECT_RATIO_16_9; + else + return ASPECT_RATIO_4_3; +} + +static enum dc_color_space get_output_color_space( + const struct 
dc_crtc_timing *dc_crtc_timing) +{ + enum dc_color_space color_space = COLOR_SPACE_SRGB; + + switch (dc_crtc_timing->pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR444: + case PIXEL_ENCODING_YCBCR420: + { + /* + * 27030khz is the separation point between HDTV and SDTV + * according to HDMI spec, we use YCbCr709 and YCbCr601 + * respectively + */ + if (dc_crtc_timing->pix_clk_khz > 27030) { + if (dc_crtc_timing->flags.Y_ONLY) + color_space = + COLOR_SPACE_YCBCR709_LIMITED; + else + color_space = COLOR_SPACE_YCBCR709; + } else { + if (dc_crtc_timing->flags.Y_ONLY) + color_space = + COLOR_SPACE_YCBCR601_LIMITED; + else + color_space = COLOR_SPACE_YCBCR601; + } + + } + break; + case PIXEL_ENCODING_RGB: + color_space = COLOR_SPACE_SRGB; + break; + + default: + WARN_ON(1); + break; + } + + return color_space; +} + +/*****************************************************************************/ + +static void fill_stream_properties_from_drm_display_mode( + struct dc_stream *stream, + const struct drm_display_mode *mode_in, + const struct drm_connector *connector) +{ + struct dc_crtc_timing *timing_out = &stream->timing; + memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + + timing_out->h_border_left = 0; + timing_out->h_border_right = 0; + timing_out->v_border_top = 0; + timing_out->v_border_bottom = 0; + /* TODO: un-hardcode */ + + if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) + && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; + else + timing_out->pixel_encoding = PIXEL_ENCODING_RGB; + + timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; + timing_out->display_color_depth = convert_color_depth_from_display_info( + connector); + timing_out->scan_type = SCANNING_TYPE_NODATA; + timing_out->hdmi_vic = 0; + timing_out->vic = drm_match_cea_mode(mode_in); + + timing_out->h_addressable = mode_in->crtc_hdisplay; + timing_out->h_total = mode_in->crtc_htotal; + timing_out->h_sync_width = + mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; + timing_out->h_front_porch = + mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; + timing_out->v_total = mode_in->crtc_vtotal; + timing_out->v_addressable = mode_in->crtc_vdisplay; + timing_out->v_front_porch = + mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; + timing_out->v_sync_width = + mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; + timing_out->pix_clk_khz = mode_in->crtc_clock; + timing_out->aspect_ratio = get_aspect_ratio(mode_in); + if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) + timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; + if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) + timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; + + stream->output_color_space = get_output_color_space(timing_out); + +} + +static void fill_audio_info( + struct audio_info *audio_info, + const struct drm_connector *drm_connector, + const struct dc_sink *dc_sink) +{ + int i = 0; + int cea_revision = 0; + const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; + + audio_info->manufacture_id = edid_caps->manufacturer_id; + audio_info->product_id = edid_caps->product_id; + + cea_revision = drm_connector->display_info.cea_rev; + + while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS && + edid_caps->display_name[i]) { + audio_info->display_name[i] = edid_caps->display_name[i]; + i++; + } + + if(cea_revision >= 3) { + audio_info->mode_count = edid_caps->audio_mode_count; + + for (i = 0; i < audio_info->mode_count; ++i) { + audio_info->modes[i].format_code = + 
(enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+ /* TODO: We only check for the progressive mode, check for interlace mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void copy_crtc_timing_for_drm_display_mode(
+ const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void decide_crtc_timing_for_drm_display_mode(
+ struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+ /* no scaling nor amdgpu inserted mode, no need to patch */
+ }
+}
+
+static struct dc_target *create_target_for_sink(
+ const struct amdgpu_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dc_target *target = NULL;
+ struct dc_stream *stream;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (NULL == aconnector) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ if (NULL == dm_state) {
+ DRM_ERROR("dm_state is NULL!\n");
+ goto dm_state_null;
+ }
+
+ drm_connector = &aconnector->base;
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (NULL == stream) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+
+ if (NULL == preferred_mode) {
+ /* This may not be an error, the use case is when we have no
+ * usermode calls to reset and set mode upon hotplug. 
In this
+ * case, we call set mode ourselves to restore the previous mode
+ * and the modelist may not be filled in in time.
+ */
+ DRM_INFO("No preferred mode found\n");
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+ }
+
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base);
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+ target = dc_create_target_for_streams(&stream, 1);
+ dc_stream_release(stream);
+
+ if (NULL == target) {
+ DRM_ERROR("Failed to create target with streams!\n");
+ goto target_create_fail;
+ }
+
+dm_state_null:
+drm_connector_null:
+target_create_fail:
+stream_create_fail:
+ return target;
+}
+
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+/* Implemented only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .cursor_set = dm_crtc_cursor_set,
+ .cursor_move = dm_crtc_cursor_move,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+ /* Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl, which
+ * makes it a bad place for *any* MST-related activity. */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+
+ return (connected ? 
connector_status_connected : + connector_status_disconnected); +} + +int amdgpu_dm_connector_atomic_set_property( + struct drm_connector *connector, + struct drm_connector_state *connector_state, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = dev->dev_private; + struct dm_connector_state *dm_old_state = + to_dm_connector_state(connector->state); + struct dm_connector_state *dm_new_state = + to_dm_connector_state(connector_state); + + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; + int i; + int ret = -EINVAL; + + if (property == dev->mode_config.scaling_mode_property) { + enum amdgpu_rmx_type rmx_type; + + switch (val) { + case DRM_MODE_SCALE_CENTER: + rmx_type = RMX_CENTER; + break; + case DRM_MODE_SCALE_ASPECT: + rmx_type = RMX_ASPECT; + break; + case DRM_MODE_SCALE_FULLSCREEN: + rmx_type = RMX_FULL; + break; + case DRM_MODE_SCALE_NONE: + default: + rmx_type = RMX_OFF; + break; + } + + if (dm_old_state->scaling == rmx_type) + return 0; + + dm_new_state->scaling = rmx_type; + ret = 0; + } else if (property == adev->mode_info.underscan_hborder_property) { + dm_new_state->underscan_hborder = val; + ret = 0; + } else if (property == adev->mode_info.underscan_vborder_property) { + dm_new_state->underscan_vborder = val; + ret = 0; + } else if (property == adev->mode_info.underscan_property) { + dm_new_state->underscan_enable = val; + ret = 0; + } + + for_each_crtc_in_state( + connector_state->state, + crtc, + new_crtc_state, + i) { + + if (crtc == connector_state->crtc) { + struct drm_plane_state *plane_state; + + /* + * Bit of magic done here. We need to ensure + * that planes get update after mode is set. + * So, we need to add primary plane to state, + * and this way atomic_update would be called + * for it + */ + plane_state = + drm_atomic_get_plane_state( + connector_state->state, + crtc->primary); + + if (!plane_state) + return -EINVAL; + } + } + + return ret; +} + +void amdgpu_dm_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + const struct dc_link *link = aconnector->dc_link; + struct amdgpu_device *adev = connector->dev->dev_private; + struct amdgpu_display_manager *dm = &adev->dm; +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ + defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + + if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { + amdgpu_dm_register_backlight_device(dm); + + if (dm->backlight_dev) { + backlight_device_unregister(dm->backlight_dev); + dm->backlight_dev = NULL; + } + + } +#endif + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) +{ + struct dm_connector_state *state = + to_dm_connector_state(connector->state); + + kfree(state); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (state) { + state->scaling = RMX_OFF; + state->underscan_enable = false; + state->underscan_hborder = 0; + state->underscan_vborder = 0; + + connector->state = &state->base; + connector->state->connector = connector; + } +} + +struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state( + struct drm_connector *connector) +{ + struct dm_connector_state *state = + to_dm_connector_state(connector->state); + + struct dm_connector_state *new_state = + kmemdup(state, sizeof(*state), GFP_KERNEL); + + if (new_state) { + __drm_atomic_helper_connector_duplicate_state(connector, + 
&new_state->base);
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_KMS("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+
+ if (!aconnector->base.edid_blob_ptr ||
+ !aconnector->base.edid_blob_ptr->data) {
+ DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ aconnector->base.override_edid = false;
+ return;
+ }
+
+ /* Only dereference the EDID blob once we know it exists. */
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force
+ == DRM_FORCE_ON)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ? 
+ aconnector->dc_link->local_sink : + aconnector->dc_em_sink; +} + +static void handle_edid_mgmt(struct amdgpu_connector *aconnector) +{ + struct dc_link *link = (struct dc_link *)aconnector->dc_link; + + /* In case of headless boot with force on for DP managed connector + * Those settings have to be != 0 to get initial modeset + */ + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { + link->verified_link_cap.lane_count = LANE_COUNT_FOUR; + link->verified_link_cap.link_rate = LINK_RATE_HIGH2; + } + + + aconnector->base.override_edid = true; + create_eml_sink(aconnector); +} + +int amdgpu_dm_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int result = MODE_ERROR; + const struct dc_sink *dc_sink; + struct amdgpu_device *adev = connector->dev->dev_private; + struct dc_validation_set val_set = { 0 }; + /* TODO: Unhardcode stream count */ + struct dc_stream *streams[1]; + struct dc_target *target; + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) + return result; + + /* Only run this the first time mode_valid is called to initilialize + * EDID mgmt + */ + if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && + !aconnector->dc_em_sink) + handle_edid_mgmt(aconnector); + + dc_sink = to_amdgpu_connector(connector)->dc_sink; + + if (NULL == dc_sink) { + DRM_ERROR("dc_sink is NULL!\n"); + goto stream_create_fail; + } + + streams[0] = dc_create_stream_for_sink(dc_sink); + + if (NULL == streams[0]) { + DRM_ERROR("Failed to create stream for sink!\n"); + goto stream_create_fail; + } + + drm_mode_set_crtcinfo(mode, 0); + fill_stream_properties_from_drm_display_mode(streams[0], mode, connector); + + target = dc_create_target_for_streams(streams, 1); + val_set.target = target; + + if (NULL == val_set.target) { + DRM_ERROR("Failed to create target with stream!\n"); + goto target_create_fail; + } + + val_set.surface_count = 0; + streams[0]->src.width = mode->hdisplay; + streams[0]->src.height = mode->vdisplay; + streams[0]->dst = streams[0]->src; + + if (dc_validate_resources(adev->dm.dc, &val_set, 1)) + result = MODE_OK; + + dc_target_release(target); +target_create_fail: + dc_stream_release(streams[0]); +stream_create_fail: + /* TODO: error handling*/ + return result; +} + +static const struct drm_connector_helper_funcs +amdgpu_dm_connector_helper_funcs = { + /* + * If hotplug a second bigger display in FB Con mode, bigger resolution + * modes will be filtered by drm_mode_validate_size(), and those modes + * is missing after user start lightdm. So we need to renew modes list. 
+ * in get_modes call back, not just return the modes count + */ + .get_modes = get_modes, + .mode_valid = amdgpu_dm_connector_mode_valid, + .best_encoder = best_encoder +}; + +static void dm_crtc_helper_disable(struct drm_crtc *crtc) +{ +} + +static int dm_crtc_helper_atomic_check( + struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static bool dm_crtc_helper_mode_fixup( + struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { + .disable = dm_crtc_helper_disable, + .atomic_check = dm_crtc_helper_atomic_check, + .mode_fixup = dm_crtc_helper_mode_fixup +}; + +static void dm_encoder_helper_disable(struct drm_encoder *encoder) +{ + +} + +static int dm_encoder_helper_atomic_check( + struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return 0; +} + +const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { + .disable = dm_encoder_helper_disable, + .atomic_check = dm_encoder_helper_atomic_check +}; + +static const struct drm_plane_funcs dm_plane_funcs = { + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state +}; + +static void clear_unrelated_fields(struct drm_plane_state *state) +{ + state->crtc = NULL; + state->fb = NULL; + state->state = NULL; + state->fence = NULL; +} + +static bool page_flip_needed( + const struct drm_plane_state *new_state, + const struct drm_plane_state *old_state, + struct drm_pending_vblank_event *event, + bool commit_surface_required) +{ + struct drm_plane_state old_state_tmp; + struct drm_plane_state new_state_tmp; + + struct amdgpu_framebuffer *amdgpu_fb_old; + struct amdgpu_framebuffer *amdgpu_fb_new; + struct amdgpu_crtc *acrtc_new; + + uint64_t old_tiling_flags; + uint64_t new_tiling_flags; + + bool page_flip_required; + + if (!old_state) + return false; + + if (!old_state->fb) + return false; + + if (!new_state) + return false; + + if (!new_state->fb) + return false; + + old_state_tmp = *old_state; + new_state_tmp = *new_state; + + if (!event) + return false; + + amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb); + amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb); + + if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL)) + return false; + + if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL)) + return false; + + if (commit_surface_required == true && + old_tiling_flags != new_tiling_flags) + return false; + + clear_unrelated_fields(&old_state_tmp); + clear_unrelated_fields(&new_state_tmp); + + page_flip_required = memcmp(&old_state_tmp, + &new_state_tmp, + sizeof(old_state_tmp)) == 0 ? 
true:false; + if (new_state->crtc && page_flip_required == false) { + acrtc_new = to_amdgpu_crtc(new_state->crtc); + if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) + page_flip_required = true; + } + return page_flip_required; +} + +static int dm_plane_helper_prepare_fb( + struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_bo *rbo; + int r; + + if (!new_state->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(new_state->fb); + + obj = afb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL); + + amdgpu_bo_unreserve(rbo); + + if (unlikely(r != 0)) { + DRM_ERROR("Failed to pin framebuffer\n"); + return r; + } + + return 0; +} + +static void dm_plane_helper_cleanup_fb( + struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct amdgpu_bo *rbo; + struct amdgpu_framebuffer *afb; + int r; + + if (!old_state->fb) + return; + + afb = to_amdgpu_framebuffer(old_state->fb); + rbo = gem_to_amdgpu_bo(afb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } +} + +int dm_create_validation_set_for_target(struct drm_connector *connector, + struct drm_display_mode *mode, struct dc_validation_set *val_set) +{ + int result = MODE_ERROR; + const struct dc_sink *dc_sink = + to_amdgpu_connector(connector)->dc_sink; + /* TODO: Unhardcode stream count */ + struct dc_stream *streams[1]; + struct dc_target *target; + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) + return result; + + if (NULL == dc_sink) { + DRM_ERROR("dc_sink is NULL!\n"); + return result; + } + + streams[0] = dc_create_stream_for_sink(dc_sink); + + if (NULL == streams[0]) { + DRM_ERROR("Failed to create stream for sink!\n"); + return result; + } + + drm_mode_set_crtcinfo(mode, 0); + + fill_stream_properties_from_drm_display_mode(streams[0], mode, connector); + + target = dc_create_target_for_streams(streams, 1); + val_set->target = target; + + if (NULL == val_set->target) { + DRM_ERROR("Failed to create target with stream!\n"); + goto fail; + } + + streams[0]->src.width = mode->hdisplay; + streams[0]->src.height = mode->vdisplay; + streams[0]->dst = streams[0]->src; + + return MODE_OK; + +fail: + dc_stream_release(streams[0]); + return result; + +} + +static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { + .prepare_fb = dm_plane_helper_prepare_fb, + .cleanup_fb = dm_plane_helper_cleanup_fb, +}; + +/* + * TODO: these are currently initialized to rgb formats only. 
+ * For future use cases we should either initialize them dynamically based on + * plane capabilities, or initialize this array to all formats, so internal drm + * check will succeed, and let DC to implement proper check + */ +static uint32_t rgb_formats[] = { + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, +}; + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct amdgpu_crtc *acrtc, + uint32_t crtc_index) +{ + int res = -ENOMEM; + + struct drm_plane *primary_plane = + kzalloc(sizeof(*primary_plane), GFP_KERNEL); + + if (!primary_plane) + goto fail_plane; + + primary_plane->format_default = true; + + res = drm_universal_plane_init( + dm->adev->ddev, + primary_plane, + 0, + &dm_plane_funcs, + rgb_formats, + ARRAY_SIZE(rgb_formats), + NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + + primary_plane->crtc = &acrtc->base; + + drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs); + + res = drm_crtc_init_with_planes( + dm->ddev, + &acrtc->base, + primary_plane, + NULL, + &amdgpu_dm_crtc_funcs, NULL); + + if (res) + goto fail; + + drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); + + acrtc->max_cursor_width = 128; + acrtc->max_cursor_height = 128; + + acrtc->crtc_id = crtc_index; + acrtc->base.enabled = false; + + dm->adev->mode_info.crtcs[crtc_index] = acrtc; + drm_mode_crtc_set_gamma_size(&acrtc->base, 256); + + return 0; +fail: + kfree(primary_plane); +fail_plane: + acrtc->crtc_id = -1; + return res; +} + +static int to_drm_connector_type(enum signal_type st) +{ + switch (st) { + case SIGNAL_TYPE_HDMI_TYPE_A: + return DRM_MODE_CONNECTOR_HDMIA; + case SIGNAL_TYPE_EDP: + return DRM_MODE_CONNECTOR_eDP; + case SIGNAL_TYPE_RGB: + return DRM_MODE_CONNECTOR_VGA; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + return DRM_MODE_CONNECTOR_DisplayPort; + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + return DRM_MODE_CONNECTOR_DVID; + case SIGNAL_TYPE_VIRTUAL: + return DRM_MODE_CONNECTOR_VIRTUAL; + + default: + return DRM_MODE_CONNECTOR_Unknown; + } +} + +static void amdgpu_dm_get_native_mode(struct drm_connector *connector) +{ + const struct drm_connector_helper_funcs *helper = + connector->helper_private; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + encoder = helper->best_encoder(connector); + + if (encoder == NULL) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->native_mode.clock = 0; + + if (!list_empty(&connector->probed_modes)) { + struct drm_display_mode *preferred_mode = NULL; + list_for_each_entry(preferred_mode, + &connector->probed_modes, + head) { + if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { + amdgpu_encoder->native_mode = *preferred_mode; + } + break; + } + + } +} + +static struct drm_display_mode *amdgpu_dm_create_common_mode( + struct drm_encoder *encoder, char *name, + int hdisplay, int vdisplay) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + + mode = drm_mode_duplicate(dev, native_mode); + + if(mode == NULL) + return NULL; + + mode->hdisplay = hdisplay; + mode->vdisplay = vdisplay; + mode->type &= 
~DRM_MODE_TYPE_PREFERRED; + strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); + + return mode; + +} + +static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + int i; + int n; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + }common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + n = sizeof(common_modes) / sizeof(common_modes[0]); + + for (i = 0; i < n; i++) { + struct drm_display_mode *curmode = NULL; + bool mode_existed = false; + + if (common_modes[i].w > native_mode->hdisplay || + common_modes[i].h > native_mode->vdisplay || + (common_modes[i].w == native_mode->hdisplay && + common_modes[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(curmode, &connector->probed_modes, head) { + if (common_modes[i].w == curmode->hdisplay && + common_modes[i].h == curmode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = amdgpu_dm_create_common_mode(encoder, + common_modes[i].name, common_modes[i].w, + common_modes[i].h); + drm_mode_probed_add(connector, mode); + amdgpu_connector->num_modes++; + } +} + +static void amdgpu_dm_connector_ddc_get_modes( + struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + + if (edid) { + /* empty probed_modes */ + INIT_LIST_HEAD(&connector->probed_modes); + amdgpu_connector->num_modes = + drm_add_edid_modes(connector, edid); + + drm_edid_to_eld(connector, edid); + + amdgpu_dm_get_native_mode(connector); + } else + amdgpu_connector->num_modes = 0; +} + +int amdgpu_dm_connector_get_modes(struct drm_connector *connector) +{ + const struct drm_connector_helper_funcs *helper = + connector->helper_private; + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + struct drm_encoder *encoder; + struct edid *edid = amdgpu_connector->edid; + + encoder = helper->best_encoder(connector); + + amdgpu_dm_connector_ddc_get_modes(connector, edid); + amdgpu_dm_connector_add_common_modes(encoder, connector); + return amdgpu_connector->num_modes; +} + +void amdgpu_dm_connector_init_helper( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector, + int connector_type, + const struct dc_link *link, + int link_index) +{ + struct amdgpu_device *adev = dm->ddev->dev_private; + + aconnector->connector_id = link_index; + aconnector->dc_link = link; + aconnector->base.interlace_allowed = true; + aconnector->base.doublescan_allowed = true; + aconnector->base.dpms = DRM_MODE_DPMS_OFF; + aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ + + mutex_init(&aconnector->hpd_lock); + + /*configure suport HPD hot plug connector_>polled default value is 0 + * which means HPD hot plug not supported*/ + switch (connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; + break; + case DRM_MODE_CONNECTOR_DisplayPort: + aconnector->base.polled = 
DRM_CONNECTOR_POLL_HPD; + break; + case DRM_MODE_CONNECTOR_DVID: + aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; + break; + default: + break; + } + + drm_object_attach_property(&aconnector->base.base, + dm->ddev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); + + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_property, + UNDERSCAN_OFF); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_hborder_property, + 0); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_vborder_property, + 0); + +} + +int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); + struct i2c_command cmd; + int i; + int result = -EIO; + + cmd.payloads = kzalloc(num * sizeof(struct i2c_payload), GFP_KERNEL); + + if (!cmd.payloads) + return result; + + cmd.number_of_payloads = num; + cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; + cmd.speed = 100; + + for (i = 0; i < num; i++) { + cmd.payloads[i].write = (msgs[i].flags & I2C_M_RD); + cmd.payloads[i].address = msgs[i].addr; + cmd.payloads[i].length = msgs[i].len; + cmd.payloads[i].data = msgs[i].buf; + } + + if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd)) + result = num; + + kfree(cmd.payloads); + + return result; +} + +u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm amdgpu_dm_i2c_algo = { + .master_xfer = amdgpu_dm_i2c_xfer, + .functionality = amdgpu_dm_i2c_func, +}; + +struct amdgpu_i2c_adapter *create_i2c(unsigned int link_index, struct amdgpu_display_manager *dm, int *res) +{ + struct amdgpu_i2c_adapter *i2c; + + i2c = kzalloc(sizeof (struct amdgpu_i2c_adapter), GFP_KERNEL); + i2c->dm = dm; + i2c->base.owner = THIS_MODULE; + i2c->base.class = I2C_CLASS_DDC; + i2c->base.dev.parent = &dm->adev->pdev->dev; + i2c->base.algo = &amdgpu_dm_i2c_algo; + snprintf(i2c->base.name, sizeof (i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); + i2c->link_index = link_index; + i2c_set_adapdata(&i2c->base, i2c); + + return i2c; +} + +/* Note: this function assumes that dc_link_detect() was called for the + * dc_link which will be represented by this aconnector. 
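 * In particular, link->connector_signal must already be valid, since it is used below to pick the DRM connector type.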
*/
+int amdgpu_dm_connector_init(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ const struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ DRM_DEBUG_KMS("%s()\n", __func__);
+
+ i2c = create_i2c(link->link_index, dm, &res);
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_mst_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create a backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens later. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ /* Even if registration failed, we should continue with
+ * DM initialization because not having a backlight control
+ * is better than a black screen. 
*/ + amdgpu_dm_register_backlight_device(dm); + + if (dm->backlight_dev) + dm->backlight_link = link; + } +#endif + +out_free: + if (res) { + kfree(i2c); + aconnector->i2c = NULL; + } + return res; +} + +int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) +{ + switch (adev->mode_info.num_crtc) { + case 1: + return 0x1; + case 2: + return 0x3; + case 3: + return 0x7; + case 4: + return 0xf; + case 5: + return 0x1f; + case 6: + default: + return 0x3f; + } +} + +int amdgpu_dm_encoder_init( + struct drm_device *dev, + struct amdgpu_encoder *aencoder, + uint32_t link_index) +{ + struct amdgpu_device *adev = dev->dev_private; + + int res = drm_encoder_init(dev, + &aencoder->base, + &amdgpu_dm_encoder_funcs, + DRM_MODE_ENCODER_TMDS, + NULL); + + aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); + + if (!res) + aencoder->encoder_id = link_index; + else + aencoder->encoder_id = -1; + + drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); + + return res; +} + +enum dm_commit_action { + DM_COMMIT_ACTION_NOTHING, + DM_COMMIT_ACTION_RESET, + DM_COMMIT_ACTION_DPMS_ON, + DM_COMMIT_ACTION_DPMS_OFF, + DM_COMMIT_ACTION_SET +}; + +static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state) +{ + /* mode changed means either actually mode changed or enabled changed */ + /* active changed means dpms changed */ + + DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", + state->enable, + state->active, + state->planes_changed, + state->mode_changed, + state->active_changed, + state->connectors_changed); + + if (state->mode_changed) { + /* if it is got disabled - call reset mode */ + if (!state->enable) + return DM_COMMIT_ACTION_RESET; + + if (state->active) + return DM_COMMIT_ACTION_SET; + else + return DM_COMMIT_ACTION_RESET; + } else { + /* ! mode_changed */ + + /* if it is remain disable - skip it */ + if (!state->enable) + return DM_COMMIT_ACTION_NOTHING; + + if (state->active && state->connectors_changed) + return DM_COMMIT_ACTION_SET; + + if (state->active_changed) { + if (state->active) { + return DM_COMMIT_ACTION_DPMS_ON; + } else { + return DM_COMMIT_ACTION_DPMS_OFF; + } + } else { + /* ! active_changed */ + return DM_COMMIT_ACTION_NOTHING; + } + } +} + + +typedef bool (*predicate)(struct amdgpu_crtc *acrtc); + +static void wait_while_pflip_status(struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc, predicate f) { + int count = 0; + while (f(acrtc)) { + /* Spin Wait*/ + msleep(1); + count++; + if (count == 1000) { + DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n", + __func__, acrtc->crtc_id, + acrtc, + acrtc->pflip_status); + + /* we do not expect to hit this case except on Polaris with PHY PLL + * 1. DP to HDMI passive dongle connected + * 2. unplug (headless) + * 3. plug in DP + * 3a. on plug in, DP will try verify link by training, and training + * would disable PHY PLL which HDMI rely on to drive TG + * 3b. this will cause flip interrupt cannot be generated, and we + * exit when timeout expired. 
however we do not have code to clean + * up flip, flip clean up will happen when the address is written + * with the restore mode change + */ + WARN_ON(1); + break; + } + } + + DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n", + __func__, + count, + acrtc->crtc_id, + acrtc, + acrtc->pflip_status); +} + +static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc) +{ + return acrtc->pflip_status != AMDGPU_FLIP_NONE; +} + +static void manage_dm_interrupts( + struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc, + bool enable) +{ + /* + * this is not correct translation but will work as soon as VBLANK + * constant is the same as PFLIP + */ + int irq_type = + amdgpu_crtc_idx_to_irq_type( + adev, + acrtc->crtc_id); + + if (enable) { + drm_crtc_vblank_on(&acrtc->base); + amdgpu_irq_get( + adev, + &adev->pageflip_irq, + irq_type); + } else { + wait_while_pflip_status(adev, acrtc, + pflip_in_progress_predicate); + + amdgpu_irq_put( + adev, + &adev->pageflip_irq, + irq_type); + drm_crtc_vblank_off(&acrtc->base); + } +} + + +static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc) +{ + return acrtc->pflip_status == AMDGPU_FLIP_PENDING; +} + +static bool is_scaling_state_different( + const struct dm_connector_state *dm_state, + const struct dm_connector_state *old_dm_state) +{ + if (dm_state->scaling != old_dm_state->scaling) + return true; + if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { + if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) + return true; + } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { + if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) + return true; + } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder + || dm_state->underscan_vborder != old_dm_state->underscan_vborder) + return true; + return false; +} + +static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc) +{ + int i; + + /* + * we evade vblanks and pflips on crtc that + * should be changed + */ + manage_dm_interrupts(adev, acrtc, false); + /* this is the update mode case */ + if (adev->dm.freesync_module) + for (i = 0; i < acrtc->target->stream_count; i++) + mod_freesync_remove_stream( + adev->dm.freesync_module, + acrtc->target->streams[i]); + dc_target_release(acrtc->target); + acrtc->target = NULL; + acrtc->otg_inst = -1; + acrtc->enabled = false; +} + +int amdgpu_dm_atomic_commit( + struct drm_device *dev, + struct drm_atomic_state *state, + bool async) +{ + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + struct drm_plane_state *old_plane_state; + uint32_t i, j; + int32_t ret = 0; + uint32_t commit_targets_count = 0; + uint32_t new_crtcs_count = 0; + uint32_t flip_crtcs_count = 0; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + + struct dc_target *commit_targets[MAX_TARGETS]; + struct amdgpu_crtc *new_crtcs[MAX_TARGETS]; + struct dc_target *new_target; + struct drm_crtc *flip_crtcs[MAX_TARGETS]; + struct amdgpu_flip_work *work[MAX_TARGETS] = {0}; + struct amdgpu_bo *new_abo[MAX_TARGETS] = {0}; + + /* In this step all new fb would be pinned */ + + /* + * TODO: Revisit when we support true asynchronous commit. 
+ * Right now we receive async commit only from pageflip, in which case + * we should not pin/unpin the fb here, it should be done in + * amdgpu_crtc_flip and from the vblank irq handler. + */ + if (!async) { + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + } + + /* Page flip if needed */ + for_each_plane_in_state(state, plane, new_plane_state, i) { + struct drm_plane_state *old_plane_state = plane->state; + struct drm_crtc *crtc = new_plane_state->crtc; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = new_plane_state->fb; + struct drm_crtc_state *crtc_state; + + if (!fb || !crtc) + continue; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + + if (!crtc_state->planes_changed || !crtc_state->active) + continue; + + if (page_flip_needed( + new_plane_state, + old_plane_state, + crtc_state->event, + false)) { + ret = amdgpu_crtc_prepare_flip(crtc, + fb, + crtc_state->event, + acrtc->flip_flags, + drm_crtc_vblank_count(crtc), + &work[flip_crtcs_count], + &new_abo[flip_crtcs_count]); + + if (ret) { + /* According to atomic_commit hook API, EINVAL is not allowed */ + if (unlikely(ret == -EINVAL)) + ret = -ENOMEM; + + DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], " + "failed, errno = %d\n", + acrtc->crtc_id, + acrtc, + ret); + /* cleanup all flip configurations which + * succeeded in this commit + */ + for (i = 0; i < flip_crtcs_count; i++) + amdgpu_crtc_cleanup_flip_ctx( + work[i], + new_abo[i]); + + return ret; + } + + flip_crtcs[flip_crtcs_count] = crtc; + flip_crtcs_count++; + } + } + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. + */ + + drm_atomic_helper_swap_state(state, true); + + /* + * From this point state become old state really. New state is + * initialized to appropriate objects and could be accessed from there + */ + + /* + * there is no fences usage yet in state. We can skip the following line + * wait_for_fences(dev, state); + */ + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + + /* update changed items */ + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + struct amdgpu_crtc *acrtc; + struct amdgpu_connector *aconnector = NULL; + enum dm_commit_action action; + struct drm_crtc_state *new_state = crtc->state; + + acrtc = to_amdgpu_crtc(crtc); + + aconnector = + amdgpu_dm_find_first_crct_matching_connector( + state, + crtc, + false); + + /* handles headless hotplug case, updating new_state and + * aconnector as needed + */ + + action = get_dm_commit_action(new_state); + + switch (action) { + case DM_COMMIT_ACTION_DPMS_ON: + case DM_COMMIT_ACTION_SET: { + struct dm_connector_state *dm_state = NULL; + new_target = NULL; + + if (aconnector) + dm_state = to_dm_connector_state(aconnector->base.state); + + new_target = create_target_for_sink( + aconnector, + &crtc->state->mode, + dm_state); + + DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); + + if (!new_target) { + /* + * this could happen because of issues with + * userspace notifications delivery. + * In this case userspace tries to set mode on + * display which is disconnect in fact. + * dc_sink in NULL in this case on aconnector. + * We expect reset mode will come soon. 
+ * + * This can also happen when unplug is done + * during resume sequence ended + * + * In this case, we want to pretend we still + * have a sink to keep the pipe running so that + * hw state is consistent with the sw state + */ + DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n", + __func__, acrtc->base.base.id); + break; + } + + if (acrtc->target) + remove_target(adev, acrtc); + + /* + * this loop saves set mode crtcs + * we needed to enable vblanks once all + * resources acquired in dc after dc_commit_targets + */ + new_crtcs[new_crtcs_count] = acrtc; + new_crtcs_count++; + + acrtc->target = new_target; + acrtc->enabled = true; + acrtc->hw_mode = crtc->state->mode; + crtc->hwmode = crtc->state->mode; + + break; + } + + case DM_COMMIT_ACTION_NOTHING: { + struct dm_connector_state *dm_state = NULL; + + if (!aconnector) + break; + + dm_state = to_dm_connector_state(aconnector->base.state); + + /* Scaling update */ + update_stream_scaling_settings( + &crtc->state->mode, + dm_state, + acrtc->target->streams[0]); + + break; + } + case DM_COMMIT_ACTION_DPMS_OFF: + case DM_COMMIT_ACTION_RESET: + DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); + /* i.e. reset mode */ + if (acrtc->target) + remove_target(adev, acrtc); + break; + } /* switch() */ + } /* for_each_crtc_in_state() */ + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + if (acrtc->target) { + commit_targets[commit_targets_count] = acrtc->target; + ++commit_targets_count; + } + } + + /* + * Add streams after required streams from new and replaced targets + * are removed from freesync module + */ + if (adev->dm.freesync_module) { + for (i = 0; i < new_crtcs_count; i++) { + struct amdgpu_connector *aconnector = NULL; + new_target = new_crtcs[i]->target; + aconnector = + amdgpu_dm_find_first_crct_matching_connector( + state, + &new_crtcs[i]->base, + false); + if (!aconnector) { + DRM_INFO( + "Atomic commit: Failed to find connector for acrtc id:%d " + "skipping freesync init\n", + new_crtcs[i]->crtc_id); + continue; + } + + for (j = 0; j < new_target->stream_count; j++) + mod_freesync_add_stream( + adev->dm.freesync_module, + new_target->streams[j], &aconnector->caps); + } + } + + /* DC is optimized not to do anything if 'targets' didn't change. */ + dc_commit_targets(dm->dc, commit_targets, commit_targets_count); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + if (acrtc->target != NULL) + acrtc->otg_inst = + dc_target_get_status(acrtc->target)->primary_otg_inst; + } + + /* update planes when needed */ + for_each_plane_in_state(state, plane, old_plane_state, i) { + struct drm_plane_state *plane_state = plane->state; + struct drm_crtc *crtc = plane_state->crtc; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_connector *connector; + struct dm_connector_state *dm_state = NULL; + enum dm_commit_action action; + + if (!fb || !crtc || !crtc->state->active) + continue; + + action = get_dm_commit_action(crtc->state); + + /* Surfaces are created under two scenarios: + * 1. This commit is not a page flip. + * 2. This commit is a page flip, and targets are created. 
+ */ + if (!page_flip_needed( + plane_state, + old_plane_state, + crtc->state->event, true) || + action == DM_COMMIT_ACTION_DPMS_ON || + action == DM_COMMIT_ACTION_SET) { + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + if (connector->state->crtc == crtc) { + dm_state = to_dm_connector_state( + connector->state); + break; + } + } + + /* + * This situation happens in the following case: + * we are about to get set mode for connector who's only + * possible crtc (in encoder crtc mask) is used by + * another connector, that is why it will try to + * re-assing crtcs in order to make configuration + * supported. For our implementation we need to make all + * encoders support all crtcs, then this issue will + * never arise again. But to guard code from this issue + * check is left. + * + * Also it should be needed when used with actual + * drm_atomic_commit ioctl in future + */ + if (!dm_state) + continue; + + /* + * if flip is pending (ie, still waiting for fence to return + * before address is submitted) here, we cannot commit_surface + * as commit_surface will pre-maturely write out the future + * address. wait until flip is submitted before proceeding. + */ + wait_while_pflip_status(adev, acrtc, pflip_pending_predicate); + + dm_dc_surface_commit(dm->dc, crtc); + } + } + + for (i = 0; i < new_crtcs_count; i++) { + /* + * loop to enable interrupts on newly arrived crtc + */ + struct amdgpu_crtc *acrtc = new_crtcs[i]; + + if (adev->dm.freesync_module) { + for (j = 0; j < acrtc->target->stream_count; j++) + mod_freesync_notify_mode_change( + adev->dm.freesync_module, + acrtc->target->streams, + acrtc->target->stream_count); + } + + manage_dm_interrupts(adev, acrtc, true); + dm_crtc_cursor_reset(&acrtc->base); + + } + + /* Do actual flip */ + flip_crtcs_count = 0; + for_each_plane_in_state(state, plane, old_plane_state, i) { + struct drm_plane_state *plane_state = plane->state; + struct drm_crtc *crtc = plane_state->crtc; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = plane_state->fb; + + if (!fb || !crtc || !crtc->state->planes_changed || + !crtc->state->active) + continue; + + if (page_flip_needed( + plane_state, + old_plane_state, + crtc->state->event, + false)) { + amdgpu_crtc_submit_flip( + crtc, + fb, + work[flip_crtcs_count], + new_abo[i]); + flip_crtcs_count++; + /*clean up the flags for next usage*/ + acrtc->flip_flags = 0; + } + } + + /* In this state all old framebuffers would be unpinned */ + + /* TODO: Revisit when we support true asynchronous commit.*/ + if (!async) + drm_atomic_helper_cleanup_planes(dev, state); + + drm_atomic_state_put(state); + + return ret; +} +/* + * This functions handle all cases when set mode does not come upon hotplug. 
+ * This includes the case where the same display is unplugged and then plugged
+ * back into the same port, and the case where we are running without usermode
+ * desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
+{
+	struct drm_crtc *crtc;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	struct amdgpu_crtc *disconnected_acrtc;
+	const struct dc_sink *sink;
+	struct dc_target *commit_targets[6];
+	struct dc_target *current_target;
+	uint32_t commit_targets_count = 0;
+	int i;
+
+	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+		return;
+
+	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+
+	if (!disconnected_acrtc || !disconnected_acrtc->target)
+		return;
+
+	sink = disconnected_acrtc->target->streams[0]->sink;
+
+	/*
+	 * If the previous sink is not released and is different from the
+	 * current one, we deduce that we cannot rely on a usermode call to
+	 * turn on the display, so we do it here.
+	 */
+	if (sink != aconnector->dc_sink) {
+		struct dm_connector_state *dm_state =
+			to_dm_connector_state(aconnector->base.state);
+
+		struct dc_target *new_target =
+			create_target_for_sink(
+				aconnector,
+				&disconnected_acrtc->base.state->mode,
+				dm_state);
+
+		DRM_INFO("Headless hotplug, restoring connector state\n");
+		/*
+		 * we evade vblanks and pflips on the crtc that
+		 * should be changed
+		 */
+		manage_dm_interrupts(adev, disconnected_acrtc, false);
+		/* this is the update mode case */
+
+		current_target = disconnected_acrtc->target;
+
+		disconnected_acrtc->target = new_target;
+		disconnected_acrtc->enabled = true;
+		disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
+
+		commit_targets_count = 0;
+
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+			if (acrtc->target) {
+				commit_targets[commit_targets_count] = acrtc->target;
+				++commit_targets_count;
+			}
+		}
+
+		/* DC is optimized not to do anything if 'targets' didn't change. 
*/ + if (!dc_commit_targets(dc, commit_targets, + commit_targets_count)) { + DRM_INFO("Failed to restore connector state!\n"); + dc_target_release(disconnected_acrtc->target); + disconnected_acrtc->target = current_target; + manage_dm_interrupts(adev, disconnected_acrtc, true); + return; + } + + if (adev->dm.freesync_module) { + + for (i = 0; i < current_target->stream_count; i++) + mod_freesync_remove_stream( + adev->dm.freesync_module, + current_target->streams[i]); + + for (i = 0; i < new_target->stream_count; i++) + mod_freesync_add_stream( + adev->dm.freesync_module, + new_target->streams[i], + &aconnector->caps); + } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + if (acrtc->target != NULL) { + acrtc->otg_inst = + dc_target_get_status(acrtc->target)->primary_otg_inst; + } + } + + dc_target_release(current_target); + + dm_dc_surface_commit(dc, &disconnected_acrtc->base); + + manage_dm_interrupts(adev, disconnected_acrtc, true); + dm_crtc_cursor_reset(&disconnected_acrtc->base); + + } +} + +static uint32_t add_val_sets_surface( + struct dc_validation_set *val_sets, + uint32_t set_count, + const struct dc_target *target, + const struct dc_surface *surface) +{ + uint32_t i = 0; + + while (i < set_count) { + if (val_sets[i].target == target) + break; + ++i; + } + + val_sets[i].surfaces[val_sets[i].surface_count] = surface; + val_sets[i].surface_count++; + + return val_sets[i].surface_count; +} + +static uint32_t update_in_val_sets_target( + struct dc_validation_set *val_sets, + struct drm_crtc **crtcs, + uint32_t set_count, + const struct dc_target *old_target, + const struct dc_target *new_target, + struct drm_crtc *crtc) +{ + uint32_t i = 0; + + while (i < set_count) { + if (val_sets[i].target == old_target) + break; + ++i; + } + + val_sets[i].target = new_target; + crtcs[i] = crtc; + + if (i == set_count) { + /* nothing found. 
add new one to the end */ + return set_count + 1; + } + + return set_count; +} + +static uint32_t remove_from_val_sets( + struct dc_validation_set *val_sets, + uint32_t set_count, + const struct dc_target *target) +{ + int i; + + for (i = 0; i < set_count; i++) + if (val_sets[i].target == target) + break; + + if (i == set_count) { + /* nothing found */ + return set_count; + } + + set_count--; + + for (; i < set_count; i++) { + val_sets[i] = val_sets[i + 1]; + } + + return set_count; +} + +int amdgpu_dm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int i, j; + int ret; + int set_count; + int new_target_count; + struct dc_validation_set set[MAX_TARGETS] = {{ 0 }}; + struct dc_target *new_targets[MAX_TARGETS] = { 0 }; + struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 }; + struct amdgpu_device *adev = dev->dev_private; + struct dc *dc = adev->dm.dc; + bool need_to_validate = false; + + ret = drm_atomic_helper_check(dev, state); + + if (ret) { + DRM_ERROR("Atomic state validation failed with error :%d !\n", + ret); + return ret; + } + + ret = -EINVAL; + + /* copy existing configuration */ + new_target_count = 0; + set_count = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + if (acrtc->target) { + set[set_count].target = acrtc->target; + crtc_set[set_count] = crtc; + ++set_count; + } + } + + /* update changed items */ + for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_connector *aconnector = NULL; + enum dm_commit_action action; + + acrtc = to_amdgpu_crtc(crtc); + + aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true); + + action = get_dm_commit_action(crtc_state); + + switch (action) { + case DM_COMMIT_ACTION_DPMS_ON: + case DM_COMMIT_ACTION_SET: { + struct dc_target *new_target = NULL; + struct drm_connector_state *conn_state = NULL; + struct dm_connector_state *dm_state = NULL; + + if (aconnector) { + conn_state = drm_atomic_get_connector_state(state, &aconnector->base); + if (IS_ERR(conn_state)) + return ret; + dm_state = to_dm_connector_state(conn_state); + } + + new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state); + + /* + * we can have no target on ACTION_SET if a display + * was disconnected during S3, in this case it not and + * error, the OS will be updated after detection, and + * do the right thing on next atomic commit + */ + if (!new_target) { + DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n", + __func__, acrtc->base.base.id); + break; + } + + new_targets[new_target_count] = new_target; + set_count = update_in_val_sets_target( + set, + crtc_set, + set_count, + acrtc->target, + new_target, + crtc); + + new_target_count++; + need_to_validate = true; + break; + } + + case DM_COMMIT_ACTION_NOTHING: { + const struct drm_connector *drm_connector = NULL; + struct drm_connector_state *conn_state = NULL; + struct dm_connector_state *dm_state = NULL; + struct dm_connector_state *old_dm_state = NULL; + struct dc_target *new_target; + + if (!aconnector) + break; + + for_each_connector_in_state( + state, drm_connector, conn_state, j) { + if (&aconnector->base == drm_connector) + break; + } + + old_dm_state = to_dm_connector_state(drm_connector->state); + dm_state = to_dm_connector_state(conn_state); + + /* Support underscan adjustment*/ + if 
(!is_scaling_state_different(dm_state, old_dm_state))
+				break;
+
+			new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
+
+			if (!new_target) {
+				DRM_ERROR("%s: Failed to create new target for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				break;
+			}
+
+			new_targets[new_target_count] = new_target;
+			set_count = update_in_val_sets_target(
+					set,
+					crtc_set,
+					set_count,
+					acrtc->target,
+					new_target,
+					crtc);
+
+			new_target_count++;
+			need_to_validate = true;
+
+			break;
+		}
+		case DM_COMMIT_ACTION_DPMS_OFF:
+		case DM_COMMIT_ACTION_RESET:
+			/* i.e. reset mode */
+			if (acrtc->target) {
+				set_count = remove_from_val_sets(
+						set,
+						set_count,
+						acrtc->target);
+			}
+			break;
+		}
+
+		/*
+		 * TODO revisit when removing commit action
+		 * and looking at atomic flags directly
+		 */
+
+		/* commit needs planes right now (for gamma, e.g.) */
+		/* TODO rework commit to check crtc for gamma change */
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < set_count; i++) {
+		for_each_plane_in_state(state, plane, plane_state, j) {
+			struct drm_plane_state *old_plane_state = plane->state;
+			struct drm_crtc *crtc = plane_state->crtc;
+			struct drm_framebuffer *fb = plane_state->fb;
+			struct drm_connector *connector;
+			struct dm_connector_state *dm_state = NULL;
+			enum dm_commit_action action;
+			struct drm_crtc_state *crtc_state;
+
+			if (!fb || !crtc || crtc_set[i] != crtc ||
+				!crtc->state->planes_changed || !crtc->state->active)
+				continue;
+
+			action = get_dm_commit_action(crtc->state);
+
+			/* Surfaces are created under two scenarios:
+			 * 1. This commit is not a page flip.
+			 * 2. This commit is a page flip, and targets are created.
+			 */
+			crtc_state = drm_atomic_get_crtc_state(state, crtc);
+			if (!page_flip_needed(plane_state, old_plane_state,
+					crtc_state->event, true) ||
+					action == DM_COMMIT_ACTION_DPMS_ON ||
+					action == DM_COMMIT_ACTION_SET) {
+				struct dc_surface *surface;
+
+				list_for_each_entry(connector,
+					&dev->mode_config.connector_list, head) {
+					if (connector->state->crtc == crtc) {
+						dm_state = to_dm_connector_state(
+							connector->state);
+						break;
+					}
+				}
+
+				/*
+				 * This situation happens in the following case:
+				 * we are about to set a mode for a connector whose only
+				 * possible crtc (in the encoder crtc mask) is used by
+				 * another connector, that is why it will try to
+				 * re-assign crtcs in order to make the configuration
+				 * supported. For our implementation we need to make all
+				 * encoders support all crtcs, then this issue will
+				 * never arise again. But to guard the code from this
+				 * issue the check is left. 
+ * + * Also it should be needed when used with actual + * drm_atomic_commit ioctl in future + */ + if (!dm_state) + continue; + + surface = dc_create_surface(dc); + fill_plane_attributes( + surface, + plane_state, + false); + + add_val_sets_surface( + set, + set_count, + set[i].target, + surface); + + need_to_validate = true; + } + } + } + + if (need_to_validate == false || set_count == 0 || + dc_validate_resources(dc, set, set_count)) + ret = 0; + + for (i = 0; i < set_count; i++) { + for (j = 0; j < set[i].surface_count; j++) { + dc_surface_release(set[i].surfaces[j]); + } + } + for (i = 0; i < new_target_count; i++) + dc_target_release(new_targets[i]); + + if (ret != 0) + DRM_ERROR("Atomic check failed.\n"); + + return ret; +} + +static bool is_dp_capable_without_timing_msa( + struct dc *dc, + struct amdgpu_connector *amdgpu_connector) +{ + uint8_t dpcd_data; + bool capable = false; + if (amdgpu_connector->dc_link && + dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index, + DP_DOWN_STREAM_PORT_COUNT, + &dpcd_data, sizeof(dpcd_data)) ) + capable = dpcd_data & DP_MSA_TIMING_PAR_IGNORED? true:false; + + return capable; +} +void amdgpu_dm_add_sink_to_freesync_module( + struct drm_connector *connector, + struct edid *edid) +{ + int i; + uint64_t val_capable; + bool edid_check_required; + struct detailed_timing *timing; + struct detailed_non_pixel *data; + struct detailed_data_monitor_range *range; + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = dev->dev_private; + edid_check_required = false; + if (!amdgpu_connector->dc_sink) { + DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); + return; + } + if (!adev->dm.freesync_module) + return; + /* + * if edid non zero restrict freesync only for dp and edp + */ + if (edid) { + if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + edid_check_required = is_dp_capable_without_timing_msa( + adev->dm.dc, + amdgpu_connector); + } + } + val_capable = 0; + if (edid_check_required == true && (edid->version > 1 || + (edid->version == 1 && edid->revision > 1))) { + for (i = 0; i < 4; i++) { + + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + range = &data->data.range; + /* + * Check if monitor has continuous frequency mode + */ + if (data->type != EDID_DETAIL_MONITOR_RANGE) + continue; + /* + * Check for flag range limits only. If flag == 1 then + * no additional timing information provided. 
+ * Default GTF, GTF Secondary curve and CVT are not + * supported + */ + if (range->flags != 1) + continue; + + amdgpu_connector->min_vfreq = range->min_vfreq; + amdgpu_connector->max_vfreq = range->max_vfreq; + amdgpu_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; + break; + } + + if (amdgpu_connector->max_vfreq - + amdgpu_connector->min_vfreq > 10) { + amdgpu_connector->caps.supported = true; + amdgpu_connector->caps.min_refresh_in_micro_hz = + amdgpu_connector->min_vfreq * 1000000; + amdgpu_connector->caps.max_refresh_in_micro_hz = + amdgpu_connector->max_vfreq * 1000000; + val_capable = 1; + } + } + + /* + * TODO figure out how to notify user-mode or DRM of freesync caps + * once we figure out how to deal with freesync in an upstreamable + * fashion + */ + +} + +void amdgpu_dm_remove_sink_from_freesync_module( + struct drm_connector *connector) +{ + /* + * TODO fill in once we figure out how to deal with freesync in + * an upstreamable fashion + */ +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h new file mode 100644 index 000000000000..4f7bd3bae44e --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h @@ -0,0 +1,101 @@ +/* + * Copyright 2012-13 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_TYPES_H__ +#define __AMDGPU_DM_TYPES_H__ + +#include <drm/drmP.h> + +struct amdgpu_framebuffer; +struct amdgpu_display_manager; +struct dc_validation_set; +struct dc_surface; + +/*TODO Jodan Hersen use the one in amdgpu_dm*/ +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct amdgpu_crtc *amdgpu_crtc, + uint32_t link_index); +int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_connector *amdgpu_connector, + uint32_t link_index, + struct amdgpu_encoder *amdgpu_encoder); +int amdgpu_dm_encoder_init( + struct drm_device *dev, + struct amdgpu_encoder *aencoder, + uint32_t link_index); + +void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc); +void amdgpu_dm_connector_destroy(struct drm_connector *connector); +void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder); + +int amdgpu_dm_connector_get_modes(struct drm_connector *connector); + +int amdgpu_dm_atomic_commit( + struct drm_device *dev, + struct drm_atomic_state *state, + bool async); +int amdgpu_dm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state); + +int dm_create_validation_set_for_target( + struct drm_connector *connector, + struct drm_display_mode *mode, + struct dc_validation_set *val_set); + +void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector); +struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state( + struct drm_connector *connector); + +int amdgpu_dm_connector_atomic_set_property( + struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val); + +int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev); + +void amdgpu_dm_connector_init_helper( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector, + int connector_type, + const struct dc_link *link, + int link_index); + +int amdgpu_dm_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode); + +void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); + +void amdgpu_dm_add_sink_to_freesync_module( + struct drm_connector *connector, + struct edid *edid); + +void amdgpu_dm_remove_sink_from_freesync_module( + struct drm_connector *connector); + +extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; + +#endif /* __AMDGPU_DM_TYPES_H__ */ |