Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c            1328
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h              40
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c         90
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c     131
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c      40
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c   256
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c       171
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h         3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c          30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h           1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c       12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h        2
12 files changed, 1637 insertions, 467 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e9ac20bed0f2..7e7929f24ae4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -77,9 +77,11 @@
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
+#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
+#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -151,6 +153,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -363,13 +368,18 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
return false;
}
-static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
- int planes_count)
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
- int i, j;
+ const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+ const struct dc_surface_update *sb = (struct dc_surface_update *)b;
- for (i = 0, j = planes_count - 1; i < j; i++, j--)
- swap(array_of_surface_update[i], array_of_surface_update[j]);
+ /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+ return sb->surface->layer_index - sa->surface->layer_index;
}
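/*
 * Illustrative userspace sketch (editor's addition, not part of this patch):
 * a comparator of this shape gives a descending order by layer_index, so the
 * top-most plane ends up first in the array. qsort() stands in for the
 * kernel's sort() here; all names below are made up for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_surface { int layer_index; };
struct fake_update { const struct fake_surface *surface; };

static int layer_index_cmp_desc(const void *a, const void *b)
{
	const struct fake_update *sa = a;
	const struct fake_update *sb = b;

	return sb->surface->layer_index - sa->surface->layer_index;
}

int main(void)
{
	struct fake_surface s[] = { { 0 }, { 2 }, { 1 } };
	struct fake_update u[] = { { &s[0] }, { &s[1] }, { &s[2] } };
	int i;

	qsort(u, 3, sizeof(u[0]), layer_index_cmp_desc);

	for (i = 0; i < 3; i++)
		printf("%d ", u[i].surface->layer_index);	/* prints "2 1 0" */
	printf("\n");
	return 0;
}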
/**
@@ -396,7 +406,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_surface_update *array_of_surface_update)
{
- reverse_planes_order(array_of_surface_update, planes_count);
+ sort(array_of_surface_update, planes_count,
+ sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
/*
* Previous frame finished and HW is ready for optimization.
@@ -531,7 +542,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
- vblank = &drm_dev->vblank[acrtc->base.index];
+ vblank = drm_crtc_vblank_crtc(&acrtc->base);
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
frame_duration_ns = vblank->time - previous_timestamp;
@@ -594,12 +605,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
- if (acrtc->wb_pending) {
- if (acrtc->wb_conn) {
- spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+
+ if (acrtc->wb_pending) {
job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
struct drm_writeback_job,
list_entry);
+ acrtc->wb_pending = false;
spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
if (job) {
@@ -617,8 +630,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
acrtc->dm_irq_params.stream, 0);
}
} else
- DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
- acrtc->wb_pending = false;
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
}
vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
@@ -770,9 +782,9 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
- else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
@@ -784,10 +796,13 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_connector_list_iter_end(&iter);
if (hpd_aconnector) {
- if (notify->type == DMUB_NOTIFICATION_HPD)
+ if (notify->type == DMUB_NOTIFICATION_HPD) {
+ if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
+ DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
- else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
+ }
}
}
@@ -855,7 +870,31 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- struct dc_link *plink = NULL;
+ static const char *const event_type[] = {
+ "NO_DATA",
+ "AUX_REPLY",
+ "HPD",
+ "HPD_IRQ",
+ "SET_CONFIGC_REPLY",
+ "DPIA_NOTIFICATION",
+ };
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+ } else
+ break;
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+ DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -867,7 +906,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
+ event_type[notify.type]);
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
@@ -885,37 +925,12 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
dmub_hpd_wrk->adev = adev;
- if (notify.type == DMUB_NOTIFICATION_HPD) {
- plink = adev->dm.dc->links[notify.link_index];
- if (plink) {
- plink->hpd_status =
- notify.hpd_status == DP_HPD_PLUG;
- }
- }
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
} else {
dm->dmub_callback[notify.type](adev, &notify);
}
} while (notify.pending_notification);
}
-
-
- do {
- if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
- trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
- entry.param0, entry.param1);
-
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
- entry.trace_code, entry.tick_count, entry.param0, entry.param1);
- } else
- break;
-
- count++;
-
- } while (count <= DMUB_TRACE_MAX_READ);
-
- if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
static int dm_set_clockgating_state(void *handle,
@@ -953,8 +968,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
list_for_each_entry(mode, &connector->modes, head) {
- if (max_size < mode->htotal * mode->vtotal)
- max_size = mode->htotal * mode->vtotal;
+ if (max_size < (unsigned long) mode->htotal * mode->vtotal)
+ max_size = (unsigned long) mode->htotal * mode->vtotal;
}
if (max_size) {
@@ -1203,6 +1218,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+ memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);
+
/* Initialize hardware. */
memset(&hw_params, 0, sizeof(hw_params));
hw_params.fb_base = adev->gmc.fb_start;
@@ -1223,6 +1241,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -1274,6 +1293,7 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
enum dmub_status status;
bool init;
+ int r;
if (!dmub_srv) {
/* DMUB isn't supported on the ASIC. */
@@ -1291,7 +1311,9 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
- dm_dmub_hw_init(adev);
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1619,6 +1641,117 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
}
}
+void*
+dm_allocate_gpu_mem(
+ struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr)
+{
+ struct dal_allocation *da;
+ u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+ AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+ int ret;
+
+ da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
+ if (!da)
+ return NULL;
+
+ ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, &da->bo,
+ &da->gpu_addr, &da->cpu_ptr);
+
+ *addr = da->gpu_addr;
+
+ if (ret) {
+ kfree(da);
+ return NULL;
+ }
+
+ /* add da to list in dm */
+ list_add(&da->list, &adev->dm.da_list);
+
+ return da->cpu_ptr;
+}
+
+static enum dmub_status
+dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
+ enum dmub_gpint_command command_code,
+ uint16_t param,
+ uint32_t timeout_us)
+{
+ union dmub_gpint_data_register reg, test;
+ uint32_t i;
+
+ /* Assume that VBIOS DMUB is ready to take commands */
+
+ reg.bits.status = 1;
+ reg.bits.command_code = command_code;
+ reg.bits.param = param;
+
+ cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
+
+ for (i = 0; i < timeout_us; ++i) {
+ udelay(1);
+
+ /* Check if our GPINT got acked */
+ reg.bits.status = 0;
+ test = (union dmub_gpint_data_register)
+ cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
+
+ if (test.all == reg.all)
+ return DMUB_STATUS_OK;
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+{
+ struct dml2_soc_bb *bb;
+ long long addr;
+ int i = 0;
+ uint16_t chunk;
+ enum dmub_gpint_command send_addrs[] = {
+ DMUB_GPINT__SET_BB_ADDR_WORD0,
+ DMUB_GPINT__SET_BB_ADDR_WORD1,
+ DMUB_GPINT__SET_BB_ADDR_WORD2,
+ DMUB_GPINT__SET_BB_ADDR_WORD3,
+ };
+ enum dmub_status ret;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(4, 0, 1):
+ break;
+ default:
+ return NULL;
+ }
+
+ bb = dm_allocate_gpu_mem(adev,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(struct dml2_soc_bb),
+ &addr);
+ if (!bb)
+ return NULL;
+
+ for (i = 0; i < 4; i++) {
+ /* Extract 16-bit chunk */
+ chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
+ /* Send the chunk */
+ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
+ if (ret != DMUB_STATUS_OK)
+ /* No need to free bb here since it shall be done unconditionally <elsewhere> */
+ return NULL;
+ }
+
+ /* Now ask DMUB to copy the bb */
+ ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
+ if (ret != DMUB_STATUS_OK)
+ return NULL;
+
+ return bb;
+}
+
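/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * bounding-box handoff above passes a 64-bit GART address to DMUB as four
 * 16-bit GPINT words, least-significant word first. This standalone round
 * trip only demonstrates that the decomposition is lossless; the address
 * value is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000008012345678ULL;	/* example address only */
	uint64_t check = 0;
	uint16_t word[4];
	int i;

	for (i = 0; i < 4; i++)
		word[i] = (addr >> (i * 16)) & 0xFFFF;	/* WORD0..WORD3 */

	for (i = 0; i < 4; i++)
		check |= (uint64_t)word[i] << (i * 16);

	printf("lossless: %s\n", addr == check ? "yes" : "no");
	return 0;
}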
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1654,13 +1787,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.driver = adev;
- adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
-
- if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
- goto error;
- }
-
+ /* cgs_device was created in dm_sw_init() */
init_data.cgs_device = adev->dm.cgs_device;
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
@@ -1736,7 +1863,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
else
- init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ init_data.flags.disable_ips = DMUB_IPS_ENABLE;
init_data.flags.disable_ips_in_vpb = 0;
@@ -1744,10 +1871,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- INIT_LIST_HEAD(&adev->dm.da_list);
-
retrieve_dmi_info(&adev->dm);
+ if (adev->dm.bb_from_dmub)
+ init_data.bb_from_dmub = adev->dm.bb_from_dmub;
+ else
+ init_data.bb_from_dmub = NULL;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1781,8 +1911,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
- if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
+ adev->dm.dc->debug.using_dml21 = true;
+ }
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
@@ -1832,6 +1964,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
+ if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+ adev->dm.idle_workqueue = idle_create_workqueue(adev);
+
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1929,6 +2064,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue = NULL;
}
+ if (adev->dm.idle_workqueue) {
+ if (adev->dm.idle_workqueue->running) {
+ adev->dm.idle_workqueue->enable = false;
+ flush_work(&adev->dm.idle_workqueue->work);
+ }
+
+ kfree(adev->dm.idle_workqueue);
+ adev->dm.idle_workqueue = NULL;
+ }
+
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -2059,6 +2204,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
return 0;
default:
break;
@@ -2182,6 +2328,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
+ case IP_VERSION(4, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN401;
+ break;
+
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -2285,6 +2435,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
+ adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
+
return 0;
}
@@ -2293,6 +2445,16 @@ static int dm_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ return -EINVAL;
+ }
+
+ /* Moved from dm init since we need to use allocations for storing bounding box data */
+ INIT_LIST_HEAD(&adev->dm.da_list);
+
r = dm_dmub_sw_init(adev);
if (r)
return r;
@@ -2304,6 +2466,9 @@ static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ kfree(adev->dm.bb_from_dmub);
+ adev->dm.bb_from_dmub = NULL;
+
kfree(adev->dm.dmub_fb_info);
adev->dm.dmub_fb_info = NULL;
@@ -2335,13 +2500,13 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
- DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector,
aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
- DRM_ERROR("DM_MST: Failed to start MST\n");
+ drm_err(dev, "DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -2567,8 +2732,12 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
static int dm_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
/* Create DAL display manager */
- amdgpu_dm_init(adev);
+ r = amdgpu_dm_init(adev);
+ if (r)
+ return r;
amdgpu_dm_hpd_init(adev);
return 0;
@@ -2701,7 +2870,8 @@ static int dm_suspend(void *handle)
dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+ if (dm->cached_dc_state)
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
amdgpu_dm_commit_zero_streams(dm->dc);
@@ -2943,6 +3113,7 @@ static int dm_resume(void *handle)
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
+ dc_exit_ips_for_hw_access(dm->dc);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3004,7 +3175,7 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
- if (aconnector && aconnector->mst_root)
+ if (aconnector->mst_root)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3015,7 +3186,8 @@ static int dm_resume(void *handle)
emulated_link_detect(aconnector->dc_link);
} else {
mutex_lock(&dm->dc_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ dc_exit_ips_for_hw_access(dm->dc);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
@@ -3267,15 +3439,15 @@ void amdgpu_dm_update_connector_after_detect(
* We got a DP short pulse (Link Loss, DP CTS, etc...).
* Do nothing!!
*/
- DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
- aconnector->connector_id);
+ drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
if (sink)
dc_sink_release(sink);
return;
}
- DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
- aconnector->connector_id, aconnector->dc_sink, sink);
+ drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
mutex_lock(&dev->mode_config.mutex);
@@ -3351,6 +3523,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
if (adev->dm.disable_hpd_irq)
@@ -3385,6 +3558,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
} else {
mutex_lock(&adev->dm.dc_lock);
+ dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
if (ret) {
@@ -3444,6 +3618,7 @@ static void handle_hpd_rx_irq(void *param)
bool has_left_work = false;
int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+ struct dc *dc = aconnector->dc_link->ctx->dc;
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3533,6 +3708,7 @@ out:
bool ret = false;
mutex_lock(&adev->dm.dc_lock);
+ dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
mutex_unlock(&adev->dm.dc_lock);
@@ -3561,7 +3737,7 @@ out:
mutex_unlock(&aconnector->hpd_lock);
}
-static void register_hpd_handlers(struct amdgpu_device *adev)
+static int register_hpd_handlers(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
@@ -3573,11 +3749,17 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
+ dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ return -EINVAL;
+ }
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
+ dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ return -EINVAL;
+ }
}
list_for_each_entry(connector,
@@ -3593,9 +3775,16 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- handle_hpd_irq,
- (void *) aconnector);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
+ DRM_ERROR("Failed to register hpd irq!\n");
+ return -EINVAL;
+ }
+
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq, (void *) aconnector))
+ return -ENOMEM;
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -3604,11 +3793,19 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd_rx;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- handle_hpd_rx_irq,
- (void *) aconnector);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
+ int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
+ DRM_ERROR("Failed to register hpd rx irq!\n");
+ return -EINVAL;
+ }
+
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq, (void *) aconnector))
+ return -ENOMEM;
}
}
+ return 0;
}
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -3649,13 +3846,21 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i + 1, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
+ DRM_ERROR("Failed to register vblank irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_crtc_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* Use GRPH_PFLIP interrupt */
@@ -3671,14 +3876,21 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
+ int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
+ DRM_ERROR("Failed to register pflip irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_pflip_high_irq, c_irq_params);
-
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* HPD */
@@ -3689,9 +3901,9 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
return r;
}
- register_hpd_handlers(adev);
+ r = register_hpd_handlers(adev);
- return 0;
+ return r;
}
#endif
@@ -3735,13 +3947,21 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
+ DRM_ERROR("Failed to register vblank irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_crtc_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* Use VUPDATE interrupt */
@@ -3756,13 +3976,21 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
+ DRM_ERROR("Failed to register vupdate irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_vupdate_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* Use GRPH_PFLIP interrupt */
@@ -3778,14 +4006,21 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
+ int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
+ DRM_ERROR("Failed to register pflip irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_pflip_high_irq, c_irq_params);
-
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* HPD */
@@ -3796,9 +4031,9 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return r;
}
- register_hpd_handlers(adev);
+ r = register_hpd_handlers(adev);
- return 0;
+ return r;
}
/* Register IRQ sources and initialize IRQ callbacks */
@@ -3850,13 +4085,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
+ DRM_ERROR("Failed to register vblank irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(
- adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* Use otg vertical line interrupt */
@@ -3874,9 +4117,11 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
- if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
- DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
- break;
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
+ int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
+ DRM_ERROR("Failed to register vline0 irq!\n");
+ return -EINVAL;
}
c_irq_params = &adev->dm.vline0_params[int_params.irq_source
@@ -3885,8 +4130,10 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq,
+ c_irq_params))
+ return -ENOMEM;
}
#endif
@@ -3909,13 +4156,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
+ int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
+ DRM_ERROR("Failed to register vupdate irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_vupdate_high_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* Use GRPH_PFLIP interrupt */
@@ -3932,14 +4187,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
+ int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
+ int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
+ DRM_ERROR("Failed to register pflip irq!\n");
+ return -EINVAL;
+ }
+
c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_pflip_high_irq, c_irq_params);
-
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params))
+ return -ENOMEM;
}
/* HPD */
@@ -3950,9 +4212,9 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
return r;
}
- register_hpd_handlers(adev);
+ r = register_hpd_handlers(adev);
- return 0;
+ return r;
}
/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
@@ -3983,8 +4245,9 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_dmub_outbox1_low_irq, c_irq_params);
+ if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params))
+ return -ENOMEM;
}
return 0;
@@ -4119,8 +4382,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
}
#ifdef AMD_PRIVATE_COLOR
- if (amdgpu_dm_create_color_properties(adev))
+ if (amdgpu_dm_create_color_properties(adev)) {
+ dc_state_release(state->context);
+ kfree(state);
return -ENOMEM;
+ }
#endif
r = amdgpu_dm_audio_init(adev);
@@ -4321,6 +4587,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
+ struct amdgpu_dm_backlight_caps caps = { 0 };
char bl_name[16];
if (aconnector->bl_idx == -1)
@@ -4333,8 +4600,16 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ if (power_supply_is_system_supplied() > 0)
+ props.brightness = caps.ac_level;
+ else
+ props.brightness = caps.dc_level;
+ } else
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
- props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -4456,7 +4731,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
- ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+ if (primary_planes > AMDGPU_MAX_PLANES) {
+ DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ return -EINVAL;
+ }
/*
* Initialize primary planes, implicit planes for legacy IOCTLS.
@@ -4523,6 +4801,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4545,6 +4824,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
psr_feature_enabled = true;
break;
default:
@@ -4574,17 +4854,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ if (link_cnt > MAX_LINKS) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ MAX_LINKS);
+ goto fail;
+ }
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
- if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
- DRM_ERROR(
- "KMS: Cannot support more than %d display indexes\n",
- AMDGPU_DM_MAX_DISPLAY_INDEX);
- continue;
- }
-
link = dc_get_link_at_index(dm->dc, i);
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
@@ -4639,6 +4919,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
bool ret = false;
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
ret = dc_link_detect(link, DETECT_REASON_BOOT);
mutex_unlock(&dm->dc_lock);
@@ -4719,6 +5000,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4855,6 +5137,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
+ case IP_VERSION(4, 0, 1):
+ fw_name_dmub = FIRMWARE_DCN_401_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -4979,6 +5264,7 @@ static int dm_early_init(void *handle)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
@@ -6026,6 +6312,7 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode,
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -6044,7 +6331,6 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
}
}
-
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps,
@@ -6108,7 +6394,6 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
}
}
-
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -6132,13 +6417,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ if (sink->sink_signal == SIGNAL_TYPE_EDP &&
!aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
- } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
@@ -6186,6 +6471,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
+#endif
static struct dc_stream_state *
create_stream_for_sink(struct drm_connector *connector,
@@ -6207,8 +6493,9 @@ create_stream_for_sink(struct drm_connector *connector,
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct dsc_dec_dpcd_caps dsc_caps;
-
+#endif
struct dc_link *link = NULL;
struct dc_sink *sink = NULL;
@@ -6324,10 +6611,12 @@ create_stream_for_sink(struct drm_connector *connector,
stream->timing = *aconnector->timing_requested;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+#endif
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -6551,12 +6840,34 @@ static const struct attribute_group amdgpu_group = {
.attrs = amdgpu_attrs
};
+static bool
+amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ if (amdgpu_dm_abm_level >= 0)
+ return false;
+
+ if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ return false;
+
+ /* check for OLED panels */
+ if (amdgpu_dm_connector->bl_idx >= 0) {
+ struct drm_device *drm = amdgpu_dm_connector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
+ struct amdgpu_dm_backlight_caps *caps;
+
+ caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx];
+ if (caps->aux_support)
+ return false;
+ }
+
+ return true;
+}
+
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- amdgpu_dm_abm_level < 0)
+ if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
@@ -6663,8 +6974,7 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- amdgpu_dm_abm_level < 0) {
+ if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) {
r = sysfs_create_group(&connector->kdev->kobj,
&amdgpu_group);
if (r)
@@ -6788,7 +7098,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
aconnector->dc_sink = aconnector->dc_link->local_sink ?
aconnector->dc_link->local_sink :
aconnector->dc_em_sink;
- dc_sink_retain(aconnector->dc_sink);
+ if (aconnector->dc_sink)
+ dc_sink_retain(aconnector->dc_sink);
}
}
@@ -7251,7 +7562,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
}
- if (j == dc_state->stream_count)
+ if (j == dc_state->stream_count || pbn_div == 0)
continue;
slot_num = DIV_ROUND_UP(pbn, pbn_div);
@@ -7615,7 +7926,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ if (encoder)
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8346,6 +8658,77 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
}
+static void amdgpu_dm_update_cursor(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct dc_stream_update *update)
+{
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position = {0};
+ struct dc_cursor_attributes attributes;
+ int ret;
+
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+ drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
+ amdgpu_crtc->crtc_id, plane->state->crtc_w,
+ plane->state->crtc_h);
+
+ ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream) {
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ update->cursor_position = &crtc_state->stream->cursor_position;
+ }
+ return;
+ }
+
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
+ memset(&attributes, 0, sizeof(attributes));
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
+
+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+ * legacy gamma setup.
+ */
+ if (crtc_state->cm_is_degamma_srgb &&
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
+ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+
+ if (crtc_state->stream) {
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
+
+ update->cursor_attributes = &crtc_state->stream->cursor_attributes;
+
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+
+ update->cursor_position = &crtc_state->stream->cursor_position;
+ }
+}
+
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
@@ -8369,6 +8752,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bool cursor_update = false;
bool pflip_present = false;
bool dirty_rects_changed = false;
+ bool updated_planes_and_streams = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -8388,8 +8772,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Disable the cursor first if we're disabling all the planes.
* It'll remain on the screen after the planes are re-enabled
* if we don't.
+ *
+ * If the cursor is transitioning from native to overlay mode, the
+ * native cursor needs to be disabled first.
*/
- if (acrtc_state->active_planes == 0)
+ if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+ dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+ struct dc_cursor_position cursor_position = {0};
+
+ if (!dc_stream_set_cursor_position(acrtc_state->stream,
+ &cursor_position))
+ drm_err(dev, "DC failed to disable native cursor\n");
+
+ bundle->stream_update.cursor_position =
+ &acrtc_state->stream->cursor_position;
+ }
+
+ if (acrtc_state->active_planes == 0 &&
+ dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
@@ -8403,10 +8803,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
/* Cursor plane is handled after stream updates */
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
if ((fb && crtc == pcrtc) ||
- (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
cursor_update = true;
+ if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0)
+ amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update);
+ }
continue;
}
@@ -8605,15 +9009,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
- } else if (cursor_update && acrtc_state->active_planes > 0 &&
- acrtc_attach->base.state->event) {
- drm_crtc_vblank_get(pcrtc);
-
+ } else if (cursor_update && acrtc_state->active_planes > 0) {
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
-
- acrtc_attach->event = acrtc_attach->base.state->event;
- acrtc_attach->base.state->event = NULL;
-
+ if (acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+ }
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
@@ -8679,6 +9081,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream,
&bundle->stream_update,
bundle->surface_updates);
+ updated_planes_and_streams = true;
/**
* Enable or disable the interrupts on the backend.
@@ -8756,7 +9159,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* This avoids redundant programming in the case where we're going
* to be disabling a single plane - those pipes are being disabled.
*/
- if (acrtc_state->active_planes)
+ if (acrtc_state->active_planes &&
+ (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
+ acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
cleanup:
@@ -8945,7 +9350,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
memset(&position, 0, sizeof(position));
mutex_lock(&dm->dc_lock);
- dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ dc_exit_ips_for_hw_access(dm->dc);
+ dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
mutex_unlock(&dm->dc_lock);
}
@@ -8961,7 +9367,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
- DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+ drm_dbg_atomic(dev,
+ "Atomic commit: SET crtc id %d: [%p]\n",
+ acrtc->crtc_id, acrtc);
if (!dm_new_crtc_state->stream) {
/*
@@ -8979,8 +9387,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
* have a sink to keep the pipe running so that
* hw state is consistent with the sw state
*/
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
- __func__, acrtc->base.base.id);
+ drm_dbg_atomic(dev,
+ "Failed to create new stream for crtc %d\n",
+ acrtc->base.base.id);
continue;
}
@@ -8994,7 +9403,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
} else if (modereset_required(new_crtc_state)) {
- DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ drm_dbg_atomic(dev,
+ "Atomic commit: RESET. crtc id %d:[%p]\n",
+ acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -9014,6 +9425,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
@@ -9377,8 +9789,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
+ sort(dummy_updates, status->plane_count,
+ sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
dc_update_planes_and_stream(dm->dc,
dummy_updates,
status->plane_count,
@@ -9449,7 +9864,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#endif
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
- DRM_DEBUG_DRIVER("Failed to configure crc source");
+ drm_dbg_atomic(dev, "Failed to configure crc source");
}
}
#endif
@@ -10066,7 +10481,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
{
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
@@ -10088,14 +10504,38 @@ static bool should_reset_plane(struct drm_atomic_state *state,
new_crtc_state =
drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+ old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
if (!new_crtc_state)
return true;
+ /*
+ * A change in cursor mode means a new dc pipe needs to be acquired or
+ * released from the state
+ */
+ old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ old_dm_crtc_state != NULL &&
+ old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
+ return true;
+ }
+
/* CRTC Degamma changes currently require us to recreate planes. */
if (new_crtc_state->color_mgmt_changed)
return true;
+ /*
+ * On zpos change, planes need to be reordered by removing and re-adding
+ * them one by one to the dc state, in order of descending zpos.
+ *
+ * TODO: We can likely skip bandwidth validation if the only thing that
+ * changed about the plane was its z-ordering.
+ */
+ if (new_crtc_state->zpos_changed)
+ return true;
+
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
@@ -10227,12 +10667,14 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
* check tiling flags when the FB doesn't have a modifier.
*/
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
- if (adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0;
+ } else if (adev->family >= AMDGPU_FAMILY_AI) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+ } else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
- } else {
- linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
}
if (!linear) {
DRM_DEBUG_ATOMIC("Cursor FB not linear");
@@ -10243,6 +10685,68 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
return 0;
}
+/*
+ * Helper function for checking the cursor in native mode
+ */
+static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
+ struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state,
+ bool enable)
+{
+
+ struct amdgpu_crtc *new_acrtc;
+ int ret;
+
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *old_plane_crtc,
+ struct drm_crtc *new_plane_crtc,
+ bool enable)
+{
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ if (!enable) {
+ if (old_plane_crtc == NULL)
+ return true;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+ } else {
+ if (new_plane_crtc == NULL)
+ return true;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+ }
+}
+
static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -10258,8 +10762,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- struct amdgpu_crtc *new_acrtc;
- bool needs_reset;
+ bool needs_reset, update_native_cursor;
int ret = 0;
@@ -10268,24 +10771,16 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- if (!enable || !new_plane_crtc ||
- drm_atomic_plane_disabling(plane->state, new_plane_state))
- return 0;
-
- new_acrtc = to_amdgpu_crtc(new_plane_crtc);
-
- if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
- DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
- return -EINVAL;
- }
+ update_native_cursor = dm_should_update_native_cursor(state,
+ old_plane_crtc,
+ new_plane_crtc,
+ enable);
- if (new_plane_state->fb) {
- ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
- new_plane_state->fb);
- if (ret)
- return ret;
- }
+ if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
+ ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+ new_plane_state, enable);
+ if (ret)
+ return ret;
return 0;
}
@@ -10351,20 +10846,14 @@ static int dm_update_plane_state(struct dc *dc,
ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
- return ret;
+ goto out;
WARN_ON(dm_new_plane_state->dc_state);
dc_new_plane_state = dc_create_plane_state(dc);
- if (!dc_new_plane_state)
- return -ENOMEM;
-
- /* Block top most plane from being a video plane */
- if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
- return -EINVAL;
-
- *is_top_most_overlay = false;
+ if (!dc_new_plane_state) {
+ ret = -ENOMEM;
+ goto out;
}
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -10377,13 +10866,13 @@ static int dm_update_plane_state(struct dc *dc,
new_crtc_state);
if (ret) {
dc_plane_state_release(dc_new_plane_state);
- return ret;
+ goto out;
}
ret = dm_atomic_get_state(state, &dm_state);
if (ret) {
dc_plane_state_release(dc_new_plane_state);
- return ret;
+ goto out;
}
/*
@@ -10400,7 +10889,8 @@ static int dm_update_plane_state(struct dc *dc,
dm_state->context)) {
dc_plane_state_release(dc_new_plane_state);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
dm_new_plane_state->dc_state = dc_new_plane_state;
@@ -10415,6 +10905,16 @@ static int dm_update_plane_state(struct dc *dc,
*lock_and_validation_needed = true;
}
+out:
+ /* If enabling cursor overlay failed, attempt fallback to native mode */
+ if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
+ ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+ new_plane_state, enable);
+ if (ret)
+ return ret;
+
+ dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
+ }
return ret;
}
@@ -10448,99 +10948,64 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
}
-static int dm_check_crtc_cursor(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_crtc_state)
+/*
+ * The normalized_zpos value cannot be used by this iterator directly. It's only
+ * calculated for enabled planes, potentially causing normalized_zpos collisions
+ * between enabled/disabled planes in the atomic state. We need a unique value
+ * so that the iterator will not generate the same object twice, or loop
+ * indefinitely.
+ */
+static inline struct __drm_planes_state *__get_next_zpos(
+ struct drm_atomic_state *state,
+ struct __drm_planes_state *prev)
{
- struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *new_cursor_state, *new_underlying_state;
- int i;
- int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
- bool any_relevant_change = false;
-
- /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
- * cursor per pipe but it's going to inherit the scaling and
- * positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the underlying planes'.
- */
-
- /* If no plane was enabled or changed scaling, no need to check again */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
- int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
-
- if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
- continue;
-
- if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
- any_relevant_change = true;
- break;
- }
-
- if (new_plane_state->fb == old_plane_state->fb &&
- new_plane_state->crtc_w == old_plane_state->crtc_w &&
- new_plane_state->crtc_h == old_plane_state->crtc_h)
- continue;
-
- dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
- dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+ unsigned int highest_zpos = 0, prev_zpos = 256;
+ uint32_t highest_id = 0, prev_id = UINT_MAX;
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i, highest_i = -1;
- if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
- any_relevant_change = true;
- break;
- }
+ if (prev != NULL) {
+ prev_zpos = prev->new_state->zpos;
+ prev_id = prev->ptr->base.id;
}
- if (!any_relevant_change)
- return 0;
-
- new_cursor_state = drm_atomic_get_plane_state(state, cursor);
- if (IS_ERR(new_cursor_state))
- return PTR_ERR(new_cursor_state);
-
- if (!new_cursor_state->fb)
- return 0;
-
- dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
-
- /* Need to check all enabled planes, even if this commit doesn't change
- * their state
- */
- i = drm_atomic_add_affected_planes(state, crtc);
- if (i)
- return i;
-
- for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
- /* Narrow down to non-cursor planes on the same CRTC as the cursor */
- if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ /* Skip planes with higher zpos than the previously returned */
+ if (new_plane_state->zpos > prev_zpos ||
+ (new_plane_state->zpos == prev_zpos &&
+ plane->base.id >= prev_id))
continue;
- /* Ignore disabled planes */
- if (!new_underlying_state->fb)
- continue;
-
- dm_get_plane_scale(new_underlying_state,
- &underlying_scale_w, &underlying_scale_h);
-
- if (cursor_scale_w != underlying_scale_w ||
- cursor_scale_h != underlying_scale_h) {
- drm_dbg_atomic(crtc->dev,
- "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
- cursor->base.id, cursor->name, underlying->base.id, underlying->name);
- return -EINVAL;
+ /* Save the index of the plane with highest zpos */
+ if (new_plane_state->zpos > highest_zpos ||
+ (new_plane_state->zpos == highest_zpos &&
+ plane->base.id > highest_id)) {
+ highest_zpos = new_plane_state->zpos;
+ highest_id = plane->base.id;
+ highest_i = i;
}
-
- /* If this plane covers the whole CRTC, no need to check planes underneath */
- if (new_underlying_state->crtc_x <= 0 &&
- new_underlying_state->crtc_y <= 0 &&
- new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
- new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
- break;
}
- return 0;
+ if (highest_i < 0)
+ return NULL;
+
+ return &state->planes[highest_i];
}
+/*
+ * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
+ * by descending zpos, as read from the new plane state. This is the same
+ * ordering as defined by drm_atomic_normalize_zpos().
+ */
+#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
+ for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
+ __i != NULL; __i = __get_next_zpos((__state), __i)) \
+ for_each_if(((plane) = __i->ptr, \
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+ (old_plane_state) = __i->old_state, \
+ (new_plane_state) = __i->new_state, 1))
+
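A minimal usage sketch of the iterator defined above (illustration only; handle_surface() is a hypothetical callback, not part of this patch): it visits every plane in the atomic state from highest to lowest zpos, the order in which DC expects surfaces to be programmed.

	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;

	/* Top-most plane (highest zpos) is visited first */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
		if (new_plane_state->crtc != crtc)
			continue;
		handle_surface(plane, old_plane_state, new_plane_state);
	}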
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -10572,6 +11037,169 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
/**
+ * DOC: Cursor Modes - Native vs Overlay
+ *
+ * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
+ * plane. It does not require a dedicated hw plane to enable, but it is
+ * subject to the same z-order and scaling as the hw plane. It also has format
+ * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
+ * hw plane.
+ *
+ * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
+ * own scaling and z-pos. It also has no blending restrictions. This yields
+ * cursor behavior closer to a DRM client's expectations. However, it does
+ * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
+ * available.
+ */
+
+/**
+ * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
+ * @adev: amdgpu device
+ * @state: DRM atomic state
+ * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
+ * @cursor_mode: Returns the required cursor mode on dm_crtc_state
+ *
+ * Determine whether the cursor on dm_crtc_state should be enabled in native
+ * mode or in overlay mode.
+ *
+ * The cursor should be enabled in overlay mode if there exists an underlying
+ * plane - on which the cursor may be blended - that is either YUV-formatted or
+ * scaled differently from the cursor.
+ *
+ * Since zpos info is required, drm_atomic_normalize_zpos must be called before
+ * calling this function.
+ *
+ * Return: 0 on success, or an error code if getting the cursor plane state
+ * failed.
+ */
+static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
+ struct drm_atomic_state *state,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_cursor_mode *cursor_mode)
+{
+ struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
+ struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
+ struct drm_plane *plane;
+ bool consider_mode_change = false;
+ bool entire_crtc_covered = false;
+ bool cursor_changed = false;
+ int underlying_scale_w, underlying_scale_h;
+ int cursor_scale_w, cursor_scale_h;
+ int i;
+
+ /* Overlay cursor is not supported on HW before DCN.
+ * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane
+ * restrictions of previous DCN generations, so enable native mode on DCN401
+ * in addition to DCE.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
+ amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
+ *cursor_mode = DM_CURSOR_NATIVE_MODE;
+ return 0;
+ }
+
+ /* Init cursor_mode to be the same as current */
+ *cursor_mode = dm_crtc_state->cursor_mode;
+
+ /*
+ * Cursor mode can change if a plane's format changes, scale changes, is
+ * enabled/disabled, or z-order changes.
+ */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ /* Only care about planes on this CRTC */
+ if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
+ continue;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor_changed = true;
+
+ if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
+ drm_atomic_plane_disabling(old_plane_state, plane_state) ||
+ old_plane_state->fb->format != plane_state->fb->format) {
+ consider_mode_change = true;
+ break;
+ }
+
+ dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ consider_mode_change = true;
+ break;
+ }
+ }
+
+ if (!consider_mode_change && !crtc_state->zpos_changed)
+ return 0;
+
+ /*
+ * If the cursor did not change on this CRTC and is not enabled on it, there
+ * is no need to determine the cursor mode. This avoids needlessly locking
+ * the cursor state.
+ */
+ if (!cursor_changed &&
+ !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
+ return 0;
+ }
+
+ cursor_state = drm_atomic_get_plane_state(state,
+ crtc_state->crtc->cursor);
+ if (IS_ERR(cursor_state))
+ return PTR_ERR(cursor_state);
+
+ /* Cursor is disabled */
+ if (!cursor_state->fb)
+ return 0;
+
+ /* For all planes in descending z-order (all of which are below the cursor
+ * as per the zpos definitions), check their scaling and format
+ */
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
+
+ /* Only care about non-cursor planes on this CRTC */
+ if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
+ plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Underlying plane is YUV format - use overlay cursor */
+ if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+ return 0;
+ }
+
+ dm_get_plane_scale(plane_state,
+ &underlying_scale_w, &underlying_scale_h);
+ dm_get_plane_scale(cursor_state,
+ &cursor_scale_w, &cursor_scale_h);
+
+ /* Underlying plane has different scale - use overlay cursor */
+ if (cursor_scale_w != underlying_scale_w &&
+ cursor_scale_h != underlying_scale_h) {
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+ return 0;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
+ plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
+ plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
+ entire_crtc_covered = true;
+ break;
+ }
+ }
+
+ /* If planes do not cover the entire CRTC, use overlay mode to enable
+ * cursor over holes
+ */
+ if (entire_crtc_covered)
+ *cursor_mode = DM_CURSOR_NATIVE_MODE;
+ else
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+
+ return 0;
+}
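As a rough worked example of the scale comparison above, using dm_get_plane_scale() (which computes crtc size * 1000 / source size): a 64x64 cursor shown at 64x64 has a scale of 1000x1000, while a 1920x1080 underlying plane stretched to fill a 3840x2160 CRTC has a scale of 2000x2000, so the mismatch selects DM_CURSOR_OVERLAY_MODE; an unscaled, non-YUV underlying plane (scale 1000x1000) would leave the cursor in DM_CURSOR_NATIVE_MODE.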
+
+/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
* @dev: The DRM device
@@ -10607,7 +11235,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state;
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
@@ -10621,7 +11249,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret) {
- DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
+ drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n");
goto fail;
}
@@ -10636,7 +11264,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
- DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
+ drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n");
ret = PTR_ERR(new_crtc_state);
goto fail;
}
@@ -10651,7 +11279,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
if (ret) {
- DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
+ drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n");
goto fail;
}
}
@@ -10668,7 +11296,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
if (ret) {
- DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
+ drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n");
goto fail;
}
@@ -10677,13 +11305,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret) {
- DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
+ drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n");
goto fail;
}
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret) {
- DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
+ drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n");
goto fail;
}
@@ -10722,7 +11350,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (IS_ERR(new_plane_state)) {
ret = PTR_ERR(new_plane_state);
- DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
+ drm_dbg_atomic(dev, "new_plane_state is BAD\n");
goto fail;
}
}
@@ -10740,8 +11368,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ /*
+ * Determine whether cursors on each CRTC should be enabled in native or
+ * overlay mode.
+ */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+ &dm_new_crtc_state->cursor_mode);
+ if (ret) {
+ drm_dbg(dev, "Failed to determine cursor mode\n");
+ goto fail;
+ }
+ }
+
/* Remove existing planes if they are modified */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
if (old_plane_state->fb && new_plane_state->fb &&
get_mem_type(old_plane_state->fb) !=
get_mem_type(new_plane_state->fb))
@@ -10754,7 +11397,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
&lock_and_validation_needed,
&is_top_most_overlay);
if (ret) {
- DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
goto fail;
}
}
@@ -10767,7 +11410,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
false,
&lock_and_validation_needed);
if (ret) {
- DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
+ drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
goto fail;
}
}
@@ -10780,13 +11423,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
true,
&lock_and_validation_needed);
if (ret) {
- DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
+ drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
goto fail;
}
}
/* Add new/modified planes */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
new_plane_state,
@@ -10794,35 +11437,75 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
&lock_and_validation_needed,
&is_top_most_overlay);
if (ret) {
- DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
goto fail;
}
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
+#endif
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
- DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
+ drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
goto fail;
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->mpo_requested)
- DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
}
- /* Check cursor planes scaling */
+ /* Check cursor restrictions */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ enum amdgpu_dm_cursor_mode required_cursor_mode;
+ int is_rotated, is_scaled;
+
+ /* Overlay cursor is not subject to native cursor restrictions */
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ continue;
+
+ /* Check if rotation or scaling is enabled on DCN401 */
+ if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
+ amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
+ new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
+
+ is_rotated = new_cursor_state &&
+ ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
+ is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
+ (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));
+
+ if (is_rotated || is_scaled) {
+ drm_dbg_driver(
+ crtc->dev,
+ "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* If HW can only do native cursor, check restrictions again */
+ ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+ &required_cursor_mode);
if (ret) {
- DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
+ drm_dbg_driver(crtc->dev,
+ "[CRTC:%d:%s] Checking cursor mode failed\n",
+ crtc->base.id, crtc->name);
+ goto fail;
+ } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+ drm_dbg_driver(crtc->dev,
+ "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
goto fail;
}
}
@@ -10905,28 +11588,30 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
if (ret) {
- DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
+ drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
goto fail;
}
ret = do_aquire_global_lock(dev, state);
if (ret) {
- DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
+ drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
goto fail;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
}
+#endif
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret) {
- DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
+ drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
}
@@ -10938,12 +11623,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
ret = drm_dp_mst_atomic_check(state);
if (ret) {
- DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
+ drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n");
goto fail;
}
status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
- DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
+ drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
@@ -11021,11 +11706,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
fail:
if (ret == -EDEADLK)
- DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n");
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
- DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n");
else
- DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+ drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret);
trace_amdgpu_dm_atomic_check_finish(state, ret);
@@ -11181,6 +11866,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
return ret;
}
+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+ struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+ u16 min_vfreq;
+ u16 max_vfreq;
+
+ if (edid == NULL || edid->extensions == 0)
+ return;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ if (edid_ext == NULL)
+ return;
+
+ while (j < EDID_LENGTH) {
+ /* Get dynamic video timing range from DisplayID if available */
+ if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+ (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+ min_vfreq = edid_ext[j+9];
+ if (edid_ext[j+1] & 7)
+ max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+ else
+ max_vfreq = edid_ext[j+10];
+
+ if (max_vfreq && min_vfreq) {
+ connector->display_info.monitor_range.max_vfreq = max_vfreq;
+ connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+ return;
+ }
+ }
+ j++;
+ }
+}
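As a worked example of the parsing above (a sketch of the byte pattern the loop matches, not a general DisplayID parser): a data block whose bytes at offsets +0, +1 and +2 are 0x25, 0x00 and 0x09, with 0x30 at offset +9 and 0x78 at offset +10, yields min_vfreq = 48 Hz and max_vfreq = 120 Hz. If the byte at offset +1 has any of its low three bits set, the two extra high bits at offset +11 are folded in, so 0x2C at +10 with 0x01 at +11 gives max_vfreq = 44 + (1 << 8) = 300 Hz.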
+
static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
@@ -11289,7 +12017,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
- amdgpu_dm_connector->pixel_clock_mhz = 0;
connector->display_info.monitor_range.min_vfreq = 0;
connector->display_info.monitor_range.max_vfreq = 0;
freesync_capable = false;
@@ -11302,6 +12029,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
+ /* Some eDP panels only have the refresh rate range info in DisplayID */
+ if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+ connector->display_info.monitor_range.max_vfreq == 0))
+ parse_edid_displayid_vrr(connector, edid);
+
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
@@ -11309,9 +12041,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (is_dp_capable_without_timing_msa(adev->dm.dc,
amdgpu_dm_connector)) {
if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
- freesync_capable = true;
amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
} else {
edid_check_required = edid->version > 1 ||
(edid->version == 1 &&
@@ -11353,8 +12087,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
connector->display_info.monitor_range.min_vfreq;
amdgpu_dm_connector->max_vfreq =
connector->display_info.monitor_range.max_vfreq;
- amdgpu_dm_connector->pixel_clock_mhz =
- range->pixel_clock_mhz * 10;
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 09519b7abf67..5fd1b6b44577 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+#define AMDGPU_DMUB_NOTIFICATION_MAX 6
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -137,6 +137,13 @@ struct vblank_control_work {
bool enable;
};
+struct idle_workqueue {
+ struct work_struct work;
+ struct amdgpu_display_manager *dm;
+ bool enable;
+ bool running;
+};
+
/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
@@ -173,6 +180,14 @@ struct amdgpu_dm_backlight_caps {
* @aux_support: Describes if the display supports AUX backlight.
*/
bool aux_support;
+ /**
+ * @ac_level: the default brightness if booted on AC
+ */
+ u8 ac_level;
+ /**
+ * @dc_level: the default brightness if booted on DC
+ */
+ u8 dc_level;
};
/**
@@ -487,6 +502,7 @@ struct amdgpu_display_manager {
* Deferred work for vblank control events.
*/
struct workqueue_struct *vblank_control_workqueue;
+ struct idle_workqueue *idle_workqueue;
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
@@ -570,6 +586,11 @@ struct amdgpu_display_manager {
* Guards access to DPIA AUX
*/
struct mutex dpia_aux_lock;
+
+ /*
+ * Bounding box data read from dmub during early initialization for DCN4+
+ */
+ struct dml2_soc_bb *bb_from_dmub;
};
enum dsc_clock_force_state {
@@ -678,7 +699,6 @@ struct amdgpu_dm_connector {
* value is set to zero when there is no FreeSync support.
*/
int max_vfreq ;
- int pixel_clock_mhz;
/* Audio instance - protected by audio_lock. */
int audio_inst;
@@ -822,6 +842,11 @@ struct dm_plane_state {
enum amdgpu_transfer_function blend_tf;
};
+enum amdgpu_dm_cursor_mode {
+ DM_CURSOR_NATIVE_MODE = 0,
+ DM_CURSOR_OVERLAY_MODE,
+};
+
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -852,6 +877,8 @@ struct dm_crtc_state {
* encoding.
*/
enum amdgpu_transfer_function regamma_tf;
+
+ enum amdgpu_dm_cursor_mode cursor_mode;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -956,4 +983,13 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev);
+
+void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr);
+
+bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e23a0a276e33..99014339aaa3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -35,6 +35,9 @@
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"
+#define HPD_DETECTION_PERIOD_uS 5000000
+#define HPD_DETECTION_TIME_uS 1000
+
void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
struct drm_crtc *crtc = &acrtc->base;
@@ -146,9 +149,93 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
- if (!aconn->disallow_edp_enter_psr)
+ if (!aconn->disallow_edp_enter_psr) {
+ struct amdgpu_display_manager *dm = vblank_work->dm;
+
amdgpu_dm_psr_enable(vblank_work->stream);
+ if (dm->idle_workqueue &&
+ dm->dc->idle_optimizations_allowed &&
+ dm->idle_workqueue->enable &&
+ !dm->idle_workqueue->running)
+ schedule_work(&dm->idle_workqueue->work);
+ }
+ }
+}
+
+bool amdgpu_dm_is_headless(struct amdgpu_device *adev)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_device *dev;
+ bool is_headless = true;
+
+ if (adev == NULL)
+ return true;
+
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (connector->status == connector_status_connected) {
+ is_headless = false;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ return is_headless;
+}
+
+static void amdgpu_dm_idle_worker(struct work_struct *work)
+{
+ struct idle_workqueue *idle_work;
+
+ idle_work = container_of(work, struct idle_workqueue, work);
+ idle_work->dm->idle_workqueue->running = true;
+
+ while (idle_work->enable) {
+ fsleep(HPD_DETECTION_PERIOD_uS);
+ mutex_lock(&idle_work->dm->dc_lock);
+ if (!idle_work->dm->dc->idle_optimizations_allowed) {
+ mutex_unlock(&idle_work->dm->dc_lock);
+ break;
+ }
+ dc_allow_idle_optimizations(idle_work->dm->dc, false);
+
+ mutex_unlock(&idle_work->dm->dc_lock);
+ fsleep(HPD_DETECTION_TIME_uS);
+ mutex_lock(&idle_work->dm->dc_lock);
+
+ if (!amdgpu_dm_is_headless(idle_work->dm->adev) &&
+ !amdgpu_dm_psr_is_active_allowed(idle_work->dm)) {
+ mutex_unlock(&idle_work->dm->dc_lock);
+ break;
+ }
+
+ if (idle_work->enable)
+ dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ mutex_unlock(&idle_work->dm->dc_lock);
}
+ idle_work->dm->idle_workqueue->running = false;
+}
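In effect, the worker above keeps a headless, idle-optimized system responsive to hotplugs: roughly every HPD_DETECTION_PERIOD_uS (5 s) it takes dc_lock, drops out of idle optimizations for HPD_DETECTION_TIME_uS (1 ms) so periodic detection can run, then re-allows them. It stops once idle optimizations were already disabled elsewhere, once a display is connected while PSR is not active-allowed, or once ->enable is cleared.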
+
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev)
+{
+ struct idle_workqueue *idle_work;
+
+ idle_work = kzalloc(sizeof(*idle_work), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(idle_work))
+ return NULL;
+
+ idle_work->dm = &adev->dm;
+ idle_work->enable = false;
+ idle_work->running = false;
+ INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker);
+
+ return idle_work;
}
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
@@ -304,6 +391,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
state->regamma_tf = cur->regamma_tf;
state->crc_skip_count = cur->crc_skip_count;
state->mpo_requested = cur->mpo_requested;
+ state->cursor_mode = cur->cursor_mode;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 4d7a5d470b1e..62cb59f00929 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -30,6 +30,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_dm_replay.h"
#include "dm_helpers.h"
#include "dmub/dmub_srv.h"
#include "resource.h"
@@ -960,6 +961,58 @@ static int dmub_fw_state_show(struct seq_file *m, void *data)
return seq_write(m, state_base, state_size);
}
+/* replay_capability_show() - show eDP panel replay capability
+ *
+ * The read function: replay_capability_show
+ * Shows whether the sink and the driver support Replay.
+ *
+ * cat /sys/kernel/debug/dri/0/eDP-X/replay_capability
+ *
+ * Expected output:
+ * "Sink support: no\n" - if panel doesn't support Replay
+ * "Sink support: yes\n" - if panel supports Replay
+ * "Driver support: no\n" - if driver doesn't support Replay
+ * "Driver support: yes\n" - if driver supports Replay
+ */
+static int replay_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *link = aconnector->dc_link;
+ bool sink_support_replay = false;
+ bool driver_support_replay = false;
+
+ if (!link)
+ return -ENODEV;
+
+ if (link->type == dc_connection_none)
+ return -ENODEV;
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return -ENODEV;
+
+ /* If Replay is already set to support, skip the checks */
+ if (link->replay_settings.config.replay_supported) {
+ sink_support_replay = true;
+ driver_support_replay = true;
+ } else if ((amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector);
+ } else {
+ struct dc *dc = link->ctx->dc;
+
+ sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector);
+ if (dc->ctx->dmub_srv && dc->ctx->dmub_srv->dmub)
+ driver_support_replay =
+ (bool)dc->ctx->dmub_srv->dmub->feature_caps.replay_supported;
+ }
+
+ seq_printf(m, "Sink support: %s\n", str_yes_no(sink_support_replay));
+ seq_printf(m, "Driver support: %s\n", str_yes_no(driver_support_replay));
+ seq_printf(m, "Config support: %s\n", str_yes_no(link->replay_settings.config.replay_supported));
+
+ return 0;
+}
+
/* psr_capability_show() - show eDP panel PSR capability
*
* The read function: sink_psr_capability_show
@@ -1367,7 +1420,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
uint8_t param_nums = 0;
bool ret = false;
- if (!aconnector || !aconnector->dc_link)
+ if (!aconnector->dc_link)
return -EINVAL;
if (size == 0)
@@ -2619,6 +2672,49 @@ unlock:
}
/*
+ * IPS status. Read only.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_ips_status
+ */
+static int ips_status_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dc *dc = adev->dm.dc;
+ struct dc_dmub_srv *dc_dmub_srv;
+
+ seq_printf(m, "IPS config: %d\n", dc->config.disable_ips);
+ seq_printf(m, "Idle optimization: %d\n", dc->idle_optimizations_allowed);
+
+ if (adev->dm.idle_workqueue) {
+ seq_printf(m, "Idle workqueue - enabled: %d\n", adev->dm.idle_workqueue->enable);
+ seq_printf(m, "Idle workqueue - running: %d\n", adev->dm.idle_workqueue->running);
+ }
+
+ dc_dmub_srv = dc->ctx->dmub_srv;
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ uint32_t rcg_count, ips1_count, ips2_count;
+ volatile const struct dmub_shared_state_ips_fw *ips_fw =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ rcg_count = ips_fw->rcg_entry_count;
+ ips1_count = ips_fw->ips1_entry_count;
+ ips2_count = ips_fw->ips2_entry_count;
+ seq_printf(m, "entry counts: rcg=%u ips1=%u ips2=%u\n",
+ rcg_count,
+ ips1_count,
+ ips2_count);
+ rcg_count = ips_fw->rcg_exit_count;
+ ips1_count = ips_fw->ips1_exit_count;
+ ips2_count = ips_fw->ips2_exit_count;
+ seq_printf(m, "exit counts: rcg=%u ips1=%u ips2=%u",
+ rcg_count,
+ ips1_count,
+ ips2_count);
+ seq_puts(m, "\n");
+ }
+ return 0;
+}
+
+/*
* Backlight at this moment. Read only.
* As written to display, taking ABM and backlight lut into account.
* Ranges from 0x0 to 0x10000 (= 100% PWM)
@@ -2768,6 +2864,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
DEFINE_SHOW_ATTRIBUTE(internal_display);
DEFINE_SHOW_ATTRIBUTE(odm_combine_segments);
+DEFINE_SHOW_ATTRIBUTE(replay_capability);
DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
@@ -2938,6 +3035,22 @@ DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
force_yuv420_output_set, "%llu\n");
/*
+ * Read Replay state
+ */
+static int replay_get_state(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ uint64_t state = REPLAY_STATE_INVALID;
+
+ dc_link_get_replay_state(link, &state);
+
+ *val = state;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -2962,7 +3075,7 @@ static int psr_read_residency(void *data, u64 *val)
struct dc_link *link = connector->dc_link;
u32 residency = 0;
- link->dc->link_srv->edp_get_psr_residency(link, &residency);
+ link->dc->link_srv->edp_get_psr_residency(link, &residency, PSR_RESIDENCY_MODE_PHY);
*val = (u64)residency;
@@ -3155,6 +3268,8 @@ static int dmcub_trace_event_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get,
dmcub_trace_event_state_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
+
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3169,6 +3284,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops,
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
+DEFINE_SHOW_ATTRIBUTE(ips_status);
static const struct {
char *name;
@@ -3328,6 +3444,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
}
}
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("replay_capability", 0444, dir, connector,
+ &replay_capability_fops);
+ debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
@@ -3676,6 +3795,7 @@ static int trigger_hpd_mst_set(void *data, u64 val)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct dc_link *link = NULL;
+ int ret;
if (val == 1) {
drm_connector_list_iter_begin(dev, &iter);
@@ -3687,7 +3807,9 @@ static int trigger_hpd_mst_set(void *data, u64 val)
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
- drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0)
+ DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
}
}
} else if (val == 0) {
@@ -4055,4 +4177,7 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
&disable_hpd_ops);
+ if (adev->dm.dc->caps.ips_support)
+ debugfs_create_file_unsafe("amdgpu_dm_ips_status", 0644, root, adev,
+ &ips_status_fops);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 2c36f3d00ca2..b490ae67b6be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -69,6 +69,7 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
+ case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -574,10 +575,8 @@ bool dm_helpers_dp_write_dpcd(
{
struct amdgpu_dm_connector *aconnector = link->priv;
- if (!aconnector) {
- DRM_ERROR("Failed to find connector for link!");
+ if (!aconnector)
return false;
- }
return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
address, (uint8_t *)data, size) > 0;
@@ -806,9 +805,6 @@ bool dm_helpers_dp_write_dsc_enable(
uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
uint8_t ret = 0;
- if (!stream)
- return false;
-
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
if (!aconnector->dsc_aux)
return false;
@@ -1044,30 +1040,8 @@ void *dm_helpers_allocate_gpu_mem(
long long *addr)
{
struct amdgpu_device *adev = ctx->driver_context;
- struct dal_allocation *da;
- u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
- AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
- int ret;
-
- da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
- if (!da)
- return NULL;
- ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- domain, &da->bo,
- &da->gpu_addr, &da->cpu_ptr);
-
- *addr = da->gpu_addr;
-
- if (ret) {
- kfree(da);
- return NULL;
- }
-
- /* add da to list in dm */
- list_add(&da->list, &adev->dm.da_list);
-
- return da->cpu_ptr;
+ return dm_allocate_gpu_mem(adev, type, size, addr);
}
void dm_helpers_free_gpu_mem(
@@ -1261,7 +1235,13 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
- /* TODO: add periodic detection implementation */
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ if (adev->dm.idle_workqueue) {
+ adev->dm.idle_workqueue->enable = enable;
+ if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
+ schedule_work(&adev->dm.idle_workqueue->work);
+ }
}
void dm_helpers_dp_mst_update_branch_bandwidth(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index a5e1a93ddaea..5442da90f508 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/vmalloc.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
@@ -182,6 +183,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -210,6 +213,7 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
@@ -269,6 +273,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
return true;
}
+#endif
static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
@@ -402,9 +407,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (!validate_dsc_caps_on_connector(aconnector))
memset(&aconnector->dc_sink->dsc_caps,
0, sizeof(aconnector->dc_sink->dsc_caps));
+#endif
if (!retrieve_downstream_port_device(aconnector))
memset(&aconnector->mst_downstream_port_present,
@@ -494,6 +501,8 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -794,6 +803,7 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
+#if defined(CONFIG_DRM_AMD_DC_FP)
static int kbps_to_peak_pbn(int kbps)
{
u64 peak_kbps = kbps;
@@ -1233,14 +1243,6 @@ static bool is_dsc_need_re_compute(
if (!aconnector || !aconnector->dsc_aux)
continue;
- /*
- * check if cached virtual MST DSC caps are available and DSC is supported
- * as per specifications in their Virtual DPCD registers.
- */
- if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
- continue;
-
stream_on_link[new_stream_on_link_num] = aconnector;
new_stream_on_link_num++;
@@ -1490,9 +1492,10 @@ int pre_validate_dsc(struct drm_atomic_state *state,
* from dm_state->context.
*/
- local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
+ local_dc_state = vmalloc(sizeof(struct dc_state));
if (!local_dc_state)
return -ENOMEM;
+ memcpy(local_dc_state, dm_state->context, sizeof(struct dc_state));
for (i = 0; i < local_dc_state->stream_count; i++) {
struct dc_stream_state *stream = dm_state->context->streams[i];
@@ -1562,7 +1565,7 @@ clean_exit:
dc_stream_release(local_dc_state->streams[i]);
}
- kfree(local_dc_state);
+ vfree(local_dc_state);
return ret;
}
@@ -1594,110 +1597,173 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw)
+{
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+ enum dc_link_rate link_rate;
+ union lane_count_set lane_count;
+ u8 dp_link_encoding;
+ u8 link_bw_set = 0;
+
+ *cur_link_bw = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
+ drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
+ drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ return false;
+
+ switch (dp_link_encoding) {
+ case DP_8b_10b_ENCODING:
+ link_rate = link_bw_set;
+ link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ break;
+ case DP_128b_132b_ENCODING:
+ switch (link_bw_set) {
+ case DP_LINK_BW_10:
+ link_rate = LINK_RATE_UHBR10;
+ break;
+ case DP_LINK_BW_13_5:
+ link_rate = LINK_RATE_UHBR13_5;
+ break;
+ case DP_LINK_BW_20:
+ link_rate = LINK_RATE_UHBR20;
+ break;
+ default:
+ return false;
+ }
+
+ link_rate_per_lane_kbps = link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ return false;
+ }
+
+ *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000;
+ return true;
+}
+#endif
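As a rough worked example of the 8b/10b branch above (assuming DC's usual defines, e.g. LINK_RATE_REF_FREQ_IN_KHZ = 27000, BITS_PER_DP_BYTE = 10, an 8b/10b data efficiency of 0.80 and a FEC efficiency factor of 0.97): a link trained at HBR2 (DP_LINK_BW_SET = 0x14, so link_rate = 20) over 4 lanes gives 20 * 27000 * 10 = 5,400,000 kbps per lane, the efficiency term evaluates to 7760, and *cur_link_bw = 5,400,000 * 4 / 10000 * 7760 = 16,761,600 kbps, roughly 16.8 Gbps of usable payload bandwidth.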
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int pbn, branch_max_throughput_mps = 0;
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ int branch_max_throughput_mps = 0;
struct dc_link_settings cur_link_settings;
- unsigned int end_to_end_bw_in_kbps = 0;
- unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ uint32_t end_to_end_bw_in_kbps = 0;
+ uint32_t root_link_bw_in_kbps = 0;
+ uint32_t virtual_channel_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config_options dsc_options = {0};
+ uint32_t stream_kbps;
- /*
- * Consider the case with the depth of the mst topology tree is equal or less than 2
- * A. When dsc bitstream can be transmitted along the entire path
- * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
- * 2. dsc passthrough supported at MST branch, or
- * 3. dsc decoding supported at leaf MST device
- * Use maximum dsc compression as bw constraint
- * B. When dsc bitstream cannot be transmitted along the entire path
- * Use native bw as bw constraint
+ /* DSC unnecessary case
+ * Check if timing could be supported within end-to-end BW
*/
- if (is_dsc_common_config_possible(stream, &bw_range) &&
- (aconnector->mst_output_port->passthrough_aux ||
- aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
- cur_link_settings = stream->link->verified_link_cap;
- upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
-
- /* pick the end to end bw bottleneck */
- end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps);
-
- if (end_to_end_bw_in_kbps < bw_range.min_kbps) {
- DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ stream_kbps =
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->link));
+ cur_link_settings = stream->link->verified_link_cap;
+ root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
+ virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+
+ /* pick the end to end bw bottleneck */
+ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+
+ if (stream_kbps <= end_to_end_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient.");
+ return DC_OK;
+ }
- if (end_to_end_bw_in_kbps < bw_range.stream_kbps) {
- dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
- dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
- if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
- &stream->sink->dsc_caps.dsc_dec_caps,
- &dsc_options,
- end_to_end_bw_in_kbps,
- &stream->timing,
- dc_link_get_highest_encoding_format(stream->link),
- &stream->timing.dsc_cfg)) {
- stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n");
- } else {
- DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n");
+ /* DSC necessary case */
+ if (!aconnector->dsc_aux)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (is_dsc_common_config_possible(stream, &bw_range)) {
+
+ /* Capable of DSC passthrough: DSC bitstream along the entire path */
+ if (aconnector->mst_output_port->passthrough_aux) {
+ if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
- }
- } else {
- /* Check if mode could be supported within max slot
- * number of current mst link and full_pbn of mst links.
- */
- int pbn_div, slot_num, max_slot_num;
- enum dc_link_encoding_format link_encoding;
- uint32_t stream_kbps =
- dc_bandwidth_in_kbps_from_timing(&stream->timing,
- dc_link_get_highest_encoding_format(stream->link));
-
- pbn = kbps_to_peak_pbn(stream_kbps);
- pbn_div = dm_mst_get_pbn_divider(stream->link);
- slot_num = DIV_ROUND_UP(pbn, pbn_div);
-
- link_encoding = dc_link_get_highest_encoding_format(stream->link);
- if (link_encoding == DC_LINK_ENCODING_DP_8b_10b)
- max_slot_num = 63;
- else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
- max_slot_num = 64;
- else {
- DRM_DEBUG_DRIVER("Invalid link encoding format\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ } else {
+ /* DSC bitstream decoded at the last DP link */
+ struct drm_dp_mst_port *immediate_upstream_port = NULL;
+ uint32_t end_link_bw = 0;
+
+ /* Get the last DP link BW capability */
+ if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
+ if (stream_kbps > end_link_bw) {
+ DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ }
+
+ /* Get the virtual channel bandwidth between the source and the link before the last link */
+ if (aconnector->mst_output_port->parent->port_parent)
+ immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
+
+ if (immediate_upstream_port) {
+ virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ }
}
- if (slot_num > max_slot_num ||
- pbn > aconnector->mst_output_port->full_pbn) {
- DRM_DEBUG_DRIVER("Mode can not be supported within mst links!");
+ /* Confirm that we can obtain a DSC config */
+ dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
+ if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &dsc_options,
+ end_to_end_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(stream->link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("Require dsc and dsc config found\n");
+ } else {
+ DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
- }
- /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
- switch (stream->timing.pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- branch_max_throughput_mps =
- aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
- break;
- case PIXEL_ENCODING_YCBCR422:
- case PIXEL_ENCODING_YCBCR420:
- branch_max_throughput_mps =
- aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
- break;
- default:
- break;
- }
+ /* Check the MST DSC output bandwidth against branch_overall_throughput_{0,1}_mps */
+ switch (stream->timing.pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ branch_max_throughput_mps =
+ aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR420:
+ branch_max_throughput_mps =
+ aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
+ break;
+ default:
+ break;
+ }
- if (branch_max_throughput_mps != 0 &&
- ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
+ if (branch_max_throughput_mps != 0 &&
+ ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
+ DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
return DC_FAIL_BANDWIDTH_VALIDATE;
-
+ }
+#endif
return DC_OK;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 8a4c40b4c27e..a83bd0331c3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -104,8 +104,6 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
*global_alpha = false;
*global_alpha_value = 0xff;
- if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
- return;
if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
@@ -354,6 +352,46 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
return ret;
}
+static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const struct plane_size *plane_size,
+ union dc_tiling_info *tiling_info,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address,
+ const bool force_disable_dcc)
+{
+ const uint64_t modifier = afb->base.modifier;
+ int ret = 0;
+
+ /* TODO: Most of this function shouldn't be needed on GFX12. */
+ amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
+
+ tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
+
+ dcc->enable = 1;
+ dcc->independent_64b_blks = max_compressed_block == 0;
+
+ if (max_compressed_block == 0)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else if (max_compressed_block == 1)
+ dcc->dcc_ind_blk = hubp_ind_block_128b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ }
+
+ /* TODO: This seems wrong because there is no DCC plane on GFX12. */
+ ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
+ if (ret)
+ drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
+
+ return ret;
+}
+
static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
uint64_t **mods,
uint64_t *size,
@@ -647,6 +685,38 @@ static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}
+static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+ uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
+ uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
+ uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
+ uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
+ uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
+ uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
+ uint8_t max_comp_block[] = {1, 0};
+ uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
+ uint8_t i = 0, j = 0;
+ uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
+
+ for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
+ max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);
+
+ /* With DCC: the best choice should be listed first, so add all 256K modifiers
+ * with the different max compressed block sizes before moving on to the next
+ * smaller layouts. The linear modifier is not added here, hence the size - 1
+ * bound on the loop.
+ */
+ for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++)
+ for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
+ amdgpu_dm_plane_add_modifier(mods, size, capacity,
+ ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);
+
+ /* Without DCC. Add all modifiers including linear at the end */
+ for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
+ amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
+
+}
+
static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
uint64_t size = 0, capacity = 128;
@@ -684,6 +754,9 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
case AMDGPU_FAMILY_GC_11_5_0:
amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
+ case AMDGPU_FAMILY_GC_12_0_0:
+ amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
+ break;
}
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
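For reference, the GFX12 hunk above advertises modifiers in a fixed preference order: every tiled 2D layout with DCC (largest tile size first and, for each tile size, the larger max compressed block setting first), then the same layouts without DCC, with linear last. A standalone sketch that mirrors this ordering with placeholder layout names (the real code builds the AMD_FMT_MOD_* bit fields instead of strings) could look like this:

#include <stdio.h>

/* Placeholder names standing in for the GFX12 swizzle modes. */
static const char *const layouts[] = {
	"256K_2D", "64K_2D", "4K_2D", "256B_2D", "LINEAR",
};
/* 1 and 0 correspond to the 128B and 64B max compressed block settings. */
static const int max_comp_block[] = { 1, 0 };

int main(void)
{
	size_t i, j;

	/* DCC variants of every tiled layout come first, best choice first. */
	for (j = 0; j < sizeof(layouts) / sizeof(layouts[0]) - 1; j++)
		for (i = 0; i < sizeof(max_comp_block) / sizeof(max_comp_block[0]); i++)
			printf("%s + DCC, max_compressed_block=%d\n",
			       layouts[j], max_comp_block[i]);

	/* Then every layout without DCC, ending with linear. */
	for (i = 0; i < sizeof(layouts) / sizeof(layouts[0]); i++)
		printf("%s\n", layouts[i]);

	return 0;
}

Userspace typically walks a plane's IN_FORMATS list in order, so keeping the best compressed layouts first lets allocators prefer them when every consumer supports DCC.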
@@ -822,7 +895,15 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
upper_32_bits(chroma_addr);
}
- if (adev->family >= AMDGPU_FAMILY_AI) {
+ if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
+ ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
+ rotation, plane_size,
+ tiling_info, dcc,
+ address,
+ force_disable_dcc);
+ if (ret)
+ return ret;
+ } else if (adev->family >= AMDGPU_FAMILY_AI) {
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
@@ -1175,15 +1256,26 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
return -EINVAL;
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+	/* Reject overlay cursors for now */
+ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ return -EINVAL;
+
return 0;
}
-static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
- struct dc_cursor_position *position)
+int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int x, y;
@@ -1254,7 +1346,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
/* turn off cursor */
if (crtc_state && crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
- dc_stream_set_cursor_position(crtc_state->stream,
+ dc_stream_program_cursor_position(crtc_state->stream,
&position);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -1284,11 +1376,11 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
if (crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
- if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ if (!dc_stream_program_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
- if (!dc_stream_set_cursor_position(crtc_state->stream,
+ if (!dc_stream_program_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
mutex_unlock(&adev->dm.dc_lock);
@@ -1395,8 +1487,6 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
const struct drm_format_info *info = drm_format_info(format);
int i;
- enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
-
if (!info)
return false;
@@ -1418,29 +1508,34 @@ static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
if (i == plane->modifier_count)
return false;
- /*
- * For D swizzle the canonical modifier depends on the bpp, so check
- * it here.
- */
- if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
- adev->family >= AMDGPU_FAMILY_NV) {
- if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
- return false;
- }
+ /* GFX12 doesn't have these limitations. */
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
+ enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
- if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
- info->cpp[0] < 8)
- return false;
-
- if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
- /* Per radeonsi comments 16/64 bpp are more complicated. */
- if (info->cpp[0] != 4)
- return false;
- /* We support multi-planar formats, but not when combined with
- * additional DCC metadata planes.
+ /*
+ * For D swizzle the canonical modifier depends on the bpp, so check
+ * it here.
*/
- if (info->num_planes > 1)
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
+ adev->family >= AMDGPU_FAMILY_NV) {
+ if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
+ return false;
+ }
+
+ if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
+ info->cpp[0] < 8)
return false;
+
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+ /* Per radeonsi comments 16/64 bpp are more complicated. */
+ if (info->cpp[0] != 4)
+ return false;
+ /* We support multi-planar formats, but not when combined with
+ * additional DCC metadata planes.
+ */
+ if (info->num_planes > 1)
+ return false;
+ }
}
return true;
@@ -1675,6 +1770,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
int res = -EPERM;
unsigned int supported_rotations;
uint64_t *modifiers = NULL;
+ unsigned int primary_zpos = dm->dc->caps.max_slave_planes;
num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
@@ -1704,10 +1800,19 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- drm_plane_create_zpos_immutable_property(plane, 0);
+ /*
+ * Allow OVERLAY planes to be used as underlays by assigning an
+ * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
+ */
+ drm_plane_create_zpos_immutable_property(plane, primary_zpos);
} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- unsigned int zpos = 1 + drm_plane_index(plane);
- drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ /*
+ * OVERLAY planes can be below or above the PRIMARY, but cannot
+ * be above the CURSOR plane.
+ */
+ unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);
+
+ drm_plane_create_zpos_property(plane, zpos, 0, 254);
} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
drm_plane_create_zpos_immutable_property(plane, 255);
}
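With the zpos change above, the primary plane carries an immutable zpos equal to dc->caps.max_slave_planes while overlay planes get a mutable zpos in [0, 254], so a compositor can push an overlay below the primary and use it as an underlay. A minimal libdrm sketch, assuming the DRM fd, the overlay plane ID and its "zpos" property ID have already been looked up elsewhere (the helper name here is hypothetical):

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/*
 * Request zpos 0 for an overlay plane so it scans out below the primary
 * plane, which now carries an immutable, larger zpos.
 */
static int set_underlay_zpos(int fd, uint32_t overlay_plane_id,
			     uint32_t zpos_prop_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, overlay_plane_id, zpos_prop_id, 0);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);

	return ret;
}

A real commit would also carry FB_ID, CRTC_ID and the primary plane's state in the same request; the sketch only shows the zpos assignment itself.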
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index b51a6b57bd9b..6498359bff6f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -29,6 +29,9 @@
#include "dc.h"
+int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position);
+
void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index bfa090432ce2..f40240aafe98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -158,7 +158,7 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
DRM_DEBUG_DRIVER("Enabling psr...\n");
vsync_rate_hz = div64_u64(div64_u64((
- stream->timing.pix_clk_100hz * 100),
+ stream->timing.pix_clk_100hz * (uint64_t)100),
stream->timing.v_total),
stream->timing.h_total);
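The added (uint64_t) cast is not cosmetic: pix_clk_100hz is a 32-bit field, so without the cast the multiplication by 100 is performed in 32-bit arithmetic and wraps before div64_u64() ever sees the value, for pixel clocks above roughly UINT32_MAX / 100 (about 4.29 GHz). A small standalone illustration with a hypothetical clock value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_100hz = 50000000;	/* hypothetical 5 GHz pixel clock */

	/* Product computed in 32 bits, then widened: already wrapped. */
	uint64_t wrapped = pix_clk_100hz * 100;
	/* One 64-bit operand forces a 64-bit multiplication: full value kept. */
	uint64_t correct = pix_clk_100hz * (uint64_t)100;

	printf("wrapped: %llu\ncorrect: %llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}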
@@ -223,3 +223,31 @@ bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
return dc_set_psr_allow_active(dm->dc, false);
}
+/*
+ * amdgpu_dm_psr_is_active_allowed() - check if PSR is allowed to be active on any stream
+ * @dm: pointer to amdgpu_display_manager
+ *
+ * Return: true if PSR is enabled and allowed to be active on at least one stream
+ */
+
+bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
+{
+ unsigned int i;
+ bool allow_active = false;
+
+	for (i = 0; i < dm->dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dm->dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+ if (link->psr_settings.psr_feature_enabled &&
+ link->psr_settings.psr_allow_active) {
+ allow_active = true;
+ break;
+ }
+ }
+
+ return allow_active;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 1fdfd183c0d9..cd2d45c2b5ef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -36,5 +36,6 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 738a58eebba7..41f07f13a7b5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -24,6 +24,7 @@
*/
#include "amdgpu_dm_replay.h"
+#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@@ -32,12 +33,12 @@
#include "dc/inc/link.h"
/*
- * link_supports_replay() - check if the link supports replay
+ * amdgpu_dm_link_supports_replay() - check if the link supports replay
* @link: link
* @aconnector: aconnector
*
*/
-static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+bool amdgpu_dm_link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
{
struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state);
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
@@ -78,6 +79,7 @@ bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector
{
struct replay_config pr_config = { 0 };
union replay_debug_flags *debug_flags = NULL;
+ struct dc *dc = link->ctx->dc;
// If Replay is already set to support, return true to skip checks
if (link->replay_settings.config.replay_supported)
@@ -89,7 +91,11 @@ bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector
if (link->panel_config.psr.disallow_replay)
return false;
- if (!link_supports_replay(link, aconnector))
+ if (!amdgpu_dm_link_supports_replay(link, aconnector))
+ return false;
+
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub ||
+ !dc->ctx->dmub_srv->dmub->feature_caps.replay_supported)
return false;
// Mark Replay is supported in pr_config
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index f0d30eb47312..8126bdb1eb6b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -38,7 +38,7 @@ enum replay_enable_option {
pr_enable_option_full_screen_video_coasting = 0x40000,
};
-
+bool amdgpu_dm_link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);