Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
 drivers/gpu/drm/amd/display/amdgpu_dm/Makefile               |   4
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c            | 451
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h            |  10
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c      |  15
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c    | 722
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h    |  34
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c    |  56
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c        |   3
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  | 104
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c     | 562
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c   | 290
 11 files changed, 1760 insertions(+), 491 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index af16973f2c41..94911871eb9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,11 +28,11 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
ifneq ($(CONFIG_DEBUG_FS),)
-AMDGPUDM += amdgpu_dm_crc.o
+AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1dd1142246c2..5fc13e71a3b5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -39,6 +39,9 @@
#include "dm_helpers.h"
#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -46,6 +49,7 @@
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -56,7 +60,7 @@
#include "modules/inc/mod_freesync.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "ivsrcid/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -346,7 +350,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
@@ -387,7 +390,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
}
-#endif
/* Init display KMS
@@ -433,11 +435,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
- if (amdgpu_dc_log)
- init_data.log_mask = DC_DEFAULT_LOG_MASK;
- else
- init_data.log_mask = DC_MIN_LOG_MASK;
-
/*
* TODO debug why this doesn't work on Raven
*/
@@ -649,18 +646,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
- struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
-
- /* power on hardware */
- dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
-
- ret = amdgpu_dm_display_resume(adev);
- return ret;
-}
-
-int amdgpu_dm_display_resume(struct amdgpu_device *adev)
-{
struct drm_device *ddev = adev->ddev;
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
@@ -671,10 +656,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
-
- int ret = 0;
+ int ret;
int i;
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
/* program HPD filter */
dc_resume(dm->dc);
@@ -688,8 +675,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
amdgpu_dm_irq_resume_early(adev);
/* Do detection*/
- list_for_each_entry(connector,
- &ddev->mode_config.connector_list, head) {
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -711,7 +697,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
/* Force mode set in atomic comit */
- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
new_crtc_state->active_changed = true;
/*
@@ -719,7 +705,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
* them here, since they were duplicated as part of the suspend
* procedure.
*/
- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream) {
WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
@@ -728,7 +714,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
- for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
dm_new_plane_state = to_dm_plane_state(new_plane_state);
if (dm_new_plane_state->dc_state) {
WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
@@ -737,9 +723,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
- ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+ ret = drm_atomic_helper_resume(ddev, dm->cached_state);
- adev->dm.cached_state = NULL;
+ dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
@@ -917,16 +903,17 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
(struct edid *) sink->dc_edid.raw_edid;
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
aconnector->edid);
}
amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
} else {
amdgpu_dm_remove_sink_from_freesync_module(connector);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1054,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
- if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1131,6 +1118,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
+ adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN)
client_id = SOC15_IH_CLIENTID_DCE;
@@ -1204,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1529,14 +1517,16 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
case CHIP_VEGA10:
case CHIP_VEGA12:
+ case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -1545,11 +1535,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/*
* Temporary disable until pplib/smu interaction is implemented
*/
- dm->dc->debug.disable_stutter = true;
+ dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
break;
#endif
default:
- DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
goto fail;
}
@@ -1657,7 +1647,6 @@ static ssize_t s3_debug_store(struct device *device,
if (ret == 0) {
if (s3_state) {
dm_resume(adev);
- amdgpu_dm_display_resume(adev);
drm_kms_helper_hotplug_event(adev->ddev);
} else
dm_suspend(adev);
@@ -1722,6 +1711,7 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -1729,12 +1719,13 @@ static int dm_early_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
+ case CHIP_VEGA20:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
adev->mode_info.plane_type = dm_plane_type_default;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
@@ -1743,7 +1734,7 @@ static int dm_early_init(void *handle)
break;
#endif
default:
- DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -1848,7 +1839,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
@@ -1977,6 +1968,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
+ adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
plane_state->tiling_info.gfx9.num_pipes =
@@ -2017,7 +2009,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(plane_state->fb);
const struct drm_crtc *crtc = plane_state->crtc;
- struct dc_transfer_func *input_tf;
int ret = 0;
if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
@@ -2031,13 +2022,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
if (ret)
return ret;
- input_tf = dc_create_transfer_func();
-
- if (input_tf == NULL)
- return -ENOMEM;
-
- dc_plane_state->in_transfer_func = input_tf;
-
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@@ -2113,12 +2097,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
{
uint32_t bpc = connector->display_info.bpc;
- /* Limited color depth to 8bit
- * TODO: Still need to handle deep color
- */
- if (bpc > 8)
- bpc = 8;
-
switch (bpc) {
case 0:
/* Temporary Work around, DRM don't parse color depth for
@@ -2198,6 +2176,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+
+ timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ int normalized_clk;
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+ do {
+ normalized_clk = timing_out->pix_clk_khz;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjusting pix clock following on HDMI spec based on colour depth */
+ switch (timing_out->display_color_depth) {
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ return;
+ }
+ if (normalized_clk <= info->max_tmds_clock)
+ return;
+ reduce_mode_colour_depth(timing_out);
+
+ } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
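
The loop above implements the HDMI rule that a mode's TMDS character rate scales with colour depth relative to the 8 bpc baseline. A worked example with assumed values, sketched as a comment:

    /*
     * Worked example (all values assumed for illustration): a 3840x2160@60
     * mode with pix_clk_khz = 594000, driven as YCbCr 4:2:0 at 12 bpc.
     *
     *   normalized_clk = 594000 / 2       = 297000  (4:2:0 halves the rate)
     *   normalized_clk = 297000 * 36 / 24 = 445500  (12 bpc scales by 36/24)
     *
     * If the sink reports max_tmds_clock = 340000 (kHz), 445500 exceeds it,
     * so reduce_mode_colour_depth() drops to 10 bpc (297000 * 30 / 24 =
     * 371250, still too high), then to 8 bpc (COLOR_DEPTH_888), where the
     * loop terminates and the mode is driven at 297000 kHz.
     */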
/*****************************************************************************/
static void
@@ -2206,7 +2224,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
- struct dc_transfer_func *tf = dc_create_transfer_func();
+ const struct drm_display_info *info = &connector->display_info;
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2215,8 +2233,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
timing_out->v_border_top = 0;
timing_out->v_border_bottom = 0;
/* TODO: un-hardcode */
-
- if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
else
@@ -2250,9 +2270,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->output_color_space = get_output_color_space(timing_out);
- tf->type = TF_TYPE_PREDEFINED;
- tf->tf = TRANSFER_FUNCTION_SRGB;
- stream->out_transfer_func = tf;
+ stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ adjust_colour_depth_from_display_info(timing_out, info);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -2336,27 +2357,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
}
-static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+static struct dc_sink *
+create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
- struct dc_sink *sink = NULL;
struct dc_sink_init_data sink_init_data = { 0 };
-
+ struct dc_sink *sink = NULL;
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DRM_ERROR("Failed to create sink!\n");
- return -ENOMEM;
+ return NULL;
}
-
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
- aconnector->fake_enable = true;
-
- aconnector->dc_sink = sink;
- aconnector->dc_link->local_sink = sink;
- return 0;
+ return sink;
}
static void set_multisync_trigger_params(
@@ -2419,7 +2435,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
-
+ struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
return stream;
@@ -2437,15 +2453,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
return stream;
}
- if (create_fake_sink(aconnector))
+ sink = create_fake_sink(aconnector);
+ if (!sink)
return stream;
+ } else {
+ sink = aconnector->dc_sink;
}
- stream = dc_create_stream_for_sink(aconnector->dc_sink);
+ stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
- return stream;
+ goto finish;
}
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2484,10 +2503,16 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
fill_audio_info(
&stream->audio_info,
drm_connector,
- aconnector->dc_sink);
+ sink);
update_stream_signal(stream);
+ if (dm_state && dm_state->freesync_capable)
+ stream->ignore_msa_timing_param = true;
+finish:
+ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ dc_sink_release(sink);
+
return stream;
}
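
The refactor above makes sink reference ownership explicit. A sketch of the assumed flow for the fake-sink path, taking dc_create_stream_for_sink() to retain its own reference on the sink:

    /*
     * Assumed reference flow (fake-sink path):
     *
     *   sink = create_fake_sink(aconnector);       // local reference, ours
     *   stream = dc_create_stream_for_sink(sink);  // stream retains the sink
     *   ...
     * finish:
     *   if (sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
     *           dc_sink_release(sink);             // drop only our reference
     *
     * Real sinks (aconnector->dc_sink) are owned by the connector and are
     * therefore not released here.
     */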
@@ -2710,18 +2735,15 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
const struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = connector->dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
- link->type != dc_connection_none) {
- amdgpu_dm_register_backlight_device(dm);
-
- if (dm->backlight_dev) {
- backlight_device_unregister(dm->backlight_dev);
- dm->backlight_dev = NULL;
- }
-
+ link->type != dc_connection_none &&
+ dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
}
#endif
drm_connector_unregister(connector);
@@ -2734,6 +2756,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
struct dm_connector_state *state =
to_dm_connector_state(connector->state);
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
kfree(state);
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -2744,8 +2769,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_hborder = 0;
state->underscan_vborder = 0;
- connector->state = &state->base;
- connector->state->connector = connector;
+ __drm_atomic_helper_connector_reset(connector, &state->base);
}
}
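
For context, the helper call replaces the two open-coded assignments removed above. A sketch of the helper's assumed behaviour at this kernel version:

    /* assumed equivalent of the removed open-coded assignments */
    void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
                                             struct drm_connector_state *conn_state)
    {
            if (conn_state)
                    conn_state->connector = connector;

            connector->state = conn_state;
    }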
@@ -2855,7 +2879,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
@@ -3058,8 +3082,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
afb = to_amdgpu_framebuffer(new_state->fb);
-
- obj = afb->obj;
+ obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, false);
@@ -3067,20 +3090,29 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
return r;
if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_framebuffer_domains(adev);
+ domain = amdgpu_display_supported_domains(adev);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
- r = amdgpu_bo_pin(rbo, domain, &afb->address);
-
- amdgpu_bo_unreserve(rbo);
-
+ r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ amdgpu_bo_unreserve(rbo);
return r;
}
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ DRM_ERROR("%p bind failed\n", rbo);
+ return r;
+ }
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
amdgpu_bo_ref(rbo);
if (dm_plane_state_new->dc_state &&
@@ -3105,17 +3137,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
}
- /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
- * prepare and cleanup in drm_atomic_helper_prepare_planes
- * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
- * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
- * code touching fram buffers should be avoided for DC.
- */
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
-
- acrtc->cursor_bo = obj;
- }
return 0;
}
@@ -3123,14 +3144,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
- struct amdgpu_framebuffer *afb;
int r;
if (!old_state->fb)
return;
- afb = to_amdgpu_framebuffer(old_state->fb);
- rbo = gem_to_amdgpu_bo(afb->obj);
+ rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
@@ -3463,12 +3482,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct edid *edid = amdgpu_dm_connector->edid;
encoder = helper->best_encoder(connector);
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (!edid || !drm_edid_is_valid(edid)) {
+ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ }
amdgpu_dm_fbc_init(connector);
-#endif
+
return amdgpu_dm_connector->num_modes;
}
@@ -3487,7 +3509,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.stereo_allowed = false;
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
-
mutex_init(&aconnector->hpd_lock);
/* configure support HPD hot plug connector_>polled default value is 0
@@ -3496,9 +3517,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3651,10 +3676,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link,
link_index);
- drm_mode_connector_attach_encoder(
+ drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+ res = connector_debugfs_init(aconnector);
+ if (res) {
+ DRM_ERROR("Failed to create debugfs for connector");
+ goto out_free;
+ }
+#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -3773,7 +3805,7 @@ static void remove_stream(struct amdgpu_device *adev,
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
struct dc_cursor_position *position)
{
- struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int x, y;
int xorigin = 0, yorigin = 0;
@@ -3905,7 +3937,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
struct amdgpu_device *adev = crtc->dev->dev_private;
bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
struct dc_flip_addrs addr = { {0} };
@@ -3951,8 +3983,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* update crtc fb */
- crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);
@@ -3965,10 +3995,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
surface_updates->flip_addr = &addr;
-
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
@@ -3981,9 +4012,96 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
__func__,
addr.address.grph.addr.high_part,
addr.address.grph.addr.low_part);
+}
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+static bool commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dm_crtc_state *dm_new_crtc_state,
+ struct dm_crtc_state *dm_old_crtc_state,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
+
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
+ }
+
+ memset(updates, 0, sizeof(updates));
+
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
+ }
+
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, plane_states, state);
+
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
}
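
The split between the on-stack updates[] array and the heap-allocated per-plane arrays is a stack-budget judgement; a sketch of the assumed reasoning:

    /*
     * Assumed footprint reasoning: dc_surface_update is mostly pointers, so
     * updates[MAX_SURFACES] is cheap enough for the kernel stack (hence the
     * "no need to dynamically allocate this" note above). dc_flip_addrs,
     * dc_plane_info and dc_scaling_info carry the actual payload and are
     * considerably larger, so they and dc_stream_update are allocated with
     * kcalloc/kzalloc to stay clear of the fixed-size kernel stack.
     */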
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -4001,6 +4119,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc_state *new_pcrtc_state =
drm_atomic_get_new_crtc_state(state, pcrtc);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4037,7 +4157,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- if (!pflip_needed) {
+ if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
WARN_ON(!dm_new_plane_state->dc_state);
plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
@@ -4079,10 +4199,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- if (false == dc_commit_planes_to_stream(dm->dc,
+
+ if (false == commit_planes_to_stream(dm->dc,
plane_states_constructed,
planes_count,
- dc_stream_attach,
+ acrtc_state,
+ dm_old_crtc_state,
dm_state->context))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
@@ -4149,6 +4271,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
@@ -4211,6 +4334,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ pm_runtime_get_noresume(dev->dev);
+
acrtc->enabled = true;
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
@@ -4307,8 +4432,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_stream_status *status = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
/* Skip any modesets/resets */
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -4331,11 +4458,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status->plane_count);
/*TODO How it works with MPO ?*/
- if (!dc_commit_planes_to_stream(
+ if (!commit_planes_to_stream(
dm->dc,
status->plane_states,
status->plane_count,
- dm_new_crtc_state->stream,
+ dm_new_crtc_state,
+ to_dm_crtc_state(old_crtc_state),
dm_state->context))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
@@ -4348,6 +4476,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
bool modeset_needed;
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
modeset_needed = modeset_required(
@@ -4396,6 +4527,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
+
+ /* Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
}
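
The put calls here balance the pm_runtime_get_noresume() taken earlier in this function for each newly enabled CRTC. A sketch of the assumed reference balance across one commit:

    /*
     * Assumed runtime PM balance for one atomic commit:
     *
     *   CRTC turned on  -> pm_runtime_get_noresume(dev->dev); // keep GPU awake
     *   CRTC turned off -> crtc_disable_count++;              // counted above
     *
     *   for (i = 0; i < crtc_disable_count; i++)
     *           pm_runtime_put_autosuspend(dev->dev); // may arm autosuspend at 0
     *   pm_runtime_mark_last_busy(dev->dev);          // restart the idle timer
     */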
@@ -4555,8 +4694,8 @@ static int dm_update_crtcs_state(struct dc *dc,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_dm_connector *aconnector = NULL;
- struct drm_connector_state *new_con_state = NULL;
- struct dm_connector_state *dm_conn_state = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
@@ -4577,19 +4716,22 @@ static int dm_update_crtcs_state(struct dc *dc,
/* TODO This hack should go away */
if (aconnector && enable) {
// Make sure fake sink is created in plug-in scenario
- new_con_state = drm_atomic_get_connector_state(state,
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
&aconnector->base);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ &aconnector->base);
- if (IS_ERR(new_con_state)) {
- ret = PTR_ERR_OR_ZERO(new_con_state);
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
break;
}
- dm_conn_state = to_dm_connector_state(new_con_state);
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_conn_state);
+ dm_new_conn_state);
/*
* we can have no stream on ACTION_SET if a display
@@ -4695,20 +4837,30 @@ next_crtc:
* We want to do dc stream updates that do not require a
* full modeset below.
*/
- if (!enable || !aconnector || modereset_required(new_crtc_state))
+ if (!(enable && aconnector && new_crtc_state->enable &&
+ new_crtc_state->active))
continue;
/*
* Given above conditions, the dc state cannot be NULL because:
- * 1. We're attempting to enable a CRTC. Which has a...
- * 2. Valid connector attached, and
- * 3. User does not want to reset it (disable or mark inactive,
- * which can happen on a CRTC that's already disabled).
- * => It currently exists.
+ * 1. We're in the process of enabling CRTCs (just been added
+ * to the dc context, or already is on the context)
+ * 2. Has a valid connector attached, and
+ * 3. Is currently active and enabled.
+ * => The dc stream state currently exists.
*/
BUG_ON(dm_new_crtc_state->stream == NULL);
- /* Color managment settings */
- if (dm_new_crtc_state->base.color_mgmt_changed) {
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
if (ret)
goto fail;
@@ -4755,7 +4907,8 @@ static int dm_update_planes_state(struct dc *dc,
/* Remove any changed/removed planes */
if (!enable) {
- if (pflip_needed)
+ if (pflip_needed &&
+ plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
if (!old_plane_crtc)
@@ -4802,7 +4955,8 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
continue;
- if (pflip_needed)
+ if (pflip_needed &&
+ plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
WARN_ON(dm_new_plane_state->dc_state);
@@ -5009,17 +5163,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
struct edid *edid)
{
int i;
- uint64_t val_capable;
bool edid_check_required;
struct detailed_timing *timing;
struct detailed_non_pixel *data;
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ if (!connector->state) {
+ DRM_ERROR("%s - Connector has no state", __func__);
+ return;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
@@ -5038,7 +5199,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
- val_capable = 0;
+ dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {
@@ -5074,7 +5235,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector->min_vfreq * 1000000;
amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
amdgpu_dm_connector->max_vfreq * 1000000;
- val_capable = 1;
+ dm_con_state->freesync_capable = true;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b68400c1154b..a29dc35954c9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -28,7 +28,6 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
-#include "dc.h"
/*
* This file contains the definition for amdgpu_display_manager
@@ -53,6 +52,7 @@
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
+struct dc;
struct amdgpu_dm_prev_state {
struct drm_framebuffer *fb;
@@ -72,13 +72,11 @@ struct irq_list_head {
struct work_struct work;
};
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
-#endif
struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
* Caches device atomic state for suspend/resume
*/
struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
struct dm_comressor_info compressor;
-#endif
};
struct amdgpu_dm_connector {
@@ -220,6 +217,7 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
struct mod_freesync_user_enable user_enable;
+ bool freesync_capable;
};
#define to_dm_connector_state(x)\
@@ -246,7 +244,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct dc_link *link,
int link_index);
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 25f064c01038..b329393307e5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -25,6 +25,7 @@
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "dc.h"
#include "modules/color/color_gamma.h"
#define MAX_DRM_LUT_VALUE 0xFFFF
@@ -87,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);
- gamma->entries.red[i] = dal_fixed31_32_from_int(r);
- gamma->entries.green[i] = dal_fixed31_32_from_int(g);
- gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
+ gamma->entries.red[i] = dc_fixpt_from_int(r);
+ gamma->entries.green[i] = dc_fixpt_from_int(g);
+ gamma->entries.blue[i] = dc_fixpt_from_int(b);
}
return;
}
@@ -100,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);
- gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
- gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
- gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
+ gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
}
}
@@ -207,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
for (i = 0; i < 12; i++) {
/* Skip 4th element */
if (i % 4 == 3) {
- stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
+ stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero;
continue;
}
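
A worked conversion for a single LUT entry, with an assumed input value:

    /*
     * Worked example (input value assumed): a DRM LUT entry with
     * lut[i].red == 0x8000.
     *
     *   r = drm_color_lut_extract(0x8000, 16);        // still 0x8000 at 16 bits
     *   dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE); // 32768/65535 ~= 0.5000076
     *
     * Dividing by MAX_DRM_LUT_VALUE (0xFFFF) maps the full 16-bit range onto
     * [0, 1], so 0x0000 -> 0.0 and 0xFFFF -> 1.0 exactly.
     */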
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644
index 000000000000..0d9e410ca01e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_debugfs.h"
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get dp configuration
+ *
+ * cat link_settings
+ *
+ * It will list the current, verified, reported, and preferred DP configuration.
+ * current -- for the current video mode
+ * verified --- maximum configuration which passed link training
+ * reported --- DP RX reported caps (DPCD register offsets 0, 1, 2)
+ * preferred --- user forced settings
+ *
+ * --- set (or force) dp configuration
+ *
+ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force 4 lanes at 2.7 Gbps per lane:
+ * echo 4 0xa > link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * if an invalid lane count or link rate is forced, no HW programming will
+ * be done. Check the link settings after a force operation to see whether
+ * HW was programmed:
+ *
+ * cat link_settings
+ *
+ * check current and preferred settings.
+ *
+ */
+static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ const uint32_t rd_buf_size = 100;
+ uint32_t result = 0;
+ uint8_t str_len = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return 0;
+
+ rd_buf_ptr = rd_buf;
+
+ str_len = strlen("Current: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
+ link->cur_link_settings.lane_count,
+ link->cur_link_settings.link_rate,
+ link->cur_link_settings.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Verified: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
+ link->verified_link_cap.lane_count,
+ link->verified_link_cap.link_rate,
+ link->verified_link_cap.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Reported: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
+ link->reported_link_cap.lane_count,
+ link->reported_link_cap.link_rate,
+ link->reported_link_cap.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Preferred: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
+ link->preferred_link_setting.lane_count,
+ link->preferred_link_setting.link_rate,
+ link->preferred_link_setting.link_spread);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
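
The byte-by-byte put_user() loop above can also be written with the generic helper from <linux/fs.h>; a minimal sketch (function name hypothetical, buffer formatting abbreviated to one line):

    static ssize_t dp_link_settings_read_sketch(struct file *f, char __user *buf,
                                                size_t size, loff_t *pos)
    {
            struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
            struct dc_link *link = connector->dc_link;
            char rd_buf[100];
            int len;

            /* format the snapshot into a local buffer */
            len = scnprintf(rd_buf, sizeof(rd_buf), "Current: %d %d %d\n",
                            link->cur_link_settings.lane_count,
                            link->cur_link_settings.link_rate,
                            link->cur_link_settings.link_spread);

            /* handles *pos bookkeeping, partial reads and the user copy */
            return simple_read_from_buffer(buf, size, pos, rd_buf, len);
    }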
+
+static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ const uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ /* 0: lane_count; 1: link_rate */
+ uint8_t param_index = 0;
+ long param[2];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool valid_input = false;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -EINVAL;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes that could not be copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* save the user-forced lane_count and link_rate as the preferred settings;
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
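
Note that both parameters are parsed with kstrtol(..., 16, ...), so values are read as hexadecimal whether or not they carry a 0x prefix. A worked parse with assumed input:

    /*
     * Assumed input:  echo 4 0x14 > link_settings
     *
     *   param[0] = 0x4  -> LANE_COUNT_FOUR
     *   param[1] = 0x14 -> LINK_RATE_HIGH2 (5.4 Gbps per lane, HBR2)
     *
     * Both values pass the validity switches, so the pair is stored as the
     * preferred link setting; link_spread is carried over unchanged from
     * cur_link_settings.
     */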
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories, like DP-1, DP-2, DP-3, etc., one per DP display
+ *
+ * To figure out which DP-x corresponds to the display to be checked:
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files, like link_settings and phy_settings.
+ * cat link_settings
+ * and use lane_count and link_rate to figure out which DP-x maps to the
+ * display being worked on
+ *
+ * To get current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, and
+ * post_cursor2 to 0:
+ * echo 2 3 0 > phy_settings
+ *
+ * To check if change be applied, get current phy settings by
+ * cat phy_settings
+ *
+ * In case invalid values are set by user, like
+ * echo 1 4 0 > phy_settings
+ *
+ * HW will NOT be programmed by these settings.
+ * cat phy_settings will show the previous valid settings.
+ */
+static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ const uint32_t rd_buf_size = 20;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -EINVAL;
+
+ snprintf(rd_buf, rd_buf_size, " %d %d %d ",
+ link->cur_lane_setting.VOLTAGE_SWING,
+ link->cur_lane_setting.PRE_EMPHASIS,
+ link->cur_lane_setting.POST_CURSOR2);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user((*(rd_buf + result)), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ uint8_t param_index = 0;
+ long param[3];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool use_prefer_link_setting;
+ struct link_training_settings link_lane_settings;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return 0;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes that could not be copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
+ (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
+ (param[2] > POST_CURSOR2_MAX_LEVEL)) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* get link settings: lane count, link rate */
+ use_prefer_link_setting =
+ ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
+ (link->test_pattern_enabled));
+
+ memset(&link_lane_settings, 0, sizeof(link_lane_settings));
+
+ if (use_prefer_link_setting) {
+ link_lane_settings.link_settings.lane_count =
+ link->preferred_link_setting.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->preferred_link_setting.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->preferred_link_setting.link_spread;
+ } else {
+ link_lane_settings.link_settings.lane_count =
+ link->cur_link_settings.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->cur_link_settings.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->cur_link_settings.link_spread;
+ }
+
+ /* apply phy settings from user */
+ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+ link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ (enum dc_voltage_swing) (param[0]);
+ link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ (enum dc_pre_emphasis) (param[1]);
+ link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ (enum dc_post_cursor2) (param[2]);
+ }
+
+ /* program ASIC registers and DPCD registers */
+ dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test pattern supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test pattern
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > test_pattern
+ *
+ * If test pattern # is not supported, NO HW programming will be done.
+ * for DP_TEST_PATTERN_80BIT_CUSTOM, it needs extra 10 bytes of data
+ * for the user pattern. input 10 bytes data are separated by space
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > test_pattern
+ *
+ * --- HPD detection is disabled while a PHY test pattern is set
+ *
+ * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of
+ * the HW ASIC is disabled. The user can unplug the DP display from the DP
+ * connector and plug in a scope to check the test pattern PHY SI.
+ * To unplug the scope and plug the DP display back in, do the steps below:
+ * echo 0 > phy_test_pattern
+ * unplug scope
+ * plug DP display.
+ *
+ * "echo 0 > phy_test_pattern" re-enables the HPD pin so that the video sw
+ * driver can detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 100;
+ uint32_t wr_buf_count = 0;
+ int r;
+ int bytes_from_user;
+ char *sub_str = NULL;
+ uint8_t param_index = 0;
+ uint8_t param_nums = 0;
+ long param[11] = {0x0};
+ const char delimiter[3] = {' ', '\n', '\0'};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+ /* init with default 80-bit custom pattern */
+ uint8_t custom_pattern[10] = {
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
+ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct link_training_settings link_training_settings;
+ int i;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return 0;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes that could not be copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ /* check number of parameters. isspace() cannot distinguish a space from '\n' */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip space*/
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space*/
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ param_nums++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ /* max 11 parameters */
+ if (param_nums > 11)
+ param_nums = 11;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of chars already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
+ test_pattern = param[0];
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ valid_test_pattern = true;
+ break;
+
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ disable_hpd = true;
+ valid_test_pattern = true;
+ break;
+
+ default:
+ valid_test_pattern = false;
+ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ break;
+ }
+
+ if (!valid_test_pattern) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+ return bytes_from_user;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ for (i = 0; i < 10; i++) {
+ if ((uint8_t) param[i + 1] != 0x0)
+ break;
+ }
+
+ if (i < 10) {
+ /* do not use the default value */
+ for (i = 0; i < 10; i++)
+ custom_pattern[i] = (uint8_t) param[i + 1];
+ }
+ }
+
+ /* Usage: set a DP physical test pattern using debugfs with a normal DP
+ * panel, then unplug the DP panel and connect a scope to measure.
+ * Normal video mode and test patterns generated from the CRTC are
+ * visible to the user, so do not disable HPD for those.
+ * Video Mode is also set to clear the test pattern, so enable HPD
+ * because it might have been disabled after a test pattern was set.
+ * AUX depends on HPD, so this is sequence dependent; do not move!
+ */
+ if (!disable_hpd)
+ dc_link_enable_hpd(link);
+
+ prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
+ prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
+ prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
+
+ cur_link_settings.lane_count = link->cur_link_settings.lane_count;
+ cur_link_settings.link_rate = link->cur_link_settings.link_rate;
+ cur_link_settings.link_spread = link->cur_link_settings.link_spread;
+
+ link_training_settings.link_settings = cur_link_settings;
+
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
+ prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
+ (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
+ prefer_link_settings.link_rate != cur_link_settings.link_rate))
+ link_training_settings.link_settings = prefer_link_settings;
+ }
+
+ for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
+ link_training_settings.lane_settings[i] = link->cur_lane_setting;
+
+ dc_link_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ custom_pattern,
+ 10);
+
+	/* Usage: set a DP physical test pattern using AMDDP with a normal
+	 * DP panel, then unplug the panel and attach a scope to measure the
+	 * DP PHY signal. The HPD interrupt must be disabled so the SW
+	 * driver does not shut off the DP output; this is done after the
+	 * test pattern is set.
+	 */
+ if (valid_test_pattern && disable_hpd)
+ dc_link_disable_hpd(link);
+
+ kfree(wr_buf);
+
+ return bytes_from_user;
+}
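
For reference, the handler tokenizes the written buffer on spaces and parses every field as hex (kstrtol() with base 16): field 0 selects the pattern, fields 1..10 optionally override the 80-bit custom pattern. A minimal standalone sketch of the same convention in userspace C, with strtok()/strtol() standing in for strsep()/kstrtol(); the pattern value 0x4 is illustrative, not a confirmed enum encoding:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* Same convention as the debugfs handler above: up to 11
		 * space-separated hex fields; field 0 is the test pattern,
		 * fields 1..10 the optional custom pattern bytes. */
		char line[] = "0x4 0x1f 0x7c 0xf0 0xc1 0x07 0x1f 0x7c 0xf0 0xc1 0x07";
		long param[11];
		int n = 0;

		for (char *tok = strtok(line, " "); tok && n < 11;
		     tok = strtok(NULL, " "))
			param[n++] = strtol(tok, NULL, 16);

		printf("pattern 0x%lx with %d custom bytes\n", param[0], n - 1);
		return 0;
	}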
+
+static const struct file_operations dp_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_link_settings_read,
+ .write = dp_link_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_settings_debugfs_fop = {
+ .owner = THIS_MODULE,
+ .read = dp_phy_settings_read,
+ .write = dp_phy_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_test_pattern_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_phy_test_pattern_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} dp_debugfs_entries[] = {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"test_pattern", &dp_phy_test_pattern_fops}
+};
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+ int i;
+ struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+ ent = debugfs_create_file(dp_debugfs_entries[i].name,
+ 0644,
+ dir,
+ connector,
+ dp_debugfs_entries[i].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ }
+ }
+
+ return 0;
+}
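
Once registered, the entries appear under the connector's DRM debugfs directory. A hedged userspace sketch of driving the test_pattern file; the card index, connector name, and pattern value are assumptions for illustration only:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path: DRM places connector debugfs dirs
		 * under /sys/kernel/debug/dri/<minor>/<connector-name>/. */
		const char *path = "/sys/kernel/debug/dri/0/DP-1/test_pattern";
		const char *req = "0x1\n";	/* illustrative pattern value */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, req, strlen(req)) < 0)
			perror("write");
		close(fd);
		return 0;
	}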
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
new file mode 100644
index 000000000000..d9ed1b2aa811
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_DEBUGFS_H__
+#define __AMDGPU_DM_DEBUGFS_H__
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ca0b08bfa2cf..8403b6a9a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
mutex_unlock(&mst_mgr->payload_lock);
}
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -330,11 +335,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
return true;
}
-bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
-{
- return true;
-}
-
void dm_dtn_log_begin(struct dc_context *ctx)
{}
@@ -440,7 +440,7 @@ bool dm_helpers_submit_i2c(
return false;
}
- msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
+ msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
if (!msgs)
return false;
@@ -459,6 +459,22 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_is_dp_sink_present(struct dc_link *link)
+{
+ bool dp_sink_present;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+		/* BUG_ON() on a string literal is always true and would
+		 * panic here; log the error and fall through instead. */
+		DRM_ERROR("Failed to find connector for link!\n");
+ return true;
+ }
+
+ mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ dp_sink_present = dc_link_is_dp_sink_present(link);
+ mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ return dp_sink_present;
+}
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -502,6 +518,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
+ if (link->aux_mode) {
+ union test_request test_request = { {0} };
+ union test_response test_response = { {0} };
+
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
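
The new automation path above answers a DP EDID_READ test request by writing the checksum byte of the last-read EDID block (raw_edid[length - 1]) to DP_TEST_EDID_CHECKSUM and acking via TEST_RESPONSE. As a reminder of why that last byte is the checksum, a small sketch: a valid 128-byte EDID block is defined so that all of its bytes sum to 0 mod 256:

	#include <stddef.h>
	#include <stdint.h>

	/* Compute the checksum byte of a 128-byte EDID block: the value
	 * that makes the whole block sum to 0 (mod 256). The handler
	 * above simply reuses the stored byte, raw_edid[length - 1]. */
	static uint8_t edid_block_checksum(const uint8_t block[128])
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i < 127; i++)
			sum += block[i];
		return (uint8_t)(0x100 - sum);
	}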
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 4be21bf54749..a910f01838ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
return 0;
}
+ if (acrtc->otg_inst == -1)
+ return 0;
+
irq_source = dal_irq_type + acrtc->otg_inst;
st = (state == AMDGPU_IRQ_STATE_ENABLE);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ace9ad578ca0..9a300732ba37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,53 +80,72 @@ static void log_dpcd(uint8_t type,
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
- enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
- I2C_MOT_TRUE : I2C_MOT_FALSE;
- enum ddc_result res;
- ssize_t read_bytes;
+ ssize_t result = 0;
+ enum i2caux_transaction_action action;
+ enum aux_transaction_type type;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
- return read_bytes;
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ break;
case DP_AUX_NATIVE_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
case DP_AUX_I2C_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
- return read_bytes;
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ break;
case DP_AUX_I2C_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
default:
- return 0;
+ return -EINVAL;
}
#ifdef TRACE_DPCD
@@ -137,7 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- return msg->size;
+ if (result < 0) /* DC doesn't know about kernel error codes */
+ result = -EIO;
+
+ return result;
}
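
The switch above maps each DRM AUX request onto a DC (type, action) pair before calling dc_link_aux_transfer(). A hedged sketch of the same decoding factored into a standalone helper; the enum names come from this hunk, the helper itself is illustrative rather than part of the patch:

	/* Sketch only: decode msg->request into the DC transaction pair. */
	static int decode_aux_request(u8 request,
				      enum aux_transaction_type *type,
				      enum i2caux_transaction_action *action)
	{
		bool mot = request & DP_AUX_I2C_MOT;

		switch (request & ~DP_AUX_I2C_MOT) {
		case DP_AUX_NATIVE_READ:
			*type = AUX_TRANSACTION_TYPE_DP;
			*action = I2CAUX_TRANSACTION_ACTION_DP_READ;
			return 0;
		case DP_AUX_NATIVE_WRITE:
			*type = AUX_TRANSACTION_TYPE_DP;
			*action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
			return 0;
		case DP_AUX_I2C_READ:
			*type = AUX_TRANSACTION_TYPE_I2C;
			*action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT
				      : I2CAUX_TRANSACTION_ACTION_I2C_READ;
			return 0;
		case DP_AUX_I2C_WRITE:
			*type = AUX_TRANSACTION_TYPE_I2C;
			*action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT
				      : I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
			return 0;
		default:
			return -EINVAL;
		}
	}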
static enum drm_connector_status
@@ -229,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base,
NULL);
return ret;
@@ -257,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector, aconnector->edid);
}
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base, aconnector->edid);
ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -341,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = port;
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
aconnector->mst_connected = true;
@@ -389,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dev->mode_config.tile_property,
0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
/*
* Initialize connector state before adding the connectror to drm and
@@ -437,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
{
mutex_lock(&connector->dev->mode_config.mutex);
- drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644
index 000000000000..c69ae78d82b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "dm_pp_smu.h"
+
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int i;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_dcef_set_clk =
+ pp_display_cfg->min_dcfclock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ for (i = 0; i < pp_display_cfg->display_count; i++) {
+ const struct dm_pp_single_disp_config *dc_cfg =
+ &pp_display_cfg->disp_configs[i];
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ }
+
+ /* TODO: complete implementation of
+ * pp_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
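
Note the unit convention in the block above: DC reports clocks in kHz while the powerplay display configuration expects 10 kHz units, hence the repeated /10 here (and the *10 in the opposite direction later in this file). A trivial sketch of the conversion pair:

	#include <stdint.h>

	/* Illustrative only: kHz (DC side) <-> 10 kHz (powerplay side).
	 * e.g. a 300 MHz display clock: 300000 kHz -> 30000 pplib units. */
	static inline uint32_t khz_to_pp_units(uint32_t khz) { return khz / 10; }
	static inline uint32_t pp_units_to_khz(uint32_t pp)  { return pp * 10; }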
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ amd_pp_clk_type = amd_pp_dcef_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ amd_pp_clk_type = amd_pp_dcf_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ amd_pp_clk_type = amd_pp_pixel_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_FCLK:
+ amd_pp_clk_type = amd_pp_f_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ amd_pp_clk_type = amd_pp_phy_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ amd_pp_clk_type = amd_pp_dpp_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
+ enum PP_DAL_POWERLEVEL max_clocks_state)
+{
+ switch (max_clocks_state) {
+ case PP_DAL_POWERLEVEL_0:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
+ case PP_DAL_POWERLEVEL_1:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
+ case PP_DAL_POWERLEVEL_2:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
+ case PP_DAL_POWERLEVEL_3:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
+ case PP_DAL_POWERLEVEL_4:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
+ case PP_DAL_POWERLEVEL_5:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
+ case PP_DAL_POWERLEVEL_6:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
+ case PP_DAL_POWERLEVEL_7:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
+ max_clocks_state);
+ return DM_PP_CLOCKS_STATE_INVALID;
+ }
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+ }
+}
+
+static void pp_to_dc_clock_levels_with_latency(
+ const struct pp_clock_levels_with_latency *pp_clks,
+ struct dm_pp_clock_levels_with_latency *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+}
+
+static void pp_to_dc_clock_levels_with_voltage(
+ const struct pp_clock_levels_with_voltage *pp_clks,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+ pp_handle, &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+				/* This clock is higher than the validation
+				 * clock, which means the previous one is the
+				 * highest non-boosted level. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
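
A worked example of the truncation loop above, using the default engine levels from get_default_clock_levels() and a hypothetical validation clock:

	#include <stdio.h>

	int main(void)
	{
		unsigned int clocks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
		unsigned int num_levels = 6;
		/* hypothetical pplib value: 60000 in 10 kHz units */
		unsigned int engine_max_clock_khz = 60000 * 10;
		unsigned int i;

		for (i = 0; i < num_levels; i++) {
			if (clocks_in_khz[i] > engine_max_clock_khz) {
				/* level 4 (626090 kHz) is the first boosted
				 * one, so only levels 0..3 survive */
				num_levels = i > 0 ? i : 1;
				break;
			}
		}
		printf("non-boosted levels: %u\n", num_levels);	/* prints 4 */
		return 0;
	}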
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_latency pp_clks = { 0 };
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+ return false;
+
+ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+
+ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_voltage pp_clk_info = {0};
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
+		return false;
+
+	if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+
+ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+
+ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+ if (!pp_clock_request.clock_type)
+ return false;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+ adev->powerplay.pp_handle,
+ &pp_clock_request);
+ if (ret)
+ return false;
+ return true;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct amd_pp_clock_info pp_clk_info = {0};
+ int ret = 0;
+
+ if (adev->powerplay.pp_funcs->get_current_clocks)
+ ret = adev->powerplay.pp_funcs->get_current_clocks(
+ adev->powerplay.pp_handle,
+ &pp_clk_info);
+ if (ret)
+ return false;
+
+ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+
+ return true;
+}
+
+void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ return;
+
+ amdgpu_dpm_display_configuration_changed(adev);
+}
+
+void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_khz;
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_khz;
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_khz;
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_khz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_khz;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_khz;
+ }
+
+	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
+							   &wm_with_clock_ranges);
+}
+
+void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{
+ funcs->pp_smu.ctx = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
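
A hedged sketch of the consumer side: DC hands in a pp_smu_funcs_rv, gets the callbacks filled in, and later invokes them through the embedded pp_smu context. The ctx variable is assumed to be a valid struct dc_context pointer; this fragment is illustrative, not a confirmed DC call site:

	/* Sketch only (assumes a valid struct dc_context *ctx). */
	struct pp_smu_funcs_rv funcs = {0};

	dm_pp_get_funcs_rv(ctx, &funcs);
	funcs.set_pme_wa_enable(&funcs.pp_smu);	/* e.g. enable the PME workaround */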
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 89342b48be6b..9f0a217603ad 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,10 +35,13 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
+
+
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+ unsigned long long last_time_stamp)
{
- /* TODO: return actual timestamp */
- return 0;
+ return current_time_stamp - last_time_stamp;
}
void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
@@ -71,285 +74,4 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-bool dm_pp_apply_display_requirements(
- const struct dc_context *ctx,
- const struct dm_pp_display_configuration *pp_display_cfg)
-{
- struct amdgpu_device *adev = ctx->driver_context;
-
- if (adev->pm.dpm_enabled) {
-
- memset(&adev->pm.pm_display_cfg, 0,
- sizeof(adev->pm.pm_display_cfg));
-
- adev->pm.pm_display_cfg.cpu_cc6_disable =
- pp_display_cfg->cpu_cc6_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_disable =
- pp_display_cfg->cpu_pstate_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_separation_time =
- pp_display_cfg->cpu_pstate_separation_time;
-
- adev->pm.pm_display_cfg.nb_pstate_switch_disable =
- pp_display_cfg->nb_pstate_switch_disable;
-
- adev->pm.pm_display_cfg.num_display =
- pp_display_cfg->display_count;
- adev->pm.pm_display_cfg.num_path_including_non_display =
- pp_display_cfg->display_count;
-
- adev->pm.pm_display_cfg.min_core_set_clock =
- pp_display_cfg->min_engine_clock_khz/10;
- adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
- pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
- adev->pm.pm_display_cfg.min_mem_set_clock =
- pp_display_cfg->min_memory_clock_khz/10;
-
- adev->pm.pm_display_cfg.multi_monitor_in_sync =
- pp_display_cfg->all_displays_in_sync;
- adev->pm.pm_display_cfg.min_vblank_time =
- pp_display_cfg->avail_mclk_switch_time_us;
-
- adev->pm.pm_display_cfg.display_clk =
- pp_display_cfg->disp_clk_khz/10;
-
- adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
-
- adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
- adev->pm.pm_display_cfg.line_time_in_us =
- pp_display_cfg->line_time_in_us;
-
- adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
- adev->pm.pm_display_cfg.crossfire_display_index = -1;
- adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
-
- /* TODO: complete implementation of
- * pp_display_configuration_change().
- * Follow example of:
- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
-
- /* TODO: replace by a separate call to 'apply display cfg'? */
- amdgpu_pm_compute_clocks(adev);
- }
-
- return true;
-}
-
-static void get_default_clock_levels(
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *clks)
-{
- uint32_t disp_clks_in_khz[6] = {
- 300000, 400000, 496560, 626090, 685720, 757900 };
- uint32_t sclks_in_khz[6] = {
- 300000, 360000, 423530, 514290, 626090, 720000 };
- uint32_t mclks_in_khz[2] = { 333000, 800000 };
-
- switch (clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, disp_clks_in_khz,
- sizeof(disp_clks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, sclks_in_khz,
- sizeof(sclks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- clks->num_levels = 2;
- memmove(clks->clocks_in_khz, mclks_in_khz,
- sizeof(mclks_in_khz));
- break;
- default:
- clks->num_levels = 0;
- break;
- }
-}
-
-static enum amd_pp_clock_type dc_to_pp_clock_type(
- enum dm_pp_clock_type dm_pp_clk_type)
-{
- enum amd_pp_clock_type amd_pp_clk_type = 0;
-
- switch (dm_pp_clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- amd_pp_clk_type = amd_pp_disp_clock;
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- amd_pp_clk_type = amd_pp_sys_clock;
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- amd_pp_clk_type = amd_pp_mem_clock;
- break;
- default:
- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
- dm_pp_clk_type);
- break;
- }
-
- return amd_pp_clk_type;
-}
-
-static void pp_to_dc_clock_levels(
- const struct amd_pp_clocks *pp_clks,
- struct dm_pp_clock_levels *dc_clks,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->count,
- DM_PP_MAX_CLOCK_LEVELS);
-
- dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- dc_clks->num_levels = pp_clks->count;
-
- DRM_INFO("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < dc_clks->num_levels; i++) {
- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
- /* translate 10kHz to kHz */
- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
- }
-}
-
-bool dm_pp_get_clock_levels_by_type(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *dc_clks)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct amd_pp_clocks pp_clks = { 0 };
- struct amd_pp_simple_clock_info validation_clks = { 0 };
- uint32_t i;
-
- if (adev->powerplay.pp_funcs->get_clock_by_type) {
- if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
- dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
- get_default_clock_levels(clk_type, dc_clks);
- return true;
- }
- }
-
- pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
-
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
- pp_handle, &validation_clks)) {
- /* Error in pplib. Provide default values. */
- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
- validation_clks.engine_max_clock = 72000;
- validation_clks.memory_max_clock = 80000;
- validation_clks.level = 0;
- }
- }
-
- DRM_INFO("DM_PPLIB: Validation clocks:\n");
- DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
- validation_clks.engine_max_clock);
- DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
- validation_clks.memory_max_clock);
- DRM_INFO("DM_PPLIB: level : %d\n",
- validation_clks.level);
-
- /* Translate 10 kHz to kHz. */
- validation_clks.engine_max_clock *= 10;
- validation_clks.memory_max_clock *= 10;
-
- /* Determine the highest non-boosted level from the Validation Clocks */
- if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
- /* This clock is higher the validation clock.
- * Than means the previous one is the highest
- * non-boosted one. */
- DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
- DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- }
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_latency(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_latency *clk_level_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_voltage(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_voltage *clk_level_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_notify_wm_clock_changes(
- const struct dc_context *ctx,
- struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_power_level_change_request(
- const struct dc_context *ctx,
- struct dm_pp_power_level_change_request *level_change_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_clock_for_voltage_request(
- const struct dc_context *ctx,
- struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_get_static_clocks(
- const struct dc_context *ctx,
- struct dm_pp_static_clock_info *static_clk_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-void dm_pp_get_funcs_rv(
- struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
-{}
-/**** end of power component interfaces ****/