Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 4
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 61
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 246
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 170
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 67
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 456
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 13
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 27
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h | 210
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h | 187
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h | 218
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h | 338
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 12
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h | 485
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 46
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 25
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 27
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 45
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 38
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 179
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 107
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 90
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 37
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 250
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 33
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 121
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 17
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 52
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 12
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 11
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 37
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 34
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 293
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 31
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 46
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 50
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 13
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 19
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.c | 294
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.h | 38
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.c | 148
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.h | 18
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 734
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.h | 118
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 482
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.h | 40
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_debug.c | 68
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_debug.h | 10
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 909
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.h | 18
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.c | 150
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.h | 27
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 432
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.h | 44
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.c | 254
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.h | 42
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_utils.c | 20
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_utils.h | 8
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c | 7
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 31
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 144
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 11
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_devfreq.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_trace.h | 28
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.c | 46
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.h | 18
-rw-r--r-- drivers/gpu/drm/msm/msm_submitqueue.c | 7
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx.xml | 7
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml | 39
-rw-r--r-- drivers/gpu/drm/msm/registers/display/mdp5.xml | 16
-rw-r--r-- drivers/gpu/drm/msm/registers/display/mdss.xml | 29
114 files changed, 5497 insertions(+), 3150 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 90c68106b63b..7ec833b6d829 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -6,6 +6,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
+ depends on OF
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
@@ -14,6 +15,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
@@ -92,6 +94,7 @@ config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_DISPLAY_DSC_HELPER
default y
help
Compile in support for the Display Processing Unit in
@@ -113,6 +116,7 @@ config DRM_MSM_DSI
depends on DRM_MSM
select DRM_PANEL
select DRM_MIPI_DSI
+ select DRM_DISPLAY_DSC_HELPER
default y
help
Choose this option if you have a need for MIPI DSI connector
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 13110fcc46a8..f274d9430cc3 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -23,6 +23,7 @@ adreno-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+ adreno/a6xx_preempt.o \
adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
@@ -210,6 +211,7 @@ DISPLAY_HEADERS = \
generated/mdp4.xml.h \
generated/mdp5.xml.h \
generated/mdp_common.xml.h \
+ generated/mdss.xml.h \
generated/sfpb.xml.h
$(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS))
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 0dc255ddf5ce..379a3d346c30 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b46ff49f47cf..b6df115bb567 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 8b4cdf95f445..50c490b492f0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e09044930547..ee89db72e36e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- gpu->cur_ctx_seqno = 0;
+ ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
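
The hunks above for a2xx through a5xx are the same mechanical change: the "has there been a context switch on this ring?" test now reads a per-ring cur_ctx_seqno instead of the old GPU-wide gpu->cur_ctx_seqno, a prerequisite for preemption where each ring advances independently. A minimal sketch of the pattern (the helper name skip_ctx_restore() is invented here for illustration; the real check stays inline in each aNxx_submit()):

/* Sketch only - not part of the patch. */
static bool skip_ctx_restore(const struct msm_ringbuffer *ring,
			     const struct msm_gem_submit *submit)
{
	/*
	 * CTX_RESTORE_BUF commands are only needed when a different context
	 * was the last one to run on *this* ring, so compare against the
	 * ring-local sequence number rather than a GPU-global one.
	 */
	return ring->cur_ctx_seqno == submit->queue->ctx->seqno;
}
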
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7705f8010484..6b91e0bd1514 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -307,7 +307,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
- /* Set up SP/TP power collpase */
+ /* Set up SP/TP power collapse */
a5xx_pc_init(gpu);
/* Start the GPMU */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 0312b6ee0356..0c560e84ad5a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -973,6 +973,25 @@ static const struct adreno_info a6xx_gpus[] = {
},
.address_space_size = SZ_16G,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06060300),
+ .family = ADRENO_6XX_GEN4,
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a663_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00300200,
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
.fw = {
@@ -1281,6 +1300,28 @@ static const u32 a730_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a730_protect, 48);
+static const uint32_t a7xx_pwrup_reglist_regs[] = {
+ REG_A6XX_UCHE_TRAP_BASE,
+ REG_A6XX_UCHE_TRAP_BASE + 1,
+ REG_A6XX_UCHE_WRITE_THRU_BASE,
+ REG_A6XX_UCHE_WRITE_THRU_BASE + 1,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN + 1,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX + 1,
+ REG_A6XX_UCHE_CACHE_WAYS,
+ REG_A6XX_UCHE_MODE_CNTL,
+ REG_A6XX_RB_NC_MODE_CNTL,
+ REG_A6XX_RB_CMP_DBG_ECO_CNTL,
+ REG_A7XX_GRAS_NC_MODE_CNTL,
+ REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ REG_A6XX_UCHE_GBIF_GX_CONFIG,
+ REG_A6XX_UCHE_CLIENT_PF,
+ REG_A6XX_TPL1_DBG_ECO_CNTL1,
+};
+
+DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist);
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1315,15 +1356,18 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "a730_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
.family = ADRENO_7XX_GEN2,
@@ -1334,16 +1378,19 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "a740_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
.family = ADRENO_7XX_GEN2,
@@ -1354,15 +1401,18 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
+ .preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
@@ -1373,15 +1423,18 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 3572 * SZ_1K,
}
};
DECLARE_ADRENO_GPULIST(a7xx);
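
For reference, the DECLARE_ADRENO_REGLIST_LIST() macro used above is added to adreno_gpu.h later in this diff; it simply pairs the a7xx_pwrup_reglist_regs[] array with its length so the catalog entries can point at it through .pwrup_reglist. Its expansion here is roughly:

/* Approximate expansion of DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist): */
static const struct adreno_reglist_list a7xx_pwrup_reglist = {
	.regs  = a7xx_pwrup_reglist_regs,
	.count = ARRAY_SIZE(a7xx_pwrup_reglist_regs),
};
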
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 37927bdd6fbe..14db7376c712 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1522,15 +1522,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
irq = platform_get_irq_byname(pdev, name);
- ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
name, ret);
return ret;
}
- disable_irq(irq);
-
return irq;
}
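
The GMU IRQ change above replaces the request-then-disable sequence with IRQF_NO_AUTOEN, which tells the IRQ core not to enable the line as part of request_irq(). That removes the short window in which the handler could fire before the driver had a chance to disable it. A small sketch of the idea, with error handling trimmed:

#include <linux/interrupt.h>

/* Sketch: request a GMU interrupt that stays masked until resume time. */
static int request_masked_irq(unsigned int irq, irq_handler_t handler,
			      const char *name, void *data)
{
	/*
	 * IRQF_NO_AUTOEN keeps the line disabled on return, so there is no
	 * gap between request_irq() and a later disable_irq() during which
	 * the handler could run; the line is turned on later with
	 * enable_irq() when the GMU is actually brought up.
	 */
	return request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
			   name, data);
}
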
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 94b6c5cab6f4..b4a79f88ccf4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -99,6 +99,7 @@ struct a6xx_gmu {
struct completion pd_gate;
struct qmp *qmp;
+ struct a6xx_hfi_msg_bw_table *bw_table;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 702b8d4b3497..019610341df1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -68,6 +68,8 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
@@ -81,12 +83,17 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);
- spin_unlock_irqrestore(&ring->preempt_lock, flags);
-
- /* Make sure everything is posted before making a decision */
- mb();
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (!a6xx_in_preempt(a6xx_gpu)) {
+ if (a6xx_gpu->cur_ring == ring)
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ else
+ ring->restore_wptr = true;
+ } else {
+ ring->restore_wptr = true;
+ }
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
}
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
@@ -110,7 +117,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+ if (ctx->seqno == ring->cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -148,12 +155,14 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
/*
* Write the new TTBR0 to the memstore. This is good for debugging.
+ * Needed for preemption
*/
- OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_PKT7(ring, CP_MEM_WRITE, 5);
OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
- OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+ OUT_RING(ring, upper_32_bits(ttbr));
+ OUT_RING(ring, ctx->seqno);
/*
* Sync both threads after switching pagetables and enable BR only
@@ -229,7 +238,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -278,6 +287,46 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring,
+ struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue)
+{
+ u64 preempt_postamble;
+
+ OUT_PKT7(ring, CP_SET_PSEUDO_REG, 12);
+
+ OUT_RING(ring, SMMU_INFO);
+ /* don't save SMMU, we write the record from the kernel instead */
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* privileged and non secure buffer save */
+ OUT_RING(ring, NON_SECURE_SAVE_ADDR);
+ OUT_RING(ring, lower_32_bits(
+ a6xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(
+ a6xx_gpu->preempt_iova[ring->id]));
+
+ /* user context buffer save, seems to be unused by fw */
+ OUT_RING(ring, NON_PRIV_SAVE_ADDR);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ OUT_RING(ring, COUNTER);
+ /* seems OK to set to 0 to disable it */
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Emit postamble to clear perfcounters */
+ preempt_postamble = a6xx_gpu->preempt_postamble_iova;
+
+ OUT_PKT7(ring, CP_SET_AMBLE, 3);
+ OUT_RING(ring, lower_32_bits(preempt_postamble));
+ OUT_RING(ring, upper_32_bits(preempt_postamble));
+ OUT_RING(ring, CP_SET_AMBLE_2_DWORDS(
+ a6xx_gpu->preempt_postamble_len) |
+ CP_SET_AMBLE_2_TYPE(KMD_AMBLE_TYPE));
+}
+
static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
@@ -295,6 +344,13 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_set_pagetable(a6xx_gpu, ring, submit);
+ /*
+ * If preemption is enabled, then set the pseudo register for the save
+ * sequence
+ */
+ if (gpu->nr_rings > 1)
+ a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue);
+
get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
@@ -306,8 +362,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x101); /* IFPC disable */
- OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, 0x00d); /* IB1LIST start */
+ if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) {
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00d); /* IB1LIST start */
+ }
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
@@ -315,7 +373,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -338,8 +396,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
update_shadow_rptr(gpu, ring);
}
- OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, 0x00e); /* IB1LIST end */
+ if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) {
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00e); /* IB1LIST end */
+ }
get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
@@ -386,6 +446,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, submit->seqno);
+ a6xx_gpu->last_seqno[ring->id] = submit->seqno;
+
/* write the ringbuffer timestamp */
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
@@ -399,10 +461,32 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x100); /* IFPC enable */
+ /* If preemption is enabled */
+ if (gpu->nr_rings > 1) {
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+
+ /*
+ * If dword[2:1] are non zero, they specify an address for
+ * the CP to write the value of dword[3] to on preemption
+ * complete. Write 0 to skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* generate interrupt on preemption completion */
+ OUT_RING(ring, 0x00);
+ }
+
+
trace_msm_gpu_submit_flush(submit,
gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
a6xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a6xx_preempt_trigger(gpu);
}
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
@@ -551,6 +635,15 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.macrotile_mode = 1;
}
+ if (adreno_is_a663(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ gpu->ubwc_config.ubwc_swizzle = 0x4;
+ }
+
if (adreno_is_7c3(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.amsbc = 1;
@@ -609,6 +702,77 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
adreno_gpu->ubwc_config.macrotile_mode);
}
+static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ const struct adreno_reglist_list *reglist;
+ void *ptr = a6xx_gpu->pwrup_reglist_ptr;
+ struct cpu_gpu_lock *lock = ptr;
+ u32 *dest = (u32 *)&lock->regs[0];
+ int i;
+
+ reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+
+ lock->gpu_req = lock->cpu_req = lock->turn = 0;
+ lock->ifpc_list_len = 0;
+ lock->preemption_list_len = reglist->count;
+
+ /*
+ * For each entry in each of the lists, write the offset and the current
+ * register value into the GPU buffer
+ */
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
+
+ /*
+ * The overall register list is composed of
+ * 1. Static IFPC-only registers
+ * 2. Static IFPC + preemption registers
+ * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+ *
+ * The first two lists are static. Their sizes are stored as the
+ * number of pairs in ifpc_list_len and preemption_list_len
+ * respectively. With concurrent binning, some of the perfcounter
+ * registers are virtualized, so the CP needs to know the pipe id to
+ * program the aperture in order to restore them. Thus, the third list
+ * is a dynamic list of triplets
+ * (<aperture, shifted 12 bits> <address> <data>), and its length is
+ * stored as the number of triplets in dynamic_list_len.
+ */
+ lock->dynamic_list_len = 0;
+}
+
+static int a7xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings <= 1)
+ return 0;
+
+ /* Turn CP protection off */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Generate interrupt on preemption completion */
+ OUT_RING(ring, 0x00);
+
+ a6xx_flush(gpu, ring);
+
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
static int a6xx_cp_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@@ -640,6 +804,8 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
static int a7xx_cp_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
u32 mask;
@@ -677,11 +843,11 @@ static int a7xx_cp_init(struct msm_gpu *gpu)
/* *Don't* send a power up reg list for concurrent binning (TODO) */
/* Lo address */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova));
/* Hi address */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova));
/* BIT(31) set => read the regs from the list */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, BIT(31));
a6xx_flush(gpu, ring);
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
@@ -805,6 +971,16 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
+ a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+ &a6xx_gpu->pwrup_reglist_iova);
+
+ if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
+ return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr);
+
+ msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist");
+
return 0;
}
@@ -864,6 +1040,7 @@ static int hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u64 gmem_range_min;
+ unsigned int i;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1072,7 +1249,7 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
/* Set dualQ + disable afull for A660 GPU */
- else if (adreno_is_a660(adreno_gpu))
+ else if (adreno_is_a660(adreno_gpu) || adreno_is_a663(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
else if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
@@ -1134,22 +1311,32 @@ static int hw_init(struct msm_gpu *gpu)
if (a6xx_gpu->shadow_bo) {
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
shadowptr(a6xx_gpu, gpu->rb[0]));
+ for (unsigned int i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
}
/* ..which means "always" on A7xx, also for BV shadow */
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
- rbmemptr(gpu->rb[0], bv_fence));
+ rbmemptr(gpu->rb[0], bv_rptr));
}
+ a6xx_preempt_hw_init(gpu);
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- gpu->cur_ctx_seqno = 0;
+ for (i = 0; i < gpu->nr_rings; i++)
+ gpu->rb[i]->cur_ctx_seqno = 0;
/* Enable the SQE_to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+ if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) {
+ a7xx_patch_pwrup_reglist(gpu);
+ a6xx_gpu->pwrup_reglist_emitted = true;
+ }
+
ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
if (ret)
goto out;
@@ -1187,6 +1374,10 @@ static int hw_init(struct msm_gpu *gpu)
out:
if (adreno_has_gmu_wrapper(adreno_gpu))
return ret;
+
+ /* Last step - yield the ringbuffer */
+ a7xx_preempt_start(gpu);
+
/*
* Tell the GMU that we are done touching the GPU and it can start power
* management
@@ -1564,8 +1755,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
a7xx_sw_fuse_violation_irq(gpu);
- if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
msm_gpu_retire(gpu);
+ a6xx_preempt_trigger(gpu);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
+ a6xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
@@ -2259,6 +2455,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ extern int enable_preemption;
bool is_a7xx;
int ret;
@@ -2297,7 +2494,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (is_a7xx)
+ if ((enable_preemption == 1) || (enable_preemption == -1 &&
+ (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
+ else if (is_a7xx)
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
else if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
@@ -2338,6 +2538,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
a6xx_fault_handler);
a6xx_calc_ubwc_config(adreno_gpu);
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a6xx_preempt_init(gpu);
return gpu;
}
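
One detail worth calling out from the a6xx_gpu.c changes above: a6xx_flush() no longer writes CP_RB_WPTR unconditionally. If a preemption is in flight, or the ring being flushed is not the one the CP is currently executing, the new write pointer is only remembered via ring->restore_wptr and the register write is replayed later by update_wptr() in a6xx_preempt.c, under the same preempt_lock. A condensed sketch of that decision:

/* Sketch of the wptr-or-defer logic in a6xx_flush() above. */
static void flush_wptr_sketch(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu,
			      struct msm_ringbuffer *ring)
{
	unsigned long flags;
	uint32_t wptr;

	spin_lock_irqsave(&ring->preempt_lock, flags);
	wptr = get_wptr(ring);

	if (!a6xx_in_preempt(a6xx_gpu) && a6xx_gpu->cur_ring == ring)
		gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); /* safe to write now */
	else
		ring->restore_wptr = true; /* replayed by update_wptr() later */

	spin_unlock_irqrestore(&ring->preempt_lock, flags);
}
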
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 0fb7febf70e7..4aceffb6aae8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -12,15 +12,35 @@
extern bool hang_debug;
+struct cpu_gpu_lock {
+ uint32_t gpu_req;
+ uint32_t cpu_req;
+ uint32_t turn;
+ union {
+ struct {
+ uint16_t list_length;
+ uint16_t list_offset;
+ };
+ struct {
+ uint8_t ifpc_list_len;
+ uint8_t preemption_list_len;
+ uint16_t dynamic_list_len;
+ };
+ };
+ uint64_t regs[62];
+};
+
/**
* struct a6xx_info - a6xx specific information from device table
*
* @hwcg: hw clock gating register sequence
* @protect: CP_PROTECT settings
* @pwrup_reglist: power-up register list for preemption
*/
struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
+ const struct adreno_reglist_list *pwrup_reglist;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -33,6 +53,29 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ void *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS];
+ void *preempt_smmu[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS];
+ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ spinlock_t eval_lock;
+ struct timer_list preempt_timer;
+
+ unsigned int preempt_level;
+ bool uses_gmem;
+ bool skip_save_restore;
+
+ struct drm_gem_object *preempt_postamble_bo;
+ void *preempt_postamble_ptr;
+ uint64_t preempt_postamble_iova;
+ uint64_t preempt_postamble_len;
+ bool postamble_enabled;
struct a6xx_gmu gmu;
@@ -40,6 +83,11 @@ struct a6xx_gpu {
uint64_t shadow_iova;
uint32_t *shadow;
+ struct drm_gem_object *pwrup_reglist_bo;
+ void *pwrup_reglist_ptr;
+ uint64_t pwrup_reglist_iova;
+ bool pwrup_reglist_emitted;
+
bool has_whereami;
void __iomem *llc_mmio;
@@ -52,6 +100,100 @@ struct a6xx_gpu {
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
+enum a6xx_preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_FINISH,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
+
+/*
+ * struct a6xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (2112k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0xAE399D6EUL
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @errno: preemption error record
+ * @data: Data field in YIELD and SET_MARKER packets, Written and used by CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @_pad: Reserved/padding
+ * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the preemption counters
+ * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
+ */
+struct a6xx_preempt_record {
+ u32 magic;
+ u32 info;
+ u32 errno;
+ u32 data;
+ u32 cntl;
+ u32 rptr;
+ u32 wptr;
+ u32 _pad;
+ u64 rptr_addr;
+ u64 rbase;
+ u64 counter;
+ u64 bv_rptr_addr;
+};
+
+#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL
+
+#define PREEMPT_SMMU_INFO_SIZE 4096
+
+#define PREEMPT_RECORD_SIZE(adreno_gpu) \
+ ((adreno_gpu->info->preempt_record_size) == 0 ? \
+ 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size))
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+struct a7xx_cp_smmu_info {
+ u32 magic;
+ u32 _pad4;
+ u64 ttbr0;
+ u32 asid;
+ u32 context_idr;
+ u32 context_bank;
+};
+
+#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL
+
+/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for
* _len + 1 registers starting at _reg.
@@ -108,6 +250,34 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_preempt_init(struct msm_gpu *gpu);
+void a6xx_preempt_hw_init(struct msm_gpu *gpu);
+void a6xx_preempt_trigger(struct msm_gpu *gpu);
+void a6xx_preempt_irq(struct msm_gpu *gpu);
+void a6xx_preempt_fini(struct msm_gpu *gpu);
+int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
+ struct msm_gpu_submitqueue *queue);
+void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
+ struct msm_gpu_submitqueue *queue);
+
+/* Return true if we are in a preempt state */
+static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
+{
+ /*
+ * Make sure the read to preempt_state is ordered with respect to reads
+ * of other variables before ...
+ */
+ smp_rmb();
+
+ int preempt_state = atomic_read(&a6xx_gpu->preempt_state);
+
+ /* ... and after. */
+ smp_rmb();
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_FINISH);
+}
+
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
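
The preemption state machine documented above only ever moves along a few legal edges, and a6xx_preempt.c enforces them at runtime with atomic_cmpxchg(). Purely as a reading aid (this function does not exist in the driver), the transitions implemented by the code in this series can be tabulated as:

/* Illustrative only: legal preempt_state transitions in this series. */
static bool preempt_transition_valid(enum a6xx_preempt_state old,
				     enum a6xx_preempt_state new)
{
	switch (old) {
	case PREEMPT_NONE:      return new == PREEMPT_START;
	case PREEMPT_START:     return new == PREEMPT_TRIGGERED ||
				       new == PREEMPT_FINISH; /* abort path */
	case PREEMPT_FINISH:    return new == PREEMPT_NONE;
	case PREEMPT_TRIGGERED: return new == PREEMPT_PENDING ||
				       new == PREEMPT_FAULTED; /* watchdog */
	case PREEMPT_PENDING:   return new == PREEMPT_FINISH ||
				       new == PREEMPT_FAULTED;
	default:                return false;
	}
}
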
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cdb3f6e74d3e..cb8844ed46b2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -478,6 +478,37 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x500b4;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50058;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
@@ -630,32 +661,44 @@ static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
- struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ if (gmu->bw_table)
+ goto send;
+
+ msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
if (adreno_is_a618(adreno_gpu))
- a618_build_bw_table(&msg);
+ a618_build_bw_table(msg);
else if (adreno_is_a619(adreno_gpu))
- a619_build_bw_table(&msg);
+ a619_build_bw_table(msg);
else if (adreno_is_a640_family(adreno_gpu))
- a640_build_bw_table(&msg);
+ a640_build_bw_table(msg);
else if (adreno_is_a650(adreno_gpu))
- a650_build_bw_table(&msg);
+ a650_build_bw_table(msg);
else if (adreno_is_7c3(adreno_gpu))
- adreno_7c3_build_bw_table(&msg);
+ adreno_7c3_build_bw_table(msg);
else if (adreno_is_a660(adreno_gpu))
- a660_build_bw_table(&msg);
+ a660_build_bw_table(msg);
+ else if (adreno_is_a663(adreno_gpu))
+ a663_build_bw_table(msg);
else if (adreno_is_a690(adreno_gpu))
- a690_build_bw_table(&msg);
+ a690_build_bw_table(msg);
else if (adreno_is_a730(adreno_gpu))
- a730_build_bw_table(&msg);
+ a730_build_bw_table(msg);
else if (adreno_is_a740_family(adreno_gpu))
- a740_build_bw_table(&msg);
+ a740_build_bw_table(msg);
else
- a6xx_build_bw_table(&msg);
+ a6xx_build_bw_table(msg);
+
+ gmu->bw_table = msg;
- return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+send:
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)),
NULL, 0);
}
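
The a6xx_hfi_send_bw_table() rework above also changes the allocation strategy: the bandwidth table is no longer a large on-stack struct rebuilt on every resume, but a devm_kzalloc()'d buffer filled in once and cached in gmu->bw_table, so later calls just resend it. The build-once pattern, sketched:

/* Sketch of the cache-on-first-use pattern used above. */
static int send_bw_table_sketch(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table *msg;

	if (!gmu->bw_table) {
		/* Built once; freed automatically with the GMU device. */
		msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;
		/* ... fill in the per-SoC DDR/CNOC votes ... */
		gmu->bw_table = msg;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table,
				 sizeof(*gmu->bw_table), NULL, 0);
}
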
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
new file mode 100644
index 000000000000..2fd4e39f618f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2023 Collabora, Ltd. */
+/* Copyright (c) 2024 Valve Corporation */
+
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu,
+ enum a6xx_preempt_state old, enum a6xx_preempt_state new)
+{
+ enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a6xx_gpu *gpu,
+ enum a6xx_preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ if (ring->restore_wptr) {
+ wptr = get_wptr(ring);
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+
+ ring->restore_wptr = false;
+ }
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ if (!empty && ring == a6xx_gpu->cur_ring)
+ empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i];
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a6xx_preempt_timer(struct timer_list *t)
+{
+ struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer);
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu)
+{
+ u32 *postamble = a6xx_gpu->preempt_postamble_ptr;
+ u32 count = 0;
+
+ postamble[count++] = PKT7(CP_REG_RMW, 3);
+ postamble[count++] = REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD;
+ postamble[count++] = 0;
+ postamble[count++] = 1;
+
+ postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6);
+ postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ);
+ postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS);
+ postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0);
+ postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1);
+ postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1);
+ postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0);
+
+ a6xx_gpu->preempt_postamble_len = count;
+
+ a6xx_gpu->postamble_enabled = true;
+}
+
+static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu)
+{
+ u32 *postamble = a6xx_gpu->preempt_postamble_ptr;
+
+ /*
+ * Disable the postamble by replacing the first packet header with a NOP
+ * that covers the whole buffer.
+ */
+ *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1));
+
+ a6xx_gpu->postamble_enabled = false;
+}
+
+void a6xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a6xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
+ * to zero before firing the interrupt, but there is a non zero chance
+ * of a hardware condition or a software race that could set it again
+ * before we have a chance to finish. If that happens, log and go for
+ * recovery
+ */
+ status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status & A6XX_CP_CONTEXT_SWITCH_CNTL_STOP)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! irq\n");
+ set_preempt_state(a6xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ return;
+ }
+
+ a6xx_gpu->cur_ring = a6xx_gpu->next_ring;
+ a6xx_gpu->next_ring = NULL;
+
+ set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
+
+ update_wptr(gpu, a6xx_gpu->cur_ring);
+
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+
+ trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
+
+ /*
+ * Retrigger preemption to avoid a deadlock that might occur when preemption
+ * is skipped due to it being already in flight when requested.
+ */
+ a6xx_preempt_trigger(gpu);
+}
+
+void a6xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i];
+
+ record_ptr->wptr = 0;
+ record_ptr->rptr = 0;
+ record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]);
+ record_ptr->info = 0;
+ record_ptr->data = 0;
+ record_ptr->rbase = gpu->rb[i]->iova;
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
+
+ /* Enable the GMEM save/restore feature for preemption */
+ gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+
+ /* Reset the preemption state */
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+
+ spin_lock_init(&a6xx_gpu->eval_lock);
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+}
+
+void a6xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+ unsigned int cntl;
+ bool sysprof;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Lock to make sure another thread attempting preemption doesn't skip it
+ * while we are still evaluating the next ring. This makes sure the other
+ * thread does start preemption if we abort it and avoids a soft lock.
+ */
+ spin_lock_irqsave(&a6xx_gpu->eval_lock, flags);
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) {
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+ return;
+ }
+
+ cntl = A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level);
+
+ if (a6xx_gpu->skip_save_restore)
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE;
+
+ if (a6xx_gpu->uses_gmem)
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM;
+
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+ * one do nothing except to update the wptr to the latest and greatest
+ */
+ if (!ring || (a6xx_gpu->cur_ring == ring)) {
+ set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
+ update_wptr(gpu, a6xx_gpu->cur_ring);
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ struct a7xx_cp_smmu_info *smmu_info_ptr =
+ a6xx_gpu->preempt_smmu[ring->id];
+ struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id];
+ u64 ttbr0 = ring->memptrs->ttbr0;
+ u32 context_idr = ring->memptrs->context_idr;
+
+ smmu_info_ptr->ttbr0 = ttbr0;
+ smmu_info_ptr->context_idr = context_idr;
+ record_ptr->wptr = get_wptr(ring);
+
+ /*
+ * The GPU will write the wptr we set above when we preempt. Reset
+ * restore_wptr to make sure that we don't write WPTR to the same
+ * thing twice. It's still possible subsequent submissions will update
+ * wptr again, in which case they will set the flag to true. This has
+ * to be protected by the lock for setting the flag and updating wptr
+ * to be atomic.
+ */
+ ring->restore_wptr = false;
+
+ trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ gpu_write64(gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
+ a6xx_gpu->preempt_smmu_iova[ring->id]);
+
+ gpu_write64(gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
+ a6xx_gpu->preempt_iova[ring->id]);
+
+ a6xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Enable or disable postamble as needed */
+ sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+
+ if (!sysprof && !a6xx_gpu->postamble_enabled)
+ preempt_prepare_postamble(a6xx_gpu);
+
+ if (sysprof && a6xx_gpu->postamble_enabled)
+ preempt_disable_postamble(a6xx_gpu);
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Trigger the preemption */
+ gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
+}
+
+static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_gem_object *bo = NULL;
+ phys_addr_t ttbr;
+ u64 iova = 0;
+ void *ptr;
+ int asid;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ PREEMPT_RECORD_SIZE(adreno_gpu),
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ memset(ptr, 0, PREEMPT_RECORD_SIZE(adreno_gpu));
+
+ msm_gem_object_set_name(bo, "preempt_record ring%d", ring->id);
+
+ a6xx_gpu->preempt_bo[ring->id] = bo;
+ a6xx_gpu->preempt_iova[ring->id] = iova;
+ a6xx_gpu->preempt[ring->id] = ptr;
+
+ struct a6xx_preempt_record *record_ptr = ptr;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ PREEMPT_SMMU_INFO_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
+ gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ memset(ptr, 0, PREEMPT_SMMU_INFO_SIZE);
+
+ msm_gem_object_set_name(bo, "preempt_smmu_info ring%d", ring->id);
+
+ a6xx_gpu->preempt_smmu_bo[ring->id] = bo;
+ a6xx_gpu->preempt_smmu_iova[ring->id] = iova;
+ a6xx_gpu->preempt_smmu[ring->id] = ptr;
+
+ struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
+
+ msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+
+ smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
+ smmu_info_ptr->ttbr0 = ttbr;
+ smmu_info_ptr->asid = 0xdecafbad;
+ smmu_info_ptr->context_idr = 0;
+
+ /* Set up the defaults on the preemption record */
+ record_ptr->magic = A6XX_PREEMPT_RECORD_MAGIC;
+ record_ptr->info = 0;
+ record_ptr->data = 0;
+ record_ptr->rptr = 0;
+ record_ptr->wptr = 0;
+ record_ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ record_ptr->rbase = ring->iova;
+ record_ptr->counter = 0;
+ record_ptr->bv_rptr_addr = rbmemptr(ring, bv_rptr);
+
+ return 0;
+}
+
+void a6xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+}
+
+void a6xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a6xx_gpu, gpu->rb[i]))
+ goto fail;
+ }
+
+ /* TODO: make this configurable? */
+ a6xx_gpu->preempt_level = 1;
+ a6xx_gpu->uses_gmem = 1;
+ a6xx_gpu->skip_save_restore = 1;
+
+ a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
+ PAGE_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
+ gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+ &a6xx_gpu->preempt_postamble_iova);
+
+ preempt_prepare_postamble(a6xx_gpu);
+
+ if (IS_ERR(a6xx_gpu->preempt_postamble_ptr))
+ goto fail;
+
+ timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0);
+
+ return;
+fail:
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a6xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "preemption init failed, disabling preemption\n");
+
+ return;
+}
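
The new file above coordinates three contexts that can all want to preempt (the submit path, the retire interrupt and the preempt-done interrupt) without holding a lock across the hardware sequence: preempt_state is the single source of truth and transitions are claimed with atomic_cmpxchg() via try_preempt_state(). A stripped-down sketch of that claim/abort/commit flow, with the register writes and wptr update elided:

/* Sketch: how concurrent callers race for the right to preempt. */
static void preempt_trigger_sketch(struct a6xx_gpu *a6xx_gpu, bool switch_needed)
{
	/* Only one caller wins NONE -> START; everyone else backs off. */
	if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START))
		return;

	if (!switch_needed) {
		/* Abort: fall back through FINISH to NONE. */
		set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
		set_preempt_state(a6xx_gpu, PREEMPT_NONE);
		return;
	}

	/*
	 * Commit: program the CP, then wait for either the done interrupt
	 * (TRIGGERED -> PENDING -> NONE) or the watchdog timer, which moves
	 * TRIGGERED -> FAULTED and queues GPU recovery.
	 */
	set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
}
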
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index cfc74a9e2646..9ffe91920fbf 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -20,6 +20,10 @@ bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+int enable_preemption = -1;
+MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
+module_param(enable_preemption, int, 0600);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
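
The enable_preemption parameter above only decides whether the kernel brings up the extra rings (4 instead of 1); individual workloads still have to opt in per submit queue with the MSM_SUBMITQUEUE_ALLOW_PREEMPT flag that a7xx_submit() checks earlier in this diff. A hypothetical userspace sketch; the flag itself is defined by the msm_drm.h UAPI update that accompanies this series and is not shown here:

/* Hypothetical userspace sketch: create a submit queue that may be preempted. */
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int open_preemptible_queue(int drm_fd, unsigned int prio)
{
	struct drm_msm_submitqueue req = {
		.flags = MSM_SUBMITQUEUE_ALLOW_PREEMPT,
		.prio  = prio,	/* maps onto one of the kernel rings */
	};

	if (ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;

	return req.id;		/* queue id filled in by the kernel */
}
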
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 465a4cd14a43..75f5367e73ca 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -533,7 +533,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu)
if (!adreno_gpu->info->fw[i])
continue;
- /* Skip loading GMU firwmare with GMU Wrapper */
+ /* Skip loading GMU firmware with GMU Wrapper */
if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
continue;
@@ -572,8 +572,19 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
int adreno_hw_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
VERB("%s", gpu->name);
+ if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
+ qcom_scm_set_gpu_smmu_aperture_is_available()) {
+ /* We currently always use context bank 0, so hard code this */
+ ret = qcom_scm_set_gpu_smmu_aperture(0);
+ if (ret)
+ DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
+ }
+
for (int i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 58d7e7915c57..e71f420f8b3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -56,6 +56,7 @@ enum adreno_family {
#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
+#define ADRENO_QUIRK_PREEMPTION BIT(5)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
@@ -111,6 +112,7 @@ struct adreno_info {
* {SHRT_MAX, 0} sentinal.
*/
struct adreno_speedbin *speedbins;
+ u64 preempt_record_size;
};
#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
@@ -156,6 +158,19 @@ static const struct adreno_protect name = { \
.count_max = __count_max, \
};
+struct adreno_reglist_list {
+ /** @regs: List of registers */
+ const u32 *regs;
+ /** @count: Number of registers in the list */
+ u32 count;
+};
+
+#define DECLARE_ADRENO_REGLIST_LIST(name) \
+static const struct adreno_reglist_list name = { \
+ .regs = name ## _regs, \
+ .count = ARRAY_SIZE(name ## _regs), \
+};
+
struct adreno_gpu {
struct msm_gpu base;
const struct adreno_info *info;
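
DECLARE_ADRENO_REGLIST_LIST relies on token pasting: invoking it with name expects a matching name_regs array to already be in scope and derives the count with ARRAY_SIZE. A self-contained illustration of the same convention; the register offsets below are invented for the example:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reglist_list {
	const unsigned int *regs;
	unsigned int count;
};

/* Same token-pasting convention: <name> must come with a <name>_regs array. */
#define DECLARE_REGLIST_LIST(name) \
static const struct reglist_list name = { \
	.regs = name ## _regs, \
	.count = ARRAY_SIZE(name ## _regs), \
}

/* The register offsets below are made up purely for illustration. */
static const unsigned int example_pwrup_regs[] = { 0x100, 0x104, 0x10c };
DECLARE_REGLIST_LIST(example_pwrup);

int main(void)
{
	printf("%u registers, first at 0x%x\n", example_pwrup.count, example_pwrup.regs[0]);
	return 0;
}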
@@ -455,6 +470,11 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 680);
}
+static inline int adreno_is_a663(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06060300;
+}
+
static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x06090000;
@@ -656,12 +676,15 @@ OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
OUT_RING(ring, PKT4(regindx, cnt));
}
+#define PKT7(opcode, cnt) \
+ (CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23))
+
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
adreno_wait_ring(ring, cnt + 1);
- OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
- ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+ OUT_RING(ring, PKT7(opcode, cnt));
}
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
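
The new PKT7() macro lifts the type-7 header encoding out of OUT_PKT7(), presumably so the same header can be built outside a ring (for example when preparing preemption postamble buffers). A standalone sketch of the encoding; the constant and the parity macro mirror the usual Adreno PM4 definitions and should be treated as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values: type-7 packets carry odd parity over both the
 * payload count and the opcode field. */
#define CP_TYPE7_PKT	0x70000000
#define PM4_PARITY(val) \
	((0x9669 >> (0xf & ((val) ^ ((val) >> 4) ^ ((val) >> 8) ^ ((val) >> 12)))) & 1)

#define PKT7(opcode, cnt) \
	(CP_TYPE7_PKT | ((cnt) << 0) | (PM4_PARITY(cnt) << 15) | \
	 (((opcode) & 0x7f) << 16) | (PM4_PARITY(opcode) << 23))

int main(void)
{
	/* Header for a hypothetical opcode 0x12 followed by three payload dwords. */
	uint32_t hdr = PKT7(0x12, 3);

	printf("header = 0x%08x\n", hdr);
	return 0;
}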
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
new file mode 100644
index 000000000000..ab3dfb0b374e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_14_MSM8937_H
+#define _DPU_1_14_MSM8937_H
+
+static const struct dpu_caps msm8937_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 40 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8937_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8937_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8937_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8937_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8937_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8937_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8937_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8937_perf_data = {
+ .max_bw_low = 3100000,
+ .max_bw_high = 3100000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 14,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8937_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 14,
+};
+
+const struct dpu_mdss_cfg dpu_msm8937_cfg = {
+ .mdss_ver = &msm8937_mdss_ver,
+ .caps = &msm8937_dpu_caps,
+ .mdp = msm8937_mdp,
+ .ctl_count = ARRAY_SIZE(msm8937_ctl),
+ .ctl = msm8937_ctl,
+ .sspp_count = ARRAY_SIZE(msm8937_sspp),
+ .sspp = msm8937_sspp,
+ .mixer_count = ARRAY_SIZE(msm8937_lm),
+ .mixer = msm8937_lm,
+ .dspp_count = ARRAY_SIZE(msm8937_dspp),
+ .dspp = msm8937_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8937_pp),
+ .pingpong = msm8937_pp,
+ .intf_count = ARRAY_SIZE(msm8937_intf),
+ .intf = msm8937_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8937_perf_data,
+};
+
+#endif
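
Each catalog file is a set of const tables paired with ARRAY_SIZE counts in the dpu_mdss_cfg at the bottom, and consumers simply walk those arrays. A toy lookup over a similarly shaped table (fields trimmed to what the example needs, not the driver's real struct dpu_sspp_cfg):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Trimmed stand-in for a catalog entry; only what the lookup needs. */
struct sspp_entry {
	const char *name;
	int id;
	unsigned int base;
};

static const struct sspp_entry example_sspp[] = {
	{ .name = "sspp_0", .id = 0, .base = 0x4000 },
	{ .name = "sspp_4", .id = 4, .base = 0x14000 },
	{ .name = "sspp_8", .id = 8, .base = 0x24000 },
};

static const struct sspp_entry *find_sspp(int id)
{
	for (size_t i = 0; i < ARRAY_SIZE(example_sspp); i++)
		if (example_sspp[i].id == id)
			return &example_sspp[i];
	return NULL;
}

int main(void)
{
	const struct sspp_entry *e = find_sspp(4);

	if (e)
		printf("%s at 0x%x\n", e->name, e->base);
	return 0;
}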
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
new file mode 100644
index 000000000000..6bdaecca6761
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_15_MSM8917_H
+#define _DPU_1_15_MSM8917_H
+
+static const struct dpu_caps msm8917_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 16 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8917_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8917_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8917_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8917_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8917_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8917_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8917_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8917_perf_data = {
+ .max_bw_low = 1800000,
+ .max_bw_high = 1800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 21,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8917_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 15,
+};
+
+const struct dpu_mdss_cfg dpu_msm8917_cfg = {
+ .mdss_ver = &msm8917_mdss_ver,
+ .caps = &msm8917_dpu_caps,
+ .mdp = msm8917_mdp,
+ .ctl_count = ARRAY_SIZE(msm8917_ctl),
+ .ctl = msm8917_ctl,
+ .sspp_count = ARRAY_SIZE(msm8917_sspp),
+ .sspp = msm8917_sspp,
+ .mixer_count = ARRAY_SIZE(msm8917_lm),
+ .mixer = msm8917_lm,
+ .dspp_count = ARRAY_SIZE(msm8917_dspp),
+ .dspp = msm8917_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8917_pp),
+ .pingpong = msm8917_pp,
+ .intf_count = ARRAY_SIZE(msm8917_intf),
+ .intf = msm8917_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8917_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
new file mode 100644
index 000000000000..14f36ea6ad0e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_16_MSM8953_H
+#define _DPU_1_16_MSM8953_H
+
+static const struct dpu_caps msm8953_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 40 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8953_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8953_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8953_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8953_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8953_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8953_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8953_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x268,
+ .type = INTF_NONE,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8953_perf_data = {
+ .max_bw_low = 3400000,
+ .max_bw_high = 3400000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 14,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8953_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 16,
+};
+
+const struct dpu_mdss_cfg dpu_msm8953_cfg = {
+ .mdss_ver = &msm8953_mdss_ver,
+ .caps = &msm8953_dpu_caps,
+ .mdp = msm8953_mdp,
+ .ctl_count = ARRAY_SIZE(msm8953_ctl),
+ .ctl = msm8953_ctl,
+ .sspp_count = ARRAY_SIZE(msm8953_sspp),
+ .sspp = msm8953_sspp,
+ .mixer_count = ARRAY_SIZE(msm8953_lm),
+ .mixer = msm8953_lm,
+ .dspp_count = ARRAY_SIZE(msm8953_dspp),
+ .dspp = msm8953_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8953_pp),
+ .pingpong = msm8953_pp,
+ .intf_count = ARRAY_SIZE(msm8953_intf),
+ .intf = msm8953_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8953_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
new file mode 100644
index 000000000000..491f6f5827d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_1_7_MSM8996_H
+#define _DPU_1_7_MSM8996_H
+
+static const struct dpu_caps msm8996_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8996_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB2] = { .reg_off = 0x2bc, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB3] = { .reg_off = 0x2c4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8996_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x64,
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8996_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_6", .id = SSPP_RGB2,
+ .base = 0x18000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB2,
+ }, {
+ .name = "sspp_7", .id = SSPP_RGB3,
+ .base = 0x1a000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8996_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x150,
+ .features = DMA_MSM8996_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 10,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ },
+};
+
+static const struct dpu_lm_cfg msm8996_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8996_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_TE2_MASK,
+ .sblk = &msm8996_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_TE2_MASK,
+ .sblk = &msm8996_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg msm8996_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_dspp_cfg msm8996_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8996_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x268,
+ .type = INTF_NONE,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x268,
+ .type = INTF_HDMI,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8996_perf_data = {
+ .max_bw_low = 9600000,
+ .max_bw_high = 9600000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 21,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8996_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 7,
+};
+
+const struct dpu_mdss_cfg dpu_msm8996_cfg = {
+ .mdss_ver = &msm8996_mdss_ver,
+ .caps = &msm8996_dpu_caps,
+ .mdp = msm8996_mdp,
+ .ctl_count = ARRAY_SIZE(msm8996_ctl),
+ .ctl = msm8996_ctl,
+ .sspp_count = ARRAY_SIZE(msm8996_sspp),
+ .sspp = msm8996_sspp,
+ .mixer_count = ARRAY_SIZE(msm8996_lm),
+ .mixer = msm8996_lm,
+ .dspp_count = ARRAY_SIZE(msm8996_dspp),
+ .dspp = msm8996_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8996_pp),
+ .pingpong = msm8996_pp,
+ .dsc_count = ARRAY_SIZE(msm8996_dsc),
+ .dsc = msm8996_dsc,
+ .intf_count = ARRAY_SIZE(msm8996_intf),
+ .intf = msm8996_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8996_perf_data,
+};
+
+#endif
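
The clk_inefficiency_factor and bw_inefficiency_factor values are expressed in percent, so 105 and 120 scale the computed demand by 1.05x and 1.20x; the core-perf code appears to apply them as value * factor / 100. A tiny illustrative helper under that assumption:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: scale a raw demand figure by a factor given in percent. */
static uint64_t apply_factor(uint64_t raw, uint32_t factor_pct)
{
	return raw * factor_pct / 100;
}

int main(void)
{
	/* A 300 MHz requirement with the 105% clock inefficiency factor above. */
	printf("%llu\n", (unsigned long long)apply_factor(300000000ULL, 105));
	return 0;
}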
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 1d3e9666c741..64c94e919a69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -157,18 +157,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = {
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
- .name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
- .features = MIXER_MSM8998_MASK,
- .sblk = &msm8998_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
- .name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
- .features = MIXER_MSM8998_MASK,
- .sblk = &msm8998_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
.features = MIXER_MSM8998_MASK,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 7a23389a5732..72bd4f7e9e50 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -156,25 +156,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
- .name = "lm_3", .id = LM_3,
- .base = 0x0, .len = 0x320,
- .features = MIXER_SDM845_MASK,
- .sblk = &sdm845_lm_sblk,
- .pingpong = PINGPONG_NONE,
- .dspp = DSPP_3,
- }, {
- .name = "lm_4", .id = LM_4,
- .base = 0x0, .len = 0x320,
- .features = MIXER_SDM845_MASK,
- .sblk = &sdm845_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
new file mode 100644
index 000000000000..907b4d7ceb47
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_8_4_SA8775P_H
+#define _DPU_8_4_SA8775P_H
+
+static const struct dpu_caps sa8775p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sa8775p_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sa8775p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sa8775p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x32c,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x32c,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sa8775p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sa8775p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sa8775p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x65800, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x65c00, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sa8775p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x65f00, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard-slice
+ * DSC encoders, so both share the same base address but each has its
+ * own sub-block address.
+ */
+static const struct dpu_dsc_cfg sa8775p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sa8775p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */
+static const struct dpu_intf_cfg sa8775p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3A000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_perf_cfg sa8775p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xfff0, 0x1},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sa8775p_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 4,
+};
+
+const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
+ .mdss_ver = &sa8775p_mdss_ver,
+ .caps = &sa8775p_dpu_caps,
+ .mdp = &sa8775p_mdp,
+ .cdm = &sc7280_cdm,
+ .ctl_count = ARRAY_SIZE(sa8775p_ctl),
+ .ctl = sa8775p_ctl,
+ .sspp_count = ARRAY_SIZE(sa8775p_sspp),
+ .sspp = sa8775p_sspp,
+ .mixer_count = ARRAY_SIZE(sa8775p_lm),
+ .mixer = sa8775p_lm,
+ .dspp_count = ARRAY_SIZE(sa8775p_dspp),
+ .dspp = sa8775p_dspp,
+ .pingpong_count = ARRAY_SIZE(sa8775p_pp),
+ .pingpong = sa8775p_pp,
+ .dsc_count = ARRAY_SIZE(sa8775p_dsc),
+ .dsc = sa8775p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sa8775p_merge_3d),
+ .merge_3d = sa8775p_merge_3d,
+ .wb_count = ARRAY_SIZE(sa8775p_wb),
+ .wb = sa8775p_wb,
+ .intf_count = ARRAY_SIZE(sa8775p_intf),
+ .intf = sa8775p_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sa8775p_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 7c286bafb948..e7183cf05776 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -8,72 +8,26 @@
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
-/**
- * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
- * @kms: MSM KMS handle
- * @return: none
- */
void dpu_core_irq_preinstall(struct msm_kms *kms);
-/**
- * dpu_core_irq_uninstall - uninstall core IRQ handler
- * @kms: MSM KMS handle
- * @return: none
- */
void dpu_core_irq_uninstall(struct msm_kms *kms);
-/**
- * dpu_core_irq - core IRQ handler
- * @kms: MSM KMS handle
- * @return: interrupt handling status
- */
irqreturn_t dpu_core_irq(struct msm_kms *kms);
-/**
- * dpu_core_irq_read - IRQ helper function for reading IRQ status
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @return: non-zero if irq detected; otherwise no irq detected
- */
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
unsigned int irq_idx);
-/**
- * dpu_core_irq_register_callback - For registering callback function on IRQ
- * interrupt
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @irq_cb: IRQ callback funcion.
- * @irq_arg: IRQ callback argument.
- * @return: 0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
void *irq_arg);
-/**
- * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
- * interrupt
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @return: 0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
unsigned int irq_idx);
-/**
- * dpu_debugfs_core_irq_init - register core irq debugfs
- * @dpu_kms: pointer to kms
- * @parent: debugfs directory root
- */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 68fae048a9a8..6f0a37f954fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
mode = &state->adjusted_mode;
- crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
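
The (u64) cast matters because vtotal, hdisplay and the refresh rate are all int-sized, so without it the product is evaluated in 32-bit arithmetic before being stored in the 64-bit crtc_clk. A quick worked case with illustrative numbers: 3200 * 5120 * 144 is roughly 2.36e9, which no longer fits in a signed 32-bit int:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int vtotal = 3200, hdisplay = 5120, vrefresh = 144;

	/* Promoting the first operand forces 64-bit arithmetic; without the
	 * cast the product (~2.36e9 here) would be computed in 32-bit int. */
	uint64_t crtc_clk = (uint64_t)vtotal * hdisplay * vrefresh;

	printf("%llu\n", (unsigned long long)crtc_clk);
	return 0;
}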
@@ -140,6 +140,12 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
perf->max_per_pipe_ib, perf->bw_ctl);
}
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -301,6 +307,12 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
return clk_rate;
}
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * return: zero if success, or error code otherwise
+ */
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed)
{
@@ -446,6 +458,11 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
+ * @parent: Pointer to parent debugfs
+ */
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
@@ -482,6 +499,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
}
#endif
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @perf_cfg: Pointer to platform performance configuration
+ * @max_core_clk_rate: Maximum core clock rate
+ */
int dpu_core_perf_init(struct dpu_core_perf *perf,
const struct dpu_perf_cfg *perf_cfg,
unsigned long max_core_clk_rate)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 4186977390bd..451bf8021114 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -54,47 +54,20 @@ struct dpu_core_perf {
u64 fix_core_ab_vote;
};
-/**
- * dpu_core_perf_crtc_check - validate performance of the given crtc state
- * @crtc: Pointer to crtc
- * @state: Pointer to new crtc state
- * return: zero if success, or error code otherwise
- */
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
-/**
- * dpu_core_perf_crtc_update - update performance of the given crtc
- * @crtc: Pointer to crtc
- * @params_changed: true if crtc parameters are modified
- * return: zero if success, or error code otherwise
- */
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed);
-/**
- * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
- * @crtc: Pointer to crtc
- */
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
-/**
- * dpu_core_perf_init - initialize the given core performance context
- * @perf: Pointer to core performance context
- * @perf_cfg: Pointer to platform performance configuration
- * @max_core_clk_rate: Maximum core clock rate
- */
int dpu_core_perf_init(struct dpu_core_perf *perf,
const struct dpu_perf_cfg *perf_cfg,
unsigned long max_core_clk_rate);
struct dpu_kms;
-/**
- * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @dpu_kms: Pointer to the dpu_kms struct
- * @debugfs_parent: Pointer to parent debugfs
- */
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index db6c57900781..9f6ffd344693 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -572,6 +572,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
@@ -594,6 +598,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
+/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -704,6 +712,10 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
@@ -934,6 +946,10 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
@@ -1230,6 +1246,24 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ /*
+ * max crtc width is twice the max mixer width; max height is 4096 lines
+ */
+ return drm_mode_validate_size(mode,
+ 2 * dpu_kms->catalog->caps->max_mixer_width,
+ 4096);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -1445,10 +1479,19 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
.atomic_flush = dpu_crtc_atomic_flush,
+ .mode_valid = dpu_crtc_mode_valid,
.get_scanout_position = dpu_crtc_get_scanout_position,
};
-/* initialize crtc */
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @cursor: cursor plane
+ * @return: new crtc object or error
+ *
+ * initialize CRTC
+ */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor)
{
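
The new mode_valid callback bounds modes at twice the per-mixer line width (a CRTC can drive at most two mixers via source split) and 4096 lines of height. A minimal sketch of that size check, detached from the DRM helpers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative: a CRTC can span at most two mixers (source split), so the
 * usable width is twice the per-mixer limit; height is capped at 4096 lines. */
static bool mode_fits(int hdisplay, int vdisplay, int max_mixer_width)
{
	return hdisplay <= 2 * max_mixer_width && vdisplay <= 4096;
}

int main(void)
{
	printf("%d\n", mode_fits(3840, 2160, 2560));	/* 3840 <= 5120 -> fits */
	return 0;
}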
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index febc3e764a63..0b148f3ce0d7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,55 +239,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
-/**
- * dpu_crtc_vblank - enable or disable vblanks for this crtc
- * @crtc: Pointer to drm crtc object
- * @en: true to enable vblanks, false to disable
- */
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
-/**
- * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
-/**
- * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
-/**
- * dpu_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_complete_commit(struct drm_crtc *crtc);
-/**
- * dpu_crtc_init - create a new crtc object
- * @dev: dpu device
- * @plane: base plane
- * @cursor: cursor plane
- * @Return: new crtc object or error
- */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor);
-/**
- * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
- * @kms: Pointer to dpu_kms
- * @crtc_drm: Pointer to crtc object
- * @event: Event that client is interested
- * @en: Flag to enable/disable the event
- */
-int dpu_crtc_register_custom_event(struct dpu_kms *kms,
- struct drm_crtc *crtc_drm, u32 event, bool en);
-
-/**
- * dpu_crtc_get_intf_mode - get interface mode of the given crtc
- * @crtc: Pointert to crtc
- */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index bd3698bf0cf7..83de7564e2c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -217,6 +217,10 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
+/**
+ * dpu_encoder_get_drm_fmt - return DRM fourcc format
+ * @phys_enc: Pointer to physical encoder structure
+ */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *drm_enc;
@@ -235,6 +239,11 @@ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
return DRM_FORMAT_RGB888;
}
+/**
+ * dpu_encoder_needs_periph_flush - return true if physical encoder requires
+ * peripheral flush
+ * @phys_enc: Pointer to physical encoder structure
+ */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *drm_enc;
@@ -253,6 +262,10 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
}
+/**
+ * dpu_encoder_is_widebus_enabled - return true if widebus is enabled for the encoder
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
const struct dpu_encoder_virt *dpu_enc;
@@ -272,6 +285,11 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
return false;
}
+/**
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
+ * for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -279,6 +297,12 @@ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
return dpu_enc->dsc ? true : false;
}
+/**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: Number of physical encoders for given drm encoder
+ */
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -297,6 +321,10 @@ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
return num_intf;
}
+/**
+ * dpu_encoder_setup_misr - enable misr calculations
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -315,6 +343,13 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
}
}
+/**
+ * dpu_encoder_get_crc - get the crc value from interface blocks
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @crcs: array to fill with CRC data
+ * @pos: offset into the @crcs array
+ * Returns: 0 on success, error otherwise
+ */
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
struct dpu_encoder_virt *dpu_enc;
@@ -385,6 +420,12 @@ static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
}
}
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
@@ -402,6 +443,15 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: will call @func on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @irq_idx: IRQ index
+ * @func: IRQ callback to be called in case of timeout
+ * @wait_info: wait info struct
+ * @return: 0 or -ERROR
+ */
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
unsigned int irq_idx,
void (*func)(void *arg),
@@ -473,6 +523,10 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
return ret;
}
+/**
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -480,6 +534,10 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
return phys ? atomic_read(&phys->vsync_cnt) : 0;
}
+/**
+ * dpu_encoder_get_linecount - get interface line count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -495,6 +553,13 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
return linecount;
}
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
@@ -544,6 +609,10 @@ void dpu_encoder_helper_split_config(
}
}
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -560,6 +629,12 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
return (num_dsc > 0) && (num_dsc > intf_count);
}
+/**
+ * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
+ * This helper function is used by physical encoder to get DSC config
+ * used for this encoder.
+ * @drm_enc: Pointer to encoder structure
+ */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv = drm_enc->dev->dev_private;
@@ -1089,6 +1164,11 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
@@ -1106,6 +1186,11 @@ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
}
}
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
@@ -1248,6 +1333,10 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
}
}
+/**
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1389,6 +1478,12 @@ static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catal
return NULL;
}
+/**
+ * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1411,6 +1506,12 @@ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_vblank_callback");
}
+/**
+ * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1429,6 +1530,11 @@ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
+/**
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
+ * @drm_enc: encoder pointer
+ * @crtc: crtc pointer
+ */
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1441,6 +1547,13 @@ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @drm_enc: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
+ */
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
struct drm_crtc *crtc, bool enable)
{
@@ -1465,6 +1578,13 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
}
}
+/**
+ * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys
+ * encoder has completed the last requested frame
+ * @drm_enc: Pointer to drm encoder structure
+ * @ready_phys: Pointer to physical encoder
+ * @event: Event to process
+ */
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
@@ -1587,6 +1707,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
phys->ops.trigger_start(phys);
}
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
@@ -1708,6 +1834,11 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1784,6 +1915,11 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
return line_time;
}
+/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ * @drm_enc: encoder pointer
+ * @wakeup_time: pointer to ktime_t to write the vsync time to
+ */
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
struct drm_display_mode *mode;
@@ -1930,6 +2066,13 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
dsc, dsc_common_mode, initial_lines);
}
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1966,6 +2109,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}
+/**
+ * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1987,6 +2134,11 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
return true;
}
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -2085,6 +2237,10 @@ static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
}
}
+/**
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
+ * @phys_enc: Pointer to physical encoder structure
+ */
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
@@ -2168,6 +2324,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+/**
+ * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
+ * @phys_enc: Pointer to physical encoder
+ * @dpu_fmt: Pointer to the format description
+ * @output_type: HDMI/WB
+ */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type)
@@ -2472,6 +2634,13 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = {
.debugfs_init = dpu_encoder_debugfs_init,
};
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info)
@@ -2593,6 +2762,10 @@ int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
return ret;
}
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
@@ -2612,6 +2785,12 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *encoder = phys_enc->parent;
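A hedged sketch of how the CRC helpers documented in this file fit together; the wrapper and its call site (e.g. a debugfs CRC source) are assumptions, only the three dpu_encoder_* calls come from the code above:

/* Illustrative: arm MISR once, then read back CRC values for the encoder. */
static int example_collect_crcs(struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	int cnt = dpu_encoder_get_crc_values_cnt(drm_enc);

	if (cnt <= 0)
		return cnt;

	/* in the driver, MISR setup happens when the CRC source is enabled */
	dpu_encoder_setup_misr(drm_enc);

	/* ...and readback happens later, at a frame boundary */
	return dpu_encoder_get_crc(drm_enc, crcs, pos);
}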
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index f7465a1774aa..92b5ee390788 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -19,6 +19,8 @@
#define IDLE_TIMEOUT (66 - 16/2)
+#define MAX_H_TILES_PER_DISPLAY 2
+
/**
* struct msm_display_info - defines display properties
* @intf_type: INTF_ type
@@ -36,159 +38,54 @@ struct msm_display_info {
enum dpu_vsync_source vsync_source;
};
-/**
- * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
- * @encoder: encoder pointer
- * @crtc: crtc pointer
- */
void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc);
-/**
- * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
- * the encoder is assigned to the given crtc
- * @encoder: encoder pointer
- * @crtc: crtc pointer
- * @enable: true if vblank should be enabled
- */
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc, bool enable);
-/**
- * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
- * path (i.e. ctl flush and start) at next appropriate time.
- * Immediately: if no previous commit is outstanding.
- * Delayed: Block until next trigger can be issued.
- * @encoder: encoder pointer
- */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
-/**
- * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
- * kickoff and trigger the ctl prepare progress for command mode display.
- * @encoder: encoder pointer
- */
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
-/**
- * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
- * (i.e. ctl flush and start) immediately.
- * @encoder: encoder pointer
- */
void dpu_encoder_kickoff(struct drm_encoder *encoder);
-/**
- * dpu_encoder_wakeup_time - get the time of the next vsync
- */
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder);
-/*
- * dpu_encoder_get_intf_mode - get interface mode of the given encoder
- * @encoder: Pointer to drm encoder object
- */
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
-/**
- * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
- * @encoder: encoder pointer
- */
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
-/**
- * dpu_encoder_init - initialize virtual encoder object
- * @dev: Pointer to drm device structure
- * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
- * @disp_info: Pointer to display information structure
- * Returns: Pointer to newly created drm encoder
- */
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
-/**
- * dpu_encoder_set_idle_timeout - set the idle timeout for video
- * and command mode encoders.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @idle_timeout: idle timeout duration in milliseconds
- */
-void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
- u32 idle_timeout);
-/**
- * dpu_encoder_get_linecount - get interface line count for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_vsync_count - get vsync count for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
- * for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
- * in virtual encoder that can collect CRC values
- * @drm_enc: Pointer to previously created drm encoder structure
- * Returns: Number of physical encoders for given drm encoder
- */
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_setup_misr - enable misr calculations
- * @drm_enc: Pointer to previously created drm encoder structure
- */
void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder);
-/**
- * dpu_encoder_get_crc - get the crc value from interface blocks
- * @drm_enc: Pointer to previously created drm encoder structure
- * Returns: 0 on success, error otherwise
- */
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
-/**
- * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @job: Pointer to the current drm writeback job
- */
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
-/**
- * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @job: Pointer to the current drm writeback job
- */
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
-/**
- * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit.
- * @drm_enc: Pointer to drm encoder structure
- */
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
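To tie the retained prototypes together, a hedged sketch of the virtual-encoder lifecycle as modeset code might drive it; the wrapper, the DSI encoder mode and the exact ordering are illustrative assumptions:

/* Illustrative flow only; the real wiring lives in dpu_kms and dpu_crtc. */
static void example_encoder_flow(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct msm_display_info *info)
{
	struct drm_encoder *enc;

	enc = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, info);
	if (IS_ERR(enc))
		return;

	dpu_encoder_assign_crtc(enc, crtc);
	dpu_encoder_toggle_vblank_for_crtc(enc, crtc, true);

	/* per-frame path: arm the CTL, then flush and start it */
	dpu_encoder_prepare_for_kickoff(enc);
	dpu_encoder_kickoff(enc);
}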
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index e77ebe3a68da..63f09857025c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -279,37 +279,15 @@ struct dpu_encoder_wait_info {
s64 timeout_ms;
};
-/**
- * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
- * @p: Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
- * @dev: Corresponding device for devres management
- * @p: Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_phys_wb_init - initialize writeback encoder
- * @dev: Corresponding device for devres management
- * @init: Pointer to init info structure with initialization params
- */
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_helper_trigger_start - control start helper function
- * This helper function may be optionally specified by physical
- * encoders if they require ctl_start triggering.
- * @phys_enc: Pointer to physical encoder structure
- */
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
@@ -331,106 +309,38 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
-/**
- * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
- * This helper function is used by physical encoder to get DSC blocks mask
- * used for this encoder.
- * @phys_enc: Pointer to physical encoder structure
- */
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
- * This helper function is used by physical encoder to get DSC config
- * used for this encoder.
- * @drm_enc: Pointer to encoder structure
- */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_drm_fmt - return DRM fourcc format
- * @phys_enc: Pointer to physical encoder structure
- */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_needs_periph_flush - return true if physical encoder requires
- * peripheral flush
- * @phys_enc: Pointer to physical encoder structure
- */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_split_config - split display configuration helper function
- * This helper function may be used by physical encoders to configure
- * the split display related registers.
- * @phys_enc: Pointer to physical encoder structure
- * @interface: enum dpu_intf setting
- */
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface);
-/**
- * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
- * timed out, including reporting frame error event to crtc and debug dump
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: Failing interrupt index
- */
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
-/**
- * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
- * note: will call dpu_encoder_helper_wait_for_irq on timeout
- * @phys_enc: Pointer to physical encoder structure
- * @irq: IRQ index
- * @func: IRQ callback to be called in case of timeout
- * @wait_info: wait info struct
- * @Return: 0 or -ERROR
- */
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
unsigned int irq,
void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info);
-/**
- * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
- * @phys_enc: Pointer to physical encoder structure
- */
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
- * @phys_enc: Pointer to physical encoder
- * @output_type: HDMI/WB
- */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
-/**
- * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * Note: This is called from IRQ handler context.
- */
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc);
-/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * Note: This is called from IRQ handler context.
- */
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc);
-/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * @event: Event to process
- */
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event);
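A hedged sketch of calling dpu_encoder_helper_wait_for_irq() from a physical encoder; the wq/atomic_cnt members and the pending_kickoff_* fields are assumptions based on the wider driver, only timeout_ms and the helper's signature appear in this hunk:

/* Illustrative wait for a frame/pingpong-done style interrupt. */
static int example_wait_done(struct dpu_encoder_phys *phys_enc,
			     unsigned int irq,
			     void (*timeout_cb)(void *arg))
{
	struct dpu_encoder_wait_info wait_info = {
		.wq = &phys_enc->pending_kickoff_wq,		/* assumed member */
		.atomic_cnt = &phys_enc->pending_kickoff_cnt,	/* assumed member */
		.timeout_ms = 100,				/* arbitrary example */
	};

	return dpu_encoder_helper_wait_for_irq(phys_enc, irq,
					       timeout_cb, &wait_info);
}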
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 6fc31d47cd1d..e9bbccc44dad 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -720,6 +720,12 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev: Corresponding device for devres management
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index d8a2edebfe8c..abd6600046cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -746,6 +746,12 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @dev: Corresponding device for devres management
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 07035ab77b79..4c006ec74575 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -166,10 +166,10 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
/**
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
- * @fb: Pointer to output framebuffer
+ * @format: Format of the framebuffer
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
- struct drm_framebuffer *fb)
+ const struct msm_format *format)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
@@ -193,12 +193,12 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb->ops.setup_roi(hw_wb, wb_cfg);
if (hw_wb->ops.setup_outformat)
- hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg, format);
if (hw_wb->ops.setup_cdp) {
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
- hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format,
+ hw_wb->ops.setup_cdp(hw_wb, format,
perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
}
@@ -321,15 +321,10 @@ static void dpu_encoder_phys_wb_setup(
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct drm_display_mode mode = phys_enc->cached_mode;
- struct drm_framebuffer *fb = NULL;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- struct drm_writeback_job *wb_job;
const struct msm_format *format;
- const struct msm_format *dpu_fmt;
- wb_job = wb_enc->wb_job;
format = msm_framebuffer_format(wb_enc->wb_job->fb);
- dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier);
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
@@ -341,9 +336,9 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_phys_wb_set_qos(phys_enc);
- dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+ dpu_encoder_phys_wb_setup_fb(phys_enc, format);
- dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
@@ -587,26 +582,20 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
format = msm_framebuffer_format(job->fb);
- wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base,
- format->pixel_format, job->fb->modifier);
- if (!wb_cfg->dest.format) {
- /* this error should be detected during atomic_check */
- DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format);
- return;
- }
-
- ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ ret = dpu_format_populate_plane_sizes(job->fb, &wb_cfg->dest);
if (ret) {
- DPU_DEBUG("failed to populate layout %d\n", ret);
+ DPU_DEBUG("failed to populate plane sizes%d\n", ret);
return;
}
+ dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest);
+
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
- wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+ wb_cfg->dest.num_planes = format->num_planes;
- if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) &&
- (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ if ((format->fetch_type == MDP_PLANE_PLANAR) &&
+ (format->element[0] == C1_B_Cb))
swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 6b1e9a617da3..59c9427da7dd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -13,9 +13,6 @@
#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
-#define DPU_MAX_IMG_WIDTH 0x3FFF
-#define DPU_MAX_IMG_HEIGHT 0x3FFF
-
/*
* struct dpu_media_color_map - maps drm format to media format
* @format: DRM base pixel format
@@ -93,10 +90,9 @@ static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt)
return color_fmt;
}
-static int _dpu_format_get_plane_sizes_ubwc(
+static int _dpu_format_populate_plane_sizes_ubwc(
const struct msm_format *fmt,
- const uint32_t width,
- const uint32_t height,
+ struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
int i;
@@ -104,9 +100,8 @@ static int _dpu_format_get_plane_sizes_ubwc(
bool meta = MSM_FORMAT_IS_UBWC(fmt);
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
- layout->format = fmt;
- layout->width = width;
- layout->height = height;
+ layout->width = fb->width;
+ layout->height = fb->height;
layout->num_planes = fmt->num_planes;
color = _dpu_format_get_media_color_ubwc(fmt);
@@ -116,19 +111,19 @@ static int _dpu_format_get_plane_sizes_ubwc(
return -EINVAL;
}
- if (MSM_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(fmt)) {
uint32_t y_sclines, uv_sclines;
uint32_t y_meta_scanlines = 0;
uint32_t uv_meta_scanlines = 0;
layout->num_planes = 2;
- layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
- y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, fb->width);
+ y_sclines = VENUS_Y_SCANLINES(color, fb->height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
- layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
- uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, fb->width);
+ uv_sclines = VENUS_UV_SCANLINES(color, fb->height);
layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
@@ -136,13 +131,13 @@ static int _dpu_format_get_plane_sizes_ubwc(
goto done;
layout->num_planes += 2;
- layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, fb->width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, fb->height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
- layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, fb->width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, fb->height);
layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
@@ -151,16 +146,16 @@ static int _dpu_format_get_plane_sizes_ubwc(
layout->num_planes = 1;
- layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, fb->width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, fb->height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
if (!meta)
goto done;
layout->num_planes += 2;
- layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, fb->width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, fb->height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
}
@@ -172,26 +167,23 @@ done:
return 0;
}
-static int _dpu_format_get_plane_sizes_linear(
+static int _dpu_format_populate_plane_sizes_linear(
const struct msm_format *fmt,
- const uint32_t width,
- const uint32_t height,
- struct dpu_hw_fmt_layout *layout,
- const uint32_t *pitches)
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
int i;
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
- layout->format = fmt;
- layout->width = width;
- layout->height = height;
+ layout->width = fb->width;
+ layout->height = fb->height;
layout->num_planes = fmt->num_planes;
/* Due to memset above, only need to set planes of interest */
if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) {
layout->num_planes = 1;
- layout->plane_size[0] = width * height * layout->format->bpp;
- layout->plane_pitch[0] = width * layout->format->bpp;
+ layout->plane_size[0] = fb->width * fb->height * fmt->bpp;
+ layout->plane_pitch[0] = fb->width * fmt->bpp;
} else {
uint32_t v_subsample, h_subsample;
uint32_t chroma_samp;
@@ -201,7 +193,7 @@ static int _dpu_format_get_plane_sizes_linear(
_dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
&h_subsample);
- if (width % h_subsample || height % v_subsample) {
+ if (fb->width % h_subsample || fb->height % v_subsample) {
DRM_ERROR("mismatch in subsample vs dimensions\n");
return -EINVAL;
}
@@ -209,11 +201,11 @@ static int _dpu_format_get_plane_sizes_linear(
if ((fmt->pixel_format == DRM_FORMAT_NV12) &&
(MSM_FORMAT_IS_DX(fmt)))
bpp = 2;
- layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[0] = fb->width * bpp;
layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
- layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[0] = layout->plane_pitch[0] * fb->height;
layout->plane_size[1] = layout->plane_pitch[1] *
- (height / v_subsample);
+ (fb->height / v_subsample);
if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) {
layout->num_planes = 2;
@@ -234,8 +226,13 @@ static int _dpu_format_get_plane_sizes_linear(
* all the components based on ubwc specifications.
*/
for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
- if (pitches && layout->plane_pitch[i] < pitches[i])
- layout->plane_pitch[i] = pitches[i];
+ if (layout->plane_pitch[i] <= fb->pitches[i]) {
+ layout->plane_pitch[i] = fb->pitches[i];
+ } else {
+ DRM_DEBUG("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
}
for (i = 0; i < DPU_MAX_PLANES; i++)
@@ -244,53 +241,54 @@ static int _dpu_format_get_plane_sizes_linear(
return 0;
}
-static int dpu_format_get_plane_sizes(
- const struct msm_format *fmt,
- const uint32_t w,
- const uint32_t h,
- struct dpu_hw_fmt_layout *layout,
- const uint32_t *pitches)
+/**
+ * dpu_format_populate_plane_sizes - populate non-address part of the layout based on
+ * fb, and format found in the fb
+ * @fb: framebuffer pointer
+ * @layout: format layout structure to populate
+ *
+ * Return: error code on failure or 0 if the plane sizes were populated
+ */
+int dpu_format_populate_plane_sizes(
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
- if (!layout || !fmt) {
+ const struct msm_format *fmt;
+
+ if (!layout || !fb) {
DRM_ERROR("invalid pointer\n");
return -EINVAL;
}
- if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ if (fb->width > DPU_MAX_IMG_WIDTH ||
+ fb->height > DPU_MAX_IMG_HEIGHT) {
DRM_ERROR("image dimensions outside max range\n");
return -ERANGE;
}
+ fmt = msm_framebuffer_format(fb);
+
if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt))
- return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+ return _dpu_format_populate_plane_sizes_ubwc(fmt, fb, layout);
- return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+ return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout);
}
-static int _dpu_format_populate_addrs_ubwc(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
+ const struct msm_format *fmt;
uint32_t base_addr = 0;
bool meta;
- if (!fb || !layout) {
- DRM_ERROR("invalid pointers\n");
- return -EINVAL;
- }
-
- if (aspace)
- base_addr = msm_framebuffer_iova(fb, aspace, 0);
- if (!base_addr) {
- DRM_ERROR("failed to retrieve base addr\n");
- return -EFAULT;
- }
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
- meta = MSM_FORMAT_IS_UBWC(layout->format);
+ fmt = msm_framebuffer_format(fb);
+ meta = MSM_FORMAT_IS_UBWC(fmt);
/* Per-format logic for verifying active planes */
- if (MSM_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(fmt)) {
/************************************************/
/* UBWC ** */
/* buffer ** DPU PLANE */
@@ -319,7 +317,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- return 0;
+ return;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -350,119 +348,43 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
- return 0;
+ return;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
- return 0;
}
-static int _dpu_format_populate_addrs_linear(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
- /* Can now check the pitches given vs pitches expected */
- for (i = 0; i < layout->num_planes; ++i) {
- if (layout->plane_pitch[i] > fb->pitches[i]) {
- DRM_ERROR("plane %u expected pitch %u, fb %u\n",
- i, layout->plane_pitch[i], fb->pitches[i]);
- return -EINVAL;
- }
- }
-
/* Populate addresses for simple formats here */
- for (i = 0; i < layout->num_planes; ++i) {
- if (aspace)
- layout->plane_addr[i] =
- msm_framebuffer_iova(fb, aspace, i);
- if (!layout->plane_addr[i]) {
- DRM_ERROR("failed to retrieve base addr\n");
- return -EFAULT;
- }
- }
-
- return 0;
+ for (i = 0; i < layout->num_planes; ++i)
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
}
-int dpu_format_populate_layout(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+/**
+ * dpu_format_populate_addrs - populate buffer addresses based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @layout: format layout structure to populate
+ */
+void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
- int ret;
-
- if (!fb || !layout) {
- DRM_ERROR("invalid arguments\n");
- return -EINVAL;
- }
+ const struct msm_format *fmt;
- if ((fb->width > DPU_MAX_IMG_WIDTH) ||
- (fb->height > DPU_MAX_IMG_HEIGHT)) {
- DRM_ERROR("image dimensions outside max range\n");
- return -ERANGE;
- }
-
- layout->format = msm_framebuffer_format(fb);
-
- /* Populate the plane sizes etc via get_format */
- ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
- layout, fb->pitches);
- if (ret)
- return ret;
+ fmt = msm_framebuffer_format(fb);
/* Populate the addresses given the fb */
- if (MSM_FORMAT_IS_UBWC(layout->format) ||
- MSM_FORMAT_IS_TILE(layout->format))
- ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ if (MSM_FORMAT_IS_UBWC(fmt) ||
+ MSM_FORMAT_IS_TILE(fmt))
+ _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
-
- return ret;
-}
-
-int dpu_format_check_modified_format(
- const struct msm_kms *kms,
- const struct msm_format *fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos)
-{
- const struct drm_format_info *info;
- struct dpu_hw_fmt_layout layout;
- uint32_t bos_total_size = 0;
- int ret, i;
-
- if (!fmt || !cmd || !bos) {
- DRM_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- info = drm_format_info(fmt->pixel_format);
- if (!info)
- return -EINVAL;
-
- ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
- &layout, cmd->pitches);
- if (ret)
- return ret;
-
- for (i = 0; i < info->num_planes; i++) {
- if (!bos[i]) {
- DRM_ERROR("invalid handle for plane %d\n", i);
- return -EINVAL;
- }
- if ((i == 0) || (bos[i] != bos[0]))
- bos_total_size += bos[i]->size;
- }
-
- if (bos_total_size < layout.total_size) {
- DRM_ERROR("buffers total size too small %u expected %u\n",
- bos_total_size, layout.total_size);
- return -EINVAL;
- }
-
- return 0;
+ _dpu_format_populate_addrs_linear(aspace, fb, layout);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index 210d0ed5f0af..c6145d43aa3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -31,35 +31,12 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
return false;
}
-/**
- * dpu_format_check_modified_format - validate format and buffers for
- * dpu non-standard, i.e. modified format
- * @kms: kms driver
- * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format
- * @cmd: fb_cmd2 structure user request
- * @bos: gem buffer object list
- *
- * Return: error code on failure, 0 on success
- */
-int dpu_format_check_modified_format(
- const struct msm_kms *kms,
- const struct msm_format *msm_fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos);
+void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout);
-/**
- * dpu_format_populate_layout - populate the given format layout based on
- * mmu, fb, and format found in the fb
- * @aspace: address space pointer
- * @fb: framebuffer pointer
- * @fmtl: format layout structure to populate
- *
- * Return: error code on failure, -EAGAIN if success but the addresses
- * are the same as before or 0 if new addresses were populated
- */
-int dpu_format_populate_layout(
- struct msm_gem_address_space *aspace,
+int dpu_format_populate_plane_sizes(
struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *fmtl);
+ struct dpu_hw_fmt_layout *layout);
#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index dcb4fd85e73b..2cbf41f33cc0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -21,6 +21,16 @@
(VIG_BASE_MASK | \
BIT(DPU_SSPP_CSC_10BIT))
+#define VIG_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_SCALER_QSEED2) |\
+ BIT(DPU_SSPP_CSC))
+
+#define VIG_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_QSEED2) |\
+ BIT(DPU_SSPP_CSC))
+
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
@@ -32,6 +42,12 @@
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define DMA_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS))
+
+#define DMA_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_CDP))
+
#define DMA_MSM8998_MASK \
(BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -57,9 +73,19 @@
#define DMA_CURSOR_SDM845_MASK_SDMA \
(DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+#define DMA_CURSOR_MSM8996_MASK \
+ (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
+
#define DMA_CURSOR_MSM8998_MASK \
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+#define RGB_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS))
+
+#define RGB_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_RGB))
+
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
@@ -69,6 +95,12 @@
#define MIXER_QCM2290_MASK \
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+#define PINGPONG_MSM8996_MASK \
+ (BIT(DPU_PINGPONG_DSC))
+
+#define PINGPONG_MSM8996_TE2_MASK \
+ (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
+
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
@@ -115,10 +147,6 @@
#define MAX_HORZ_DECIMATION 4
#define MAX_VERT_DECIMATION 4
-#define MAX_UPSCALE_RATIO 20
-#define MAX_DOWNSCALE_RATIO 4
-#define SSPP_UNITY_SCALE 1
-
#define STRCAT(X, Y) (X Y)
static const uint32_t plane_formats[] = {
@@ -276,8 +304,6 @@ static const u32 wb2_formats_rgb_yuv[] = {
/* SSPP common configuration */
#define _VIG_SBLK(scaler_ver) \
{ \
- .maxdwnscale = MAX_DOWNSCALE_RATIO, \
- .maxupscale = MAX_UPSCALE_RATIO, \
.scaler_blk = {.name = "scaler", \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
@@ -285,15 +311,11 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \
{ \
- .maxdwnscale = MAX_DOWNSCALE_RATIO, \
- .maxupscale = MAX_UPSCALE_RATIO, \
.scaler_blk = {.name = "scaler", \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
@@ -301,29 +323,40 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = rot_cfg, \
}
#define _VIG_SBLK_NOSCALE() \
{ \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+/* qseed2 is not supported, so scaling is disabled */
+#define _VIG_SBLK_QSEED2() \
+ { \
+ .scaler_blk = {.name = "scaler", \
+ /* no version for qseed2 */ \
+ .base = 0x200, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .base = 0x320, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .rotation_cfg = NULL, \
+ }
+
+#define _RGB_SBLK() \
+ { \
+ .scaler_blk = {.name = "scaler", \
+ .base = 0x200, .len = 0x28,}, \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
}
#define _DMA_SBLK() \
{ \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
@@ -332,6 +365,9 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_format_list = rotation_v2_formats,
};
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed2 =
+ _VIG_SBLK_QSEED2();
+
static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale =
_VIG_SBLK_NOSCALE();
@@ -363,6 +399,8 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
_VIG_SBLK(SSPP_SCALER_VER(3, 3));
+static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK();
+
static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
/*************************************************************
@@ -427,6 +465,15 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
+static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
+ .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+};
+
+static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
+ /* No dither block */
+};
+
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
.te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
.version = 0x1},
@@ -492,6 +539,34 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
},
};
+static const struct dpu_vbif_cfg msm8996_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .default_ot_rd_limit = 32,
+ .default_ot_wr_limit = 16,
+ .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x20,
+ .dynamic_ot_rd_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .dynamic_ot_wr_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl),
+ .priority_lvl = msm8998_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl),
+ .priority_lvl = msm8998_nrt_pri_lvl,
+ },
+ },
+};
+
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
.name = "vbif_rt", .id = VBIF_RT,
@@ -675,6 +750,11 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
* Hardware catalog
*************************************************************/
+#include "catalog/dpu_1_7_msm8996.h"
+#include "catalog/dpu_1_14_msm8937.h"
+#include "catalog/dpu_1_15_msm8917.h"
+#include "catalog/dpu_1_16_msm8953.h"
+
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_3_2_sdm660.h"
#include "catalog/dpu_3_3_sdm630.h"
@@ -699,6 +779,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_0_sc8280xp.h"
#include "catalog/dpu_8_1_sm8450.h"
+#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 37e18e820a20..c701d18c3522 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -21,8 +21,8 @@
#define DPU_HW_BLK_NAME_LEN 16
-#define MAX_IMG_WIDTH 0x3fff
-#define MAX_IMG_HEIGHT 0x3fff
+#define DPU_MAX_IMG_WIDTH 0x3fff
+#define DPU_MAX_IMG_HEIGHT 0x3fff
#define CRTC_DUAL_MIXERS 2
@@ -364,21 +364,15 @@ struct dpu_caps {
/**
* struct dpu_sspp_sub_blks : SSPP sub-blocks
* common: Pointer to common configurations shared by sub blocks
- * @maxdwnscale: max downscale ratio supported(without DECIMATION)
- * @maxupscale: maxupscale ratio supported
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @scaler_blk:
* @csc_blk:
* @format_list: Pointer to list of supported formats
* @num_formats: Number of supported formats
- * @virt_format_list: Pointer to list of supported formats for virtual planes
- * @virt_num_formats: Number of supported formats for virtual planes
* @dpu_rotation_cfg: inline rotation configuration
*/
struct dpu_sspp_sub_blks {
- u32 maxdwnscale;
- u32 maxupscale;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_scaler_blk scaler_blk;
@@ -386,8 +380,6 @@ struct dpu_sspp_sub_blks {
const u32 *format_list;
u32 num_formats;
- const u32 *virt_format_list;
- u32 virt_num_formats;
const struct dpu_rotation_cfg *rotation_cfg;
};
@@ -831,6 +823,10 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *vig_formats;
};
+extern const struct dpu_mdss_cfg dpu_msm8917_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
@@ -850,6 +846,7 @@ extern const struct dpu_mdss_cfg dpu_sm8350_cfg;
extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
+extern const struct dpu_mdss_cfg dpu_sa8775p_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index 55d2768a6d4d..ae1534c49ae0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -222,6 +222,14 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
DPU_REG_WRITE(c, CDM_MUX, mux_cfg);
}
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @dev: DRM device handle
+ * @cfg: CDM catalog entry for which driver object is required
+ * @addr: mapped register io address of MDSS
+ * @mdss_rev: mdss hw core revision
+ */
struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
const struct dpu_cdm_cfg *cfg, void __iomem *addr,
const struct dpu_mdss_version *mdss_rev)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
index ec71c9886d75..6bb3476a05f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -122,14 +122,6 @@ struct dpu_hw_cdm {
struct dpu_hw_cdm_ops ops;
};
-/**
- * dpu_hw_cdm_init - initializes the cdm hw driver object.
- * should be called once before accessing every cdm.
- * @dev: DRM device handle
- * @cdm: CDM catalog entry for which driver object is required
- * @addr : mapped register io address of MDSS
- * @mdss_rev: mdss hw core revision
- */
struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
const struct dpu_cdm_cfg *cdm, void __iomem *addr,
const struct dpu_mdss_version *mdss_rev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 2e50049f2f85..4893f10d6a58 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -736,6 +736,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
+/**
+ * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl_path register.
+ * @dev: Corresponding device for devres management
+ * @cfg: ctl_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mixer_count: Number of mixers in @mixer
+ * @mixer: Pointer to an array of Layer Mixers defined in the catalog
+ */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 4401fdc0f3e4..85c6c835cc87 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -294,15 +294,6 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_ctl, base);
}
-/**
- * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
- * Should be called before accessing any ctl_path register.
- * @dev: Corresponding device for devres management
- * @cfg: ctl_path catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mixer_count: Number of mixers in @mixer
- * @mixer: Pointer to an array of Layer Mixers defined in the catalog
- */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 5e9aad1b2aa2..657200401f57 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -190,6 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
+/**
+ * dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_dsc context
+ */
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index 989c88d2449b..fc171bdeca48 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -62,24 +62,10 @@ struct dpu_hw_dsc {
struct dpu_hw_dsc_ops ops;
};
-/**
- * dpu_hw_dsc_init() - Initializes the DSC hw driver object.
- * @dev: Corresponding device for devres management
- * @cfg: DSC catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: Error code or allocated dpu_hw_dsc context
- */
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
-/**
- * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
- * @dev: Corresponding device for devres management
- * @cfg: DSC catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Returns: Error code or allocated dpu_hw_dsc context
- */
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
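The kernel-doc moved into dpu_hw_dsc.c above notes that dpu_hw_dsc_init() returns either an error code or an allocated context. As a rough illustration only (not part of this patch), the usual devres-managed call pattern for these init helpers looks like the sketch below; dsc_cfg and mmio stand in for the catalog entry and mapped MDP registers the caller already holds.

/* Illustrative sketch only -- not part of this patch. */
struct dpu_hw_dsc *hw_dsc;

hw_dsc = dpu_hw_dsc_init(dev, dsc_cfg, mmio);
if (IS_ERR(hw_dsc))
	return PTR_ERR(hw_dsc);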
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index ba193b0376fe..b9c433567262 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -369,6 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
+/**
+ * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @dev: Corresponding device for devres management
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Returns: Error code or allocated dpu_hw_dsc context
+ */
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index b1da88e2935f..829ca272873e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -70,6 +70,14 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c,
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
+/**
+ * dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
+ * should be called once before accessing every DSPP.
+ * @dev: Corresponding device for devres management
+ * @cfg: DSPP catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: pointer to structure or ERR_PTR
+ */
struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
const struct dpu_dspp_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index 3b435690b6cc..45c26cd49fa3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -78,14 +78,6 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_dspp, base);
}
-/**
- * dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
- * should be called once before accessing every DSPP.
- * @dev: Corresponding device for devres management
- * @cfg: DSPP catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: pointer to structure or ERR_PTR
- */
struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
const struct dpu_dspp_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index b85881aab047..49bd77a755aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -237,6 +237,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int
irq_entry->cb(irq_entry->arg);
}
+/**
+ * dpu_core_irq - core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: interrupt handling status
+ */
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -442,6 +447,12 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: non-zero if the interrupt was detected; zero otherwise
+ */
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
@@ -476,6 +487,12 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
return intr_status;
}
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @dev: Corresponding device for devres management
+ * @addr: mapped register io address of MDP
+ * @m: pointer to MDSS catalog data
+ */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -517,6 +534,17 @@ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
return intr;
}
+/**
+ * dpu_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
@@ -567,6 +595,15 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
return 0;
}
+/**
+ * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: 0 for success unregistering callback, otherwise failure
+ *
+ * This function removes a callback previously registered with
+ * dpu_core_irq_register_callback() for the given interrupt.
+ */
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
@@ -628,6 +665,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
@@ -636,6 +678,11 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -653,6 +700,11 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
}
}
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 564b750a28fe..142358a105c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -68,12 +68,6 @@ struct dpu_hw_intr {
struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
};
-/**
- * dpu_hw_intr_init(): Initializes the interrupts hw object
- * @dev: Corresponding device for devres management
- * @addr: mapped register io address of MDP
- * @m: pointer to MDSS catalog data
- */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
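For reference, the register/unregister kernel-doc moved into dpu_hw_interrupts.c above describes a per-irq-index callback model that allows several callbacks on the same interrupt. A hedged sketch of a caller follows; the callback, its atomic counter argument and the irq_idx value are hypothetical and not taken from this patch.

/* Illustrative sketch only -- not part of this patch. */
static void my_irq_cb(void *arg)
{
	atomic_t *count = arg;		/* hypothetical per-block event counter */

	atomic_inc(count);
}

static int my_hook_irq(struct dpu_kms *dpu_kms, unsigned int irq_idx,
		       atomic_t *count)
{
	/* several callbacks may coexist on the same irq_idx */
	return dpu_core_irq_register_callback(dpu_kms, irq_idx, my_irq_cb, count);
}

static void my_unhook_irq(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}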
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 29cb854f831a..fb1d25baa518 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -547,6 +547,14 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf,
DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2);
}
+/**
+ * dpu_hw_intf_init() - Initializes the INTF driver for the passed
+ * interface catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: interface catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
const struct dpu_intf_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index fc23650dfbf0..114be272ac0a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -130,14 +130,6 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops;
};
-/**
- * dpu_hw_intf_init() - Initializes the INTF driver for the passed
- * interface catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: interface catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- */
struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
const struct dpu_intf_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 1d3ccf3228c6..81b56f066519 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -158,6 +158,13 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
ops->collect_misr = dpu_hw_lm_collect_misr;
}
+/**
+ * dpu_hw_lm_init() - Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @dev: Corresponding device for devres management
+ * @cfg: mixer catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ */
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 0a3381755249..6f60fa9b3cd7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -93,13 +93,6 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_mixer, base);
}
-/**
- * dpu_hw_lm_init() - Initializes the mixer hw driver object.
- * should be called once before accessing every mixer.
- * @dev: Corresponding device for devres management
- * @cfg: mixer catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- */
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index a2eff36a2224..f8806a4d317b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -293,7 +293,6 @@ enum dpu_3d_blend_mode {
/**
* struct dpu_hw_fmt_layout - format information of the source pixel data
- * @format: pixel format parameters
* @num_planes: number of planes (including meta data planes)
* @width: image width
* @height: image height
@@ -303,7 +302,6 @@ enum dpu_3d_blend_mode {
* @plane_pitch: pitch of each plane
*/
struct dpu_hw_fmt_layout {
- const struct msm_format *format;
uint32_t num_planes;
uint32_t width;
uint32_t height;
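With @format dropped from struct dpu_hw_fmt_layout, the layout now carries only geometry, pitches and addresses; the msm_format travels separately. A hedged sketch of the resulting two-step population flow, mirroring the dpu_plane.c hunks later in this patch (pstate and new_state are the plane-state locals used there):

/* Illustrative sketch only -- see the dpu_plane.c changes below. */
/* atomic_check time: validate plane sizes/pitches, no GEM addresses yet */
ret = dpu_format_populate_plane_sizes(new_state->fb, &pstate->layout);
if (ret)
	return ret;

/* commit time: fill in the per-plane base addresses */
dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);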
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index ddfa40a959cb..0b3325f9c870 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -39,6 +39,14 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
+/**
+ * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
+ * merge3d catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: MERGE_3D catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_merge_3d context
+ */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
const struct dpu_merge_3d_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index c192f02ec1ab..6833c0207523 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -45,14 +45,6 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_merge_3d, base);
}
-/**
- * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
- * merge3d catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: Pingpong catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: Error code or allocated dpu_hw_merge_3d context
- */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
const struct dpu_merge_3d_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 2db4c6fba37a..36c0ec775b92 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -283,6 +283,15 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
+/**
+ * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
+ * pingpong catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: Pingpong catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ * Return: Error code or allocated dpu_hw_pingpong context
+ */
struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
const struct dpu_pingpong_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index a48b69fd79a3..dd99e1c21a1e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -118,15 +118,6 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_pingpong, base);
}
-/**
- * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
- * pingpong catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: Pingpong catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- * Return: Error code or allocated dpu_hw_pingpong context
- */
struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
const struct dpu_pingpong_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 2c720f1fc1b2..32c7c8084553 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -672,6 +672,15 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
}
#endif
+/**
+ * dpu_hw_sspp_init() - Initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management
+ * @cfg: Pipe catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @mdss_data: UBWC / MDSS configuration data
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4a910b808687..56a0edf2a57c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -12,6 +12,8 @@
struct dpu_hw_sspp;
+#define DPU_SSPP_MAX_PITCH_SIZE 0xffff
+
/**
* Flags
*/
@@ -142,10 +144,12 @@ struct dpu_hw_pixel_ext {
* @src_rect: src ROI, caller takes into account the different operations
* such as decimation, flip etc to program this field
* @dest_rect: destination ROI.
+ * @rotation: simplified drm rotation hint
*/
struct dpu_sw_pipe_cfg {
struct drm_rect src_rect;
struct drm_rect dst_rect;
+ unsigned int rotation;
};
/**
@@ -315,15 +319,7 @@ struct dpu_hw_sspp {
};
struct dpu_kms;
-/**
- * dpu_hw_sspp_init() - Initializes the sspp hw driver object.
- * Should be called once before accessing every pipe.
- * @dev: Corresponding device for devres management
- * @cfg: Pipe catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * @mdss_data: UBWC / MDSS configuration data
- * @mdss_rev: dpu core's major and minor versions
- */
+
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 0f40eea7f5e2..ad19330de61a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -284,6 +284,13 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @dev: Corresponding device for devres management
+ * @cfg: MDP TOP configuration from catalog
+ * @addr: Mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index f1ab9fd106e5..04efdcd21ceb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -157,18 +157,9 @@ struct dpu_hw_mdp {
struct dpu_hw_mdp_ops ops;
};
-/**
- * dpu_hw_mdptop_init - initializes the top driver for the passed config
- * @dev: Corresponding device for devres management
- * @cfg: MDP TOP configuration from catalog
- * @addr: Mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- */
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_version *mdss_rev);
-void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
-
#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index 98e34afde2d2..af76ad8a8103 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -213,6 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
+/**
+ * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
+ * VBIF catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: VBIF catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ */
struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
const struct dpu_vbif_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index e2b4307500e4..285121ec804c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -105,13 +105,6 @@ struct dpu_hw_vbif {
struct dpu_hw_vbif_ops ops;
};
-/**
- * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
- * VBIF catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: VBIF catalog entry for which driver object is required
- * @addr: Mapped register io address of MDSS
- */
struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
const struct dpu_vbif_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 93ff01c889b5..fb9f90957762 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -64,10 +64,10 @@ static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
}
static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
- struct dpu_hw_wb_cfg *data)
+ struct dpu_hw_wb_cfg *data,
+ const struct msm_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct msm_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
@@ -213,6 +213,14 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
+/**
+ * dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: wb_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ * Return: Error code or allocated dpu_hw_wb context
+ */
struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
const struct dpu_wb_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index 37497473e16c..ee5e5ab786e1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -37,7 +37,8 @@ struct dpu_hw_wb_ops {
struct dpu_hw_wb_cfg *wb);
void (*setup_outformat)(struct dpu_hw_wb *ctx,
- struct dpu_hw_wb_cfg *wb);
+ struct dpu_hw_wb_cfg *wb,
+ const struct msm_format *fmt);
void (*setup_roi)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
@@ -74,14 +75,6 @@ struct dpu_hw_wb {
struct dpu_hw_wb_ops ops;
};
-/**
- * dpu_hw_wb_init() - Initializes the writeback hw driver object.
- * @dev: Corresponding device for devres management
- * @cfg: wb_path catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- * Return: Error code or allocated dpu_hw_wb context
- */
struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
const struct dpu_wb_cfg *cfg,
void __iomem *addr,
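The setup_outformat op now receives the msm_format explicitly instead of reading it from dpu_hw_fmt_layout. Below is a hedged sketch of a writeback-encoder call site under the new signature; hw_wb, wb_cfg and job->fb are hypothetical locals, and the real call-site update lives in dpu_encoder_phys_wb.c (listed in the diffstat).

/* Illustrative sketch only -- not part of this patch. */
const struct msm_format *fmt = msm_framebuffer_format(job->fb);

if (hw_wb->ops.setup_outformat)
	hw_wb->ops.setup_outformat(hw_wb, &wb_cfg, fmt);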
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 9bcae53c4f45..ca4847b2b738 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -230,6 +230,21 @@ static int dpu_regset32_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
@@ -1025,7 +1040,6 @@ static const struct msm_kms_funcs kms_funcs = {
.complete_commit = dpu_kms_complete_commit,
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
- .check_modified_format = dpu_format_check_modified_format,
.destroy = dpu_kms_destroy,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
@@ -1061,6 +1075,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return 0;
}
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
struct clk *clk;
@@ -1202,13 +1223,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- /*
- * max crtc width is equal to the max mixer width * 2 and max height is
- * is 4K
- */
- dev->mode_config.max_width =
- dpu_kms->catalog->caps->max_mixer_width * 2;
- dev->mode_config.max_height = 4096;
+ dev->mode_config.max_width = DPU_MAX_IMG_WIDTH;
+ dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT;
dev->max_vblank_count = 0xffffffff;
/* Disable vblank irqs aggressively for power-saving */
@@ -1445,8 +1461,13 @@ static const struct dev_pm_ops dpu_pm_ops = {
};
static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, },
+ { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, },
+ { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, },
+ { .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, },
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 935ff6fd172c..88d64d43ea1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -145,38 +145,11 @@ struct dpu_global_state
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
*/
-/**
- * dpu_debugfs_create_regset32 - Create register read back file for debugfs
- *
- * This function is almost identical to the standard debugfs_create_regset32()
- * function, with the main difference being that a list of register
- * names/offsets do not need to be provided. The 'read' function simply outputs
- * sequential register values over a specified range.
- *
- * @name: File name within debugfs
- * @mode: File mode within debugfs
- * @parent: Parent directory entry within debugfs, can be NULL
- * @offset: sub-block offset
- * @length: sub-block length, in bytes
- * @dpu_kms: pointer to dpu kms structure
- */
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
/**
- * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
- *
- * The return value should be passed as the 'parent' argument to subsequent
- * debugfs create calls.
- *
- * @dpu_kms: Pointer to DPU's KMS structure
- *
- * Return: dentry pointer for DPU's debugfs location
- */
-void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
-
-/**
* DPU info management functions
* These functions/definitions allow for building up a 'dpu_info' structure
* containing one or more "key=value\n" entries.
@@ -189,13 +162,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-/**
- * dpu_kms_get_clk_rate() - get the clock rate
- * @dpu_kms: pointer to dpu_kms structure
- * @clock_name: clock name to get the rate
- *
- * Return: current clock rate
- */
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
#endif /* __dpu_kms_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 29298e066163..3ffac24333a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -528,8 +528,7 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
const struct msm_format *fmt, bool color_fill,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- unsigned int rotation)
+ struct dpu_sw_pipe_cfg *pipe_cfg)
{
struct dpu_hw_sspp *pipe_hw = pipe->sspp;
const struct drm_format_info *info = drm_format_info(fmt->pixel_format);
@@ -552,7 +551,7 @@ static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
dst_height,
&scaler3_cfg, fmt,
info->hsub, info->vsub,
- rotation);
+ pipe_cfg->rotation);
/* configure pixel extension based on scalar config */
_dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
@@ -604,7 +603,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
if (pipe->sspp->ops.setup_rects)
pipe->sspp->ops.setup_rects(pipe, &pipe_cfg);
- _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation);
+ _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg);
}
/**
@@ -648,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
- struct dpu_hw_fmt_layout layout;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -676,17 +674,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
}
}
- /* validate framebuffer layout before commit */
- ret = dpu_format_populate_layout(pstate->aspace,
- new_state->fb, &layout);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
- if (pstate->aspace)
- msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
- pstate->needs_dirtyfb);
- return ret;
- }
-
return 0;
}
@@ -708,12 +695,17 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
- const struct dpu_sspp_sub_blks *sblk,
- struct drm_rect src, const struct msm_format *fmt)
+ struct dpu_sw_pipe *pipe,
+ struct drm_rect src,
+ const struct msm_format *fmt)
{
+ const struct dpu_sspp_sub_blks *sblk = pipe->sspp->cap->sblk;
size_t num_formats;
const u32 *supported_formats;
+ if (!test_bit(DPU_SSPP_INLINE_ROTATION, &pipe->sspp->cap->features))
+ return -EINVAL;
+
if (!sblk->rotation_cfg) {
DPU_ERROR("invalid rotation cfg\n");
return -EINVAL;
@@ -743,6 +735,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ int ret;
min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -780,6 +773,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return -EINVAL;
}
+ if (pipe_cfg->rotation & DRM_MODE_ROTATE_90) {
+ ret = dpu_plane_check_inline_rotation(pdpu, pipe, pipe_cfg->src_rect, fmt);
+ if (ret)
+ return ret;
+ }
+
/* max clk check */
if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
@@ -789,37 +788,29 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return 0;
}
-static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+
+static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state,
+ const struct drm_crtc_state *crtc_state)
{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0, min_scale;
+ int i, ret = 0, min_scale, max_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct drm_crtc_state *crtc_state = NULL;
- const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct drm_rect fb_rect = { 0 };
uint32_t max_linewidth;
- unsigned int rotation;
- uint32_t supported_rotations;
- const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap;
- const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk;
- if (new_plane_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state,
- new_plane_state->crtc);
+ min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
+ max_scale = MAX_DOWNSCALE_RATIO << 16;
- min_scale = FRAC_16_16(1, sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- sblk->maxdwnscale << 16,
+ max_scale,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -828,12 +819,6 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->visible)
return 0;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->sspp = NULL;
-
pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos;
if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) {
DPU_ERROR("> %d plane stages assigned\n",
@@ -841,13 +826,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- pipe_cfg->src_rect = new_plane_state->src;
-
/* state->src is 16.16, src_rect is not */
- pipe_cfg->src_rect.x1 >>= 16;
- pipe_cfg->src_rect.x2 >>= 16;
- pipe_cfg->src_rect.y1 >>= 16;
- pipe_cfg->src_rect.y2 >>= 16;
+ drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
pipe_cfg->dst_rect = new_plane_state->dst;
@@ -855,14 +835,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
fb_rect.y2 = new_plane_state->fb->height;
/* Ensure fb size is supported */
- if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH ||
- drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) {
+ if (drm_rect_width(&fb_rect) > DPU_MAX_IMG_WIDTH ||
+ drm_rect_height(&fb_rect) > DPU_MAX_IMG_HEIGHT) {
DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&fb_rect));
return -E2BIG;
}
- fmt = msm_framebuffer_format(new_plane_state->fb);
+ ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format plane sizes, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < pstate->layout.num_planes; i++)
+ if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE)
+ return -E2BIG;
max_linewidth = pdpu->catalog->caps->max_linewidth;
@@ -872,6 +860,86 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
_dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
+ if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+ }
+
+ *r_pipe_cfg = *pipe_cfg;
+ pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
+ pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
+ r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ } else {
+ memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
+ }
+
+ drm_rect_rotate_inv(&pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
+ new_plane_state->rotation);
+ if (r_pipe_cfg->src_rect.x1 != 0)
+ drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+ new_plane_state->fb->width, new_plane_state->fb->height,
+ new_plane_state->rotation);
+
+ pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+
+ return 0;
+}
+
+static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
+ struct drm_atomic_state *state,
+ const struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ const struct msm_format *fmt;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ uint32_t max_linewidth;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps;
+ const struct dpu_sspp_sub_blks *sblk;
+ int ret = 0;
+
+ pipe_hw_caps = pipe->sspp->cap;
+ sblk = pipe->sspp->cap->sblk;
+
+ /*
+	 * We have already verified scaling against platform limitations.
+ * Now check if the SSPP supports scaling at all.
+ */
+ if (!sblk->scaler_blk.len &&
+ ((drm_rect_width(&new_plane_state->src) >> 16 !=
+ drm_rect_width(&new_plane_state->dst)) ||
+ (drm_rect_height(&new_plane_state->src) >> 16 !=
+ drm_rect_height(&new_plane_state->dst))))
+ return -ERANGE;
+
+ fmt = msm_framebuffer_format(new_plane_state->fb);
+
+ max_linewidth = pdpu->catalog->caps->max_linewidth;
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+ r_pipe_cfg->rotation = pipe_cfg->rotation;
+
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
@@ -885,16 +953,11 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -E2BIG;
}
- if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
-
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
(!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
+ pipe_cfg->rotation & DRM_MODE_ROTATE_90 ||
MSM_FORMAT_IS_YUV(fmt)) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
@@ -912,51 +975,48 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
- *r_pipe_cfg = *pipe_cfg;
- pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
- pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
- r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
- r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
- }
-
- drm_rect_rotate_inv(&pipe_cfg->src_rect,
- new_plane_state->fb->width, new_plane_state->fb->height,
- new_plane_state->rotation);
- if (r_pipe->sspp)
- drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
- new_plane_state->fb->width, new_plane_state->fb->height,
- new_plane_state->rotation);
-
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
- if (ret)
- return ret;
-
- if (r_pipe->sspp) {
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
&crtc_state->adjusted_mode);
if (ret)
return ret;
}
- supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+ return 0;
+}
- if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
- supported_rotations |= DRM_MODE_ROTATE_90;
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret = 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ const struct drm_crtc_state *crtc_state = NULL;
- rotation = drm_rotation_simplify(new_plane_state->rotation,
- supported_rotations);
+ if (new_plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
- if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
- (rotation & DRM_MODE_ROTATE_90)) {
- ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt);
- if (ret)
- return ret;
- }
+ pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
+ r_pipe->sspp = NULL;
- pstate->rotation = rotation;
- pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+ ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state);
+ if (ret)
+ return ret;
- return 0;
+ if (!new_plane_state->visible)
+ return 0;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
@@ -981,6 +1041,10 @@ static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe
}
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
void dpu_plane_flush(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
@@ -1060,14 +1124,14 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
pipe_cfg);
}
- _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation);
+ _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg);
if (pipe->sspp->ops.setup_multirect)
pipe->sspp->ops.setup_multirect(
pipe);
if (pipe->sspp->ops.setup_format) {
- unsigned int rotation = pstate->rotation;
+ unsigned int rotation = pipe_cfg->rotation;
src_flags = 0x0;
@@ -1101,7 +1165,8 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
_dpu_plane_set_qos_remap(plane, pipe);
}
-static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
@@ -1115,17 +1180,6 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
msm_framebuffer_format(fb);
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
- struct msm_gem_address_space *aspace = kms->base.aspace;
- struct dpu_hw_fmt_layout layout;
- bool layout_valid = false;
- int ret;
-
- ret = dpu_format_populate_layout(aspace, fb, &layout);
- if (ret)
- DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
- else
- layout_valid = true;
pstate->pending = true;
@@ -1133,6 +1187,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
+ dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
+
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
@@ -1140,12 +1196,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
- layout_valid ? &layout : NULL);
+ &pstate->layout);
if (r_pipe->sspp) {
dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
- layout_valid ? &layout : NULL);
+ &pstate->layout);
}
if (pstate->needs_qos_remap)
@@ -1197,7 +1253,7 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
if (!new_state->visible) {
_dpu_plane_atomic_disable(plane);
} else {
- dpu_plane_sspp_atomic_update(plane);
+ dpu_plane_sspp_atomic_update(plane, new_state);
}
}
@@ -1301,7 +1357,6 @@ static void dpu_plane_reset(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
- struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!plane) {
DPU_ERROR("invalid plane\n");
@@ -1323,16 +1378,6 @@ static void dpu_plane_reset(struct drm_plane *plane)
return;
}
- /*
- * Set the SSPP here until we have proper virtualized DPU planes.
- * This is the place where the state is allocated, so fill it fully.
- */
- pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
- pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO;
- pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE;
-
- pstate->r_pipe.sspp = NULL;
-
__drm_atomic_helper_plane_reset(plane, &pstate->base);
}
@@ -1388,7 +1433,15 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.atomic_update = dpu_plane_atomic_update,
};
-/* initialize plane */
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the plane.
+ */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs)
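For reference, the wide-plane handling moved into dpu_plane_atomic_check_nosspp() above splits a source wider than max_linewidth evenly across the two SmartDMA rects. A worked example with assumed numbers (not taken from the patch):

/* Illustrative numbers only: max_linewidth = 2560, src_rect x1 = 0, x2 = 4096.
 * 4096 > 2560 but 4096 <= 2 * 2560, so the rect is halved:
 *	pipe_cfg->src_rect.x2   = (0 + 4096) >> 1 = 2048;
 *	r_pipe_cfg->src_rect.x1 = 2048;
 * each half now fits within max_linewidth. */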
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index abd6b21a049b..97090ca7842b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -30,7 +30,7 @@
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
* @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
- * @rotation: simplified drm rotation hint
+ * @layout: framebuffer memory layout
*/
struct dpu_plane_state {
struct drm_plane_state base;
@@ -47,46 +47,21 @@ struct dpu_plane_state {
u64 plane_clk;
bool needs_dirtyfb;
- unsigned int rotation;
+
+ struct dpu_hw_fmt_layout layout;
};
#define to_dpu_plane_state(x) \
container_of(x, struct dpu_plane_state, base)
-/**
- * dpu_plane_flush - final plane operations before commit flush
- * @plane: Pointer to drm plane structure
- */
void dpu_plane_flush(struct drm_plane *plane);
-/**
- * dpu_plane_set_error: enable/disable error condition
- * @plane: pointer to drm_plane structure
- */
void dpu_plane_set_error(struct drm_plane *plane, bool error);
-/**
- * dpu_plane_init - create new dpu plane for the given pipe
- * @dev: Pointer to DRM device
- * @pipe: dpu hardware pipe identifier
- * @type: Plane type - PRIMARY/OVERLAY/CURSOR
- * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- *
- */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs);
-/**
- * dpu_plane_color_fill - enables color fill on plane
- * @plane: Pointer to DRM plane object
- * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
- * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
- * Returns: 0 on success
- */
-int dpu_plane_color_fill(struct drm_plane *plane,
- uint32_t color, uint32_t alpha);
-
#ifdef CONFIG_DEBUG_FS
void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
#else
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 44938ba7a2b7..c247af03dc8e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -34,6 +34,16 @@ struct dpu_rm_requirements {
struct msm_display_topology topology;
};
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @dev: Corresponding device for devres management
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mdss_data: Pointer to MDSS / UBWC configuration
+ * @mmio: mapped register io address of MDP
+ * @return: 0 on Success otherwise -ERROR
+ */
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
@@ -641,6 +651,13 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
}
}
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ */
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc)
{
@@ -657,6 +674,20 @@ void dpu_rm_release(struct dpu_global_state *global_state,
_dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the use connections and user requirements, specified through related
+ * topology control properties, and reserve hardware blocks to that
+ * display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW Reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @topology: Pointer to topology info for the display
+ * @return: 0 on Success otherwise -ERROR
+ */
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -694,6 +725,16 @@ int dpu_rm_reserve(
return ret;
}
+/**
+ * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
+ * assigned to this encoder
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting for allocation
+ * @type: resource type to return data for
+ * @blks: pointer to the array to be filled by HW resources
+ * @blks_size: size of the @blks array
+ */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
@@ -772,6 +813,11 @@ static void dpu_rm_print_state_helper(struct drm_printer *p,
}
+/**
+ * dpu_rm_print_state - output the RM private state
+ * @p: DRM printer
+ * @global_state: global state
+ */
void dpu_rm_print_state(struct drm_printer *p,
const struct dpu_global_state *global_state)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index e63db8ace6b9..ea0e49cb7b0d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -38,62 +38,40 @@ struct dpu_rm {
};
/**
- * dpu_rm_init - Read hardware catalog and create reservation tracking objects
- * for all HW blocks.
- * @dev: Corresponding device for devres management
- * @rm: DPU Resource Manager handle
- * @cat: Pointer to hardware catalog
- * @mdss_data: Pointer to MDSS / UBWC configuration
- * @mmio: mapped register io address of MDP
- * @Return: 0 on Success otherwise -ERROR
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_intf: number of interfaces the panel is mounted on
+ * @num_dspp: number of dspp blocks used
+ * @num_dsc: number of Display Stream Compression (DSC) blocks used
+ * @needs_cdm: indicates whether cdm block is needed for this display topology
*/
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_intf;
+ u32 num_dspp;
+ u32 num_dsc;
+ bool needs_cdm;
+};
+
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
-/**
- * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
- * the use connections and user requirements, specified through related
- * topology control properties, and reserve hardware blocks to that
- * display chain.
- * HW blocks can then be accessed through dpu_rm_get_* functions.
- * HW Reservations should be released via dpu_rm_release_hw.
- * @rm: DPU Resource Manager handle
- * @drm_enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
- * @topology: Pointer to topology info for the display
- * @Return: 0 on Success otherwise -ERROR
- */
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct msm_display_topology topology);
-/**
- * dpu_rm_reserve - Given the encoder for the display chain, release any
- * HW blocks previously reserved for that use case.
- * @rm: DPU Resource Manager handle
- * @enc: DRM Encoder handle
- * @Return: 0 on Success otherwise -ERROR
- */
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
-/**
- * Get hw resources of the given type that are assigned to this encoder.
- */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
-/**
- * dpu_rm_print_state - output the RM private state
- * @p: DRM printer
- * @global_state: global state
- */
void dpu_rm_print_state(struct drm_printer *p,
const struct dpu_global_state *global_state);
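struct msm_display_topology now lives in dpu_rm.h next to the reservation API it feeds. A hedged sketch of the reserve/release pairing described by the relocated kernel-doc in dpu_rm.c, with an assumed dual-LM, single-interface topology; global_state, drm_enc and crtc_state are the usual atomic-check locals.

/* Illustrative sketch only -- not part of this patch. */
struct msm_display_topology topology = {
	.num_lm = 2,
	.num_intf = 1,
	.num_dspp = 0,
	.num_dsc = 0,
	.needs_cdm = false,
};
int ret;

ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology);
if (ret)
	return ret;
/* ... and on teardown or failure: */
dpu_rm_release(global_state, drm_enc);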
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 47c02b98eac3..2a551e455aa3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -204,6 +204,11 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
}
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
@@ -245,6 +250,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
}
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
@@ -262,6 +271,10 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
}
}
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index e1b1f7f4e4be..62e47ae1e3ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -38,32 +38,14 @@ struct dpu_vbif_set_qos_params {
bool is_rt;
};
-/**
- * dpu_vbif_set_ot_limit - set OT limit for vbif client
- * @dpu_kms: DPU handler
- * @params: Pointer to OT configuration parameters
- */
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params);
-/**
- * dpu_vbif_set_qos_remap - set QoS priority level remap
- * @dpu_kms: DPU handler
- * @params: Pointer to QoS configuration parameters
- */
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params);
-/**
- * dpu_vbif_clear_errors - clear any vbif errors
- * @dpu_kms: DPU handler
- */
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
-/**
- * dpu_vbif_init_memtypes - initialize xin memory types for vbif
- * @dpu_kms: DPU handler
- */
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 4d55e3cf570f..07a2c1e87219 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -25,24 +25,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
addr = base_addr;
end_addr = base_addr + aligned_len;
- if (!(*reg))
- *reg = kvzalloc(len_padded, GFP_KERNEL);
-
- if (*reg)
- dump_addr = *reg;
+ *reg = kvzalloc(len_padded, GFP_KERNEL);
+ if (!*reg)
+ return;
+ dump_addr = *reg;
for (i = 0; i < num_rows; i++) {
x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
- if (dump_addr) {
- dump_addr[i * 4] = x0;
- dump_addr[i * 4 + 1] = x4;
- dump_addr[i * 4 + 2] = x8;
- dump_addr[i * 4 + 3] = xc;
- }
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
addr += REG_DUMP_ALIGN;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index a599fc5d63c5..74e01a5dd419 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -17,281 +17,281 @@
#include "dp_display.h"
#include "dp_utils.h"
-struct dp_audio_private {
+struct msm_dp_audio_private {
struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
u32 channels;
- struct dp_audio dp_audio;
+ struct msm_dp_audio msm_dp_audio;
};
-static u32 dp_audio_get_header(struct dp_catalog *catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog,
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header)
{
- return dp_catalog_audio_get_header(catalog, sdp, header);
+ return msm_dp_catalog_audio_get_header(catalog, sdp, header);
}
-static void dp_audio_set_header(struct dp_catalog *catalog,
+static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog,
u32 data,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header)
{
- dp_catalog_audio_set_header(catalog, sdp, header, data);
+ msm_dp_catalog_audio_set_header(catalog, sdp, header, data);
}
-static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
}
-static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
}
-static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
new_value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
}
-static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
}
-static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
+ value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_utils_calculate_parity(new_value);
+ parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_audio_set_header(catalog, value,
+ msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
}
-static void dp_audio_setup_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
{
- dp_catalog_audio_config_sdp(audio->catalog);
+ msm_dp_catalog_audio_config_sdp(audio->catalog);
- dp_audio_stream_sdp(audio);
- dp_audio_timestamp_sdp(audio);
- dp_audio_infoframe_sdp(audio);
- dp_audio_copy_management_sdp(audio);
- dp_audio_isrc_sdp(audio);
+ msm_dp_audio_stream_sdp(audio);
+ msm_dp_audio_timestamp_sdp(audio);
+ msm_dp_audio_infoframe_sdp(audio);
+ msm_dp_audio_copy_management_sdp(audio);
+ msm_dp_audio_isrc_sdp(audio);
}
-static void dp_audio_setup_acr(struct dp_audio_private *audio)
+static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
{
u32 select = 0;
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
- switch (audio->dp_audio.bw_code) {
+ switch (audio->msm_dp_audio.bw_code) {
case DP_LINK_BW_1_62:
select = 0;
break;
@@ -310,15 +310,15 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio)
break;
}
- dp_catalog_audio_config_acr(catalog, select);
+ msm_dp_catalog_audio_config_acr(catalog, select);
}
-static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 safe_to_exit_level = 0;
- switch (audio->dp_audio.lane_count) {
+ switch (audio->msm_dp_audio.lane_count) {
case 1:
safe_to_exit_level = 14;
break;
@@ -336,49 +336,49 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
break;
}
- dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
+ msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
}
-static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
- dp_catalog_audio_enable(catalog, enable);
+ msm_dp_catalog_audio_enable(catalog, enable);
}
-static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
{
- struct dp_audio *dp_audio;
- struct msm_dp *dp_display;
+ struct msm_dp_audio *msm_dp_audio;
+ struct msm_dp *msm_dp_display;
if (!pdev) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
- dp_audio = dp_display->dp_audio;
+ msm_dp_audio = msm_dp_display->msm_dp_audio;
- if (!dp_audio) {
- DRM_ERROR("invalid dp_audio data\n");
+ if (!msm_dp_audio) {
+ DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
}
- return container_of(dp_audio, struct dp_audio_private, dp_audio);
+ return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
@@ -386,20 +386,20 @@ static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
return -ENODEV;
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
pr_err("invalid input\n");
return -ENODEV;
}
- return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+ return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
}
-static int dp_audio_get_eld(struct device *dev,
+static int msm_dp_audio_get_eld(struct device *dev,
void *data, uint8_t *buf, size_t len)
{
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
@@ -408,30 +408,30 @@ static int dp_audio_get_eld(struct device *dev,
return -ENODEV;
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
- memcpy(buf, dp_display->connector->eld,
- min(sizeof(dp_display->connector->eld), len));
+ memcpy(buf, msm_dp_display->connector->eld,
+ min(sizeof(msm_dp_display->connector->eld), len));
return 0;
}
-int dp_audio_hw_params(struct device *dev,
+int msm_dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
int rc = 0;
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
- dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = platform_get_drvdata(pdev);
/*
* there could be cases where sound card can be opened even
@@ -441,12 +441,12 @@ int dp_audio_hw_params(struct device *dev,
* such cases check for connection status and bail out if not
* connected.
*/
- if (!dp_display->power_on) {
+ if (!msm_dp_display->power_on) {
rc = -EINVAL;
goto end;
}
- audio = dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -454,26 +454,26 @@ int dp_audio_hw_params(struct device *dev,
audio->channels = params->channels;
- dp_audio_setup_sdp(audio);
- dp_audio_setup_acr(audio);
- dp_audio_safe_to_exit_level(audio);
- dp_audio_enable(audio, true);
- dp_display_signal_audio_start(dp_display);
- dp_display->audio_enabled = true;
+ msm_dp_audio_setup_sdp(audio);
+ msm_dp_audio_setup_acr(audio);
+ msm_dp_audio_safe_to_exit_level(audio);
+ msm_dp_audio_enable(audio, true);
+ msm_dp_display_signal_audio_start(msm_dp_display);
+ msm_dp_display->audio_enabled = true;
end:
return rc;
}
-static void dp_audio_shutdown(struct device *dev, void *data)
+static void msm_dp_audio_shutdown(struct device *dev, void *data)
{
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
- dp_display = platform_get_drvdata(pdev);
- audio = dp_audio_get_data(pdev);
+ msm_dp_display = platform_get_drvdata(pdev);
+ audio = msm_dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -487,32 +487,32 @@ static void dp_audio_shutdown(struct device *dev, void *data)
 * connected. is_connected cannot be used here as it's set
* to false earlier than this call
*/
- if (!dp_display->audio_enabled)
+ if (!msm_dp_display->audio_enabled)
return;
- dp_audio_enable(audio, false);
+ msm_dp_audio_enable(audio, false);
/* signal the dp display to safely shutdown clocks */
- dp_display_signal_audio_complete(dp_display);
+ msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops dp_audio_codec_ops = {
- .hw_params = dp_audio_hw_params,
- .audio_shutdown = dp_audio_shutdown,
- .get_eld = dp_audio_get_eld,
- .hook_plugged_cb = dp_audio_hook_plugged_cb,
+static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
+ .hw_params = msm_dp_audio_hw_params,
+ .audio_shutdown = msm_dp_audio_shutdown,
+ .get_eld = msm_dp_audio_get_eld,
+ .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
};
static struct hdmi_codec_pdata codec_data = {
- .ops = &dp_audio_codec_ops,
+ .ops = &msm_dp_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
-void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio_priv;
+ struct msm_dp_audio_private *audio_priv;
- audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
if (audio_priv->audio_pdev) {
platform_device_unregister(audio_priv->audio_pdev);
@@ -520,13 +520,13 @@ void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
}
}
-int dp_register_audio_driver(struct device *dev,
- struct dp_audio *dp_audio)
+int msm_dp_register_audio_driver(struct device *dev,
+ struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio_priv;
+ struct msm_dp_audio_private *audio_priv;
- audio_priv = container_of(dp_audio,
- struct dp_audio_private, dp_audio);
+ audio_priv = container_of(msm_dp_audio,
+ struct msm_dp_audio_private, msm_dp_audio);
audio_priv->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
@@ -536,13 +536,13 @@ int dp_register_audio_driver(struct device *dev,
return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
}
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
- struct dp_panel *panel,
- struct dp_catalog *catalog)
+struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
+ struct msm_dp_panel *panel,
+ struct msm_dp_catalog *catalog)
{
int rc = 0;
- struct dp_audio_private *audio;
- struct dp_audio *dp_audio;
+ struct msm_dp_audio_private *audio;
+ struct msm_dp_audio *msm_dp_audio;
if (!pdev || !panel || !catalog) {
DRM_ERROR("invalid input\n");
@@ -559,23 +559,23 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
audio->pdev = pdev;
audio->catalog = catalog;
- dp_audio = &audio->dp_audio;
+ msm_dp_audio = &audio->msm_dp_audio;
- dp_catalog_audio_init(catalog);
+ msm_dp_catalog_audio_init(catalog);
- return dp_audio;
+ return msm_dp_audio;
error:
return ERR_PTR(rc);
}
-void dp_audio_put(struct dp_audio *dp_audio)
+void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
- if (!dp_audio)
+ if (!msm_dp_audio)
return;
- audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ audio = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
devm_kfree(&audio->pdev->dev, audio);
}
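Editor's note: every SDP setup function renamed above follows the same packing pattern: read the current header word from the catalog, derive a parity byte for the new header byte, and OR both into their bit positions before writing the word back. A hedged, self-contained sketch of that step follows; the shift values are illustrative placeholders rather than the driver's real HEADER_BYTE_1_BIT/PARITY_BYTE_1_BIT definitions, and only msm_dp_utils_calculate_parity() (renamed above) is taken from the patch.

/* Illustrative bit positions; the real ones live in the DP register headers. */
#define EXAMPLE_HEADER_BYTE_1_BIT       16
#define EXAMPLE_PARITY_BYTE_1_BIT       24

static u32 example_pack_sdp_header_byte(u32 current_value, u8 new_byte)
{
        u8 parity_byte = msm_dp_utils_calculate_parity(new_byte);

        /* Preserve the other header fields, add the byte and its parity. */
        return current_value |
               (new_byte << EXAMPLE_HEADER_BYTE_1_BIT) |
               (parity_byte << EXAMPLE_PARITY_BYTE_1_BIT);
}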
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 4ab78880af82..1c9efaaa40e5 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -13,58 +13,58 @@
#include <sound/hdmi-codec.h>
/**
- * struct dp_audio
+ * struct msm_dp_audio
* @lane_count: number of lanes configured in current session
* @bw_code: link rate's bandwidth code for current session
*/
-struct dp_audio {
+struct msm_dp_audio {
u32 lane_count;
u32 bw_code;
};
/**
- * dp_audio_get()
+ * msm_dp_audio_get()
*
 * Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @panel: an instance of dp_panel module.
- * @catalog: an instance of dp_catalog module.
+ * @panel: an instance of msm_dp_panel module.
+ * @catalog: an instance of msm_dp_catalog module.
*
 * Returns the error code in case of failure, otherwise
- * an instance of newly created dp_module.
+ * an instance of newly created msm_dp_module.
*/
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
- struct dp_panel *panel,
- struct dp_catalog *catalog);
+struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
+ struct msm_dp_panel *panel,
+ struct msm_dp_catalog *catalog);
/**
- * dp_register_audio_driver()
+ * msm_dp_register_audio_driver()
*
* Registers DP device with hdmi_codec interface.
*
* @dev: DP device instance.
- * @dp_audio: an instance of dp_audio module.
+ * @msm_dp_audio: an instance of msm_dp_audio module.
*
*
* Returns the error code in case of failure, otherwise
* zero on success.
*/
-int dp_register_audio_driver(struct device *dev,
- struct dp_audio *dp_audio);
+int msm_dp_register_audio_driver(struct device *dev,
+ struct msm_dp_audio *msm_dp_audio);
-void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
/**
- * dp_audio_put()
+ * msm_dp_audio_put()
*
- * Cleans the dp_audio instance.
+ * Cleans the msm_dp_audio instance.
*
- * @dp_audio: an instance of dp_audio.
+ * @msm_dp_audio: an instance of msm_dp_audio.
*/
-void dp_audio_put(struct dp_audio *dp_audio);
+void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int dp_audio_hw_params(struct device *dev,
+int msm_dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
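Editor's note: the kernel-doc above describes the audio module lifecycle (get, register, unregister, put). A hedged sketch of the expected call order follows; the example container struct and its field names are assumptions for illustration, only the dp_audio.h prototypes come from this header.

/* Illustrative container, not the driver's struct msm_dp. */
struct example_dp {
        struct platform_device *pdev;
        struct msm_dp_panel *panel;
        struct msm_dp_catalog *catalog;
        struct msm_dp_audio *audio;
};

static int example_audio_bind(struct device *dev, struct example_dp *dp)
{
        dp->audio = msm_dp_audio_get(dp->pdev, dp->panel, dp->catalog);
        if (IS_ERR(dp->audio))
                return PTR_ERR(dp->audio);

        /* Expose the DP device to ASoC through the hdmi-codec interface. */
        return msm_dp_register_audio_driver(dev, dp->audio);
}

static void example_audio_unbind(struct device *dev, struct example_dp *dp)
{
        msm_dp_unregister_audio_driver(dev, dp->audio);
        msm_dp_audio_put(dp->audio);
        dp->audio = NULL;
}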
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 00dfafbebe0e..bc8d46abfc61 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,9 +20,9 @@ enum msm_dp_aux_err {
DP_AUX_ERR_PHY,
};
-struct dp_aux_private {
+struct msm_dp_aux_private {
struct device *dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
struct phy *phy;
@@ -42,12 +42,12 @@ struct dp_aux_private {
u32 offset;
u32 segment;
- struct drm_dp_aux dp_aux;
+ struct drm_dp_aux msm_dp_aux;
};
#define MAX_AUX_RETRIES 5
-static ssize_t dp_aux_write(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u8 data[4];
@@ -88,11 +88,11 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- dp_catalog_aux_write_data(aux->catalog, reg);
+ msm_dp_catalog_aux_write_data(aux->catalog, reg);
}
- dp_catalog_aux_clear_trans(aux->catalog, false);
- dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_catalog_aux_clear_trans(aux->catalog, false);
+ msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
@@ -106,12 +106,12 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- dp_catalog_aux_write_trans(aux->catalog, reg);
+ msm_dp_catalog_aux_write_trans(aux->catalog, reg);
return len;
}
-static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
@@ -119,7 +119,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
reinit_completion(&aux->comp);
- ret = dp_aux_write(aux, msg);
+ ret = msm_dp_aux_write(aux, msg);
if (ret < 0)
return ret;
@@ -131,7 +131,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
return ret;
}
-static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
@@ -139,20 +139,20 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
u32 i, actual_i;
u32 len = msg->size;
- dp_catalog_aux_clear_trans(aux->catalog, true);
+ msm_dp_catalog_aux_clear_trans(aux->catalog, true);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- dp_catalog_aux_write_data(aux->catalog, data);
+ msm_dp_catalog_aux_write_data(aux->catalog, data);
dp = msg->buffer;
/* discard first byte */
- data = dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_catalog_aux_read_data(aux->catalog);
for (i = 0; i < len; i++) {
- data = dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_catalog_aux_read_data(aux->catalog);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
@@ -163,7 +163,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
return i;
}
-static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
u32 edid_address = 0x50;
@@ -185,7 +185,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
}
/**
- * dp_aux_transfer_helper() - helper function for EDID read transactions
+ * msm_dp_aux_transfer_helper() - helper function for EDID read transactions
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
@@ -196,7 +196,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
* This helper function is used to fix EDID reads for non-compliant
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
-static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg,
bool send_seg)
{
@@ -238,7 +238,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
helper_msg.address = segment_address;
helper_msg.buffer = &aux->segment;
helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);
}
/*
@@ -252,7 +252,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
helper_msg.address = input_msg->address;
helper_msg.buffer = &aux->offset;
helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);
end:
aux->offset += message_size;
@@ -265,15 +265,15 @@ end:
* It will call aux_reset() function to reset the AUX channel,
* if the waiting is timeout.
*/
-static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
@@ -292,7 +292,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(dp_aux->dev);
+ ret = pm_runtime_resume_and_get(msm_dp_aux->dev);
if (ret)
return ret;
@@ -313,8 +313,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
goto exit;
}
- dp_aux_update_offset_and_segment(aux, msg);
- dp_aux_transfer_helper(aux, msg, true);
+ msm_dp_aux_update_offset_and_segment(aux, msg);
+ msm_dp_aux_transfer_helper(aux, msg, true);
aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
@@ -327,7 +327,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
aux->no_send_stop = true;
}
- ret = dp_aux_cmd_fifo_tx(aux, msg);
+ ret = msm_dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
if (aux->native) {
aux->retry_cnt++;
@@ -335,14 +335,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
- if (dp_catalog_link_is_connected(aux->catalog))
- dp_catalog_aux_reset(aux->catalog);
+ if (msm_dp_catalog_link_is_connected(aux->catalog))
+ msm_dp_catalog_aux_reset(aux->catalog);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
case DP_AUX_ERR_NONE:
if (aux->read)
- ret = dp_aux_cmd_fifo_rx(aux, msg);
+ ret = msm_dp_aux_cmd_fifo_rx(aux, msg);
msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
break;
case DP_AUX_ERR_DEFER:
@@ -364,24 +364,24 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
exit:
mutex_unlock(&aux->mutex);
- pm_runtime_put_sync(dp_aux->dev);
+ pm_runtime_put_sync(msm_dp_aux->dev);
return ret;
}
-irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
{
u32 isr;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return IRQ_NONE;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
- isr = dp_catalog_aux_get_irq(aux->catalog);
+ isr = msm_dp_catalog_aux_get_irq(aux->catalog);
/* no interrupts pending, return immediately */
if (!isr)
@@ -403,7 +403,7 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
- dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
@@ -429,68 +429,68 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
return IRQ_HANDLED;
}
-void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
+void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
aux->enable_xfers = enabled;
}
-void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
phy_calibrate(aux->phy);
- dp_catalog_aux_reset(aux->catalog);
+ msm_dp_catalog_aux_reset(aux->catalog);
}
-void dp_aux_init(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_lock(&aux->mutex);
- dp_catalog_aux_enable(aux->catalog, true);
+ msm_dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
aux->initted = true;
mutex_unlock(&aux->mutex);
}
-void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_lock(&aux->mutex);
aux->initted = false;
- dp_catalog_aux_enable(aux->catalog, false);
+ msm_dp_catalog_aux_enable(aux->catalog, false);
mutex_unlock(&aux->mutex);
}
-int dp_aux_register(struct drm_dp_aux *dp_aux)
+int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux)
{
int ret;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- ret = drm_dp_aux_register(dp_aux);
+ ret = drm_dp_aux_register(msm_dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
@@ -500,34 +500,34 @@ int dp_aux_register(struct drm_dp_aux *dp_aux)
return 0;
}
-void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux)
{
- drm_dp_aux_unregister(dp_aux);
+ drm_dp_aux_unregister(msm_dp_aux);
}
-static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
unsigned long wait_us)
{
int ret;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
ret = pm_runtime_resume_and_get(aux->dev);
if (ret)
return ret;
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
}
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
struct phy *phy,
bool is_edp)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
if (!catalog) {
DRM_ERROR("invalid input\n");
@@ -553,23 +553,23 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
* before registering AUX with the DRM device so that
* msm eDP panel can be detected by generic_dep_panel_probe().
*/
- aux->dp_aux.name = "dpu_dp_aux";
- aux->dp_aux.dev = dev;
- aux->dp_aux.transfer = dp_aux_transfer;
- aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
- drm_dp_aux_init(&aux->dp_aux);
+ aux->msm_dp_aux.name = "dpu_dp_aux";
+ aux->msm_dp_aux.dev = dev;
+ aux->msm_dp_aux.transfer = msm_dp_aux_transfer;
+ aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted;
+ drm_dp_aux_init(&aux->msm_dp_aux);
- return &aux->dp_aux;
+ return &aux->msm_dp_aux;
}
-void dp_aux_put(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux)
+ if (!msm_dp_aux)
return;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_destroy(&aux->mutex);
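Editor's note: msm_dp_aux_transfer_helper() above exists because extended EDID reads need an E-DDC segment write before the offset write, to cope with sinks that mishandle the i2c middle-of-transaction flag. A hedged sketch of the segment arithmetic is below; the 0x50 EDID address appears in the patch, while the 0x30 segment-pointer address and the 128-byte block size follow the usual E-DDC convention rather than this file.

#define EXAMPLE_EDID_ADDR       0x50    /* EDID slave address (also used above) */
#define EXAMPLE_SEGMENT_ADDR    0x30    /* conventional E-DDC segment pointer */
#define EXAMPLE_EDID_BLOCK_LEN  128

static void example_edid_block_location(unsigned int block,
                                        u8 *segment, u8 *offset)
{
        /* Two 128-byte blocks fit in each 256-byte E-DDC segment. */
        *segment = block / 2;
        *offset = (block % 2) * EXAMPLE_EDID_BLOCK_LEN;
}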
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 4f65e892a807..39c5b4c8596a 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -9,18 +9,18 @@
#include "dp_catalog.h"
#include <drm/display/drm_dp_helper.h>
-int dp_aux_register(struct drm_dp_aux *dp_aux);
-void dp_aux_unregister(struct drm_dp_aux *dp_aux);
-irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
-void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
-void dp_aux_init(struct drm_dp_aux *dp_aux);
-void dp_aux_deinit(struct drm_dp_aux *dp_aux);
-void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux);
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled);
+void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux);
struct phy;
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
struct phy *phy,
bool is_edp);
-void dp_aux_put(struct drm_dp_aux *aux);
+void msm_dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
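Editor's note: for orientation, a hedged sketch of wiring up the AUX channel with the declarations above: creation at probe, interrupt dispatch, then registration with DRM. The surrounding driver structure (a dedicated irq line handing only AUX work to example_dp_irq) is an assumption; only the msm_dp_aux_* prototypes come from this header.

static irqreturn_t example_dp_irq(int irq, void *data)
{
        struct drm_dp_aux *aux = data;

        /* Let the AUX layer ack its interrupts and complete transfers. */
        return msm_dp_aux_isr(aux);
}

static struct drm_dp_aux *example_aux_probe(struct device *dev,
                                            struct msm_dp_catalog *catalog,
                                            struct phy *phy, int irq)
{
        struct drm_dp_aux *aux;
        int ret;

        aux = msm_dp_aux_get(dev, catalog, phy, false /* is_edp */);
        if (IS_ERR(aux))
                return aux;

        ret = devm_request_irq(dev, irq, example_dp_irq, 0, "example_dp", aux);
        if (ret) {
                msm_dp_aux_put(aux);
                return ERR_PTR(ret);
        }

        ret = msm_dp_aux_register(aux);
        if (ret) {
                msm_dp_aux_put(aux);
                return ERR_PTR(ret);
        }

        return aux;
}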
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 6e55cbf69674..b4c8856fb25d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -75,18 +75,18 @@ struct dss_io_data {
struct dss_io_region p0;
};
-struct dp_catalog_private {
+struct msm_dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dss_io_data io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
- struct dp_catalog dp_catalog;
+ struct msm_dp_catalog msm_dp_catalog;
};
-void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
+void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
struct dss_io_data *dss = &catalog->io;
msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
@@ -95,12 +95,12 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
}
-static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.aux.base + offset);
}
-static inline void dp_write_aux(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -110,12 +110,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog,
writel(data, catalog->io.aux.base + offset);
}
-static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.ahb.base + offset);
}
-static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -125,7 +125,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog,
writel(data, catalog->io.ahb.base + offset);
}
-static inline void dp_write_p0(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -135,7 +135,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog,
writel(data, catalog->io.p0.base + offset);
}
-static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog,
u32 offset)
{
/*
@@ -145,12 +145,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
return readl_relaxed(catalog->io.p0.base + offset);
}
-static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.link.base + offset);
}
-static inline void dp_write_link(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -161,64 +161,64 @@ static inline void dp_write_link(struct dp_catalog_private *catalog,
}
/* aux related catalog functions */
-u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_aux(catalog, REG_DP_AUX_DATA);
+ return msm_dp_read_aux(catalog, REG_DP_AUX_DATA);
}
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data)
+int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_DATA, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data);
return 0;
}
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data)
+int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
return 0;
}
-int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read)
{
u32 data;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (read) {
- data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+ data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
data &= ~DP_AUX_TRANS_CTRL_GO;
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
} else {
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
}
return 0;
}
-int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+ msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
return 0;
}
/**
- * dp_catalog_aux_reset() - reset AUX controller
+ * msm_dp_catalog_aux_reset() - reset AUX controller
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* return: void
*
@@ -227,47 +227,47 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
* NOTE: reset AUX controller will also clear any pending HPD related interrupts
*
*/
-void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog)
{
u32 aux_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+ aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
aux_ctrl |= DP_AUX_CTRL_RESET;
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
usleep_range(1000, 1100); /* h/w recommended delay */
aux_ctrl &= ~DP_AUX_CTRL_RESET;
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
{
u32 aux_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+ aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
if (enable) {
- dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
- dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+ msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+ msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
aux_ctrl |= DP_AUX_CTRL_ENABLE;
} else {
aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
}
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
unsigned long wait_us)
{
u32 state;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
/* poll for hpd connected status every 2ms and timeout after wait_us */
return readl_poll_timeout(catalog->io.aux.base +
@@ -294,10 +294,10 @@ static void dump_regs(void __iomem *base, int len)
}
}
-void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
struct dss_io_data *io = &catalog->io;
pr_info("AHB regs\n");
@@ -313,17 +313,17 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
dump_regs(io->p0.base, io->p0.len);
}
-u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS);
intr &= ~DP_INTERRUPT_STATUS1_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS1)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
DP_INTERRUPT_STATUS1_MASK);
return intr;
@@ -331,40 +331,40 @@ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
}
/* controller related catalog functions */
-void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
- u32 dp_tu, u32 valid_boundary,
+void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
+ u32 msm_dp_tu, u32 valid_boundary,
u32 valid_boundary2)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
- dp_write_link(catalog, REG_DP_TU, dp_tu);
- dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+ msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+ msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu);
+ msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
-void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state);
}
-void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
- dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+ msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
}
-void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
u32 ln_mapping;
@@ -373,71 +373,71 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
- dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
ln_mapping);
}
-void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
u32 val;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
if (enable)
val |= DP_MAINLINK_CTRL_ENABLE;
else
val &= ~DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
}
-void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
u32 mainlink_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
- * dp_write_link() function uses writel()
+ * msm_dp_write_link() function uses writel()
*/
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
DP_MAINLINK_CTRL_ENABLE);
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
DP_MAINLINK_FB_BOUNDARY_SEL);
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
} else {
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
}
}
-void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog,
u32 colorimetry_cfg,
u32 test_bits_depth)
{
u32 misc_val;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
/* clear bpp bits */
misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
@@ -447,27 +447,27 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
}
-void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog)
{
u32 mainlink_ctrl, hw_revision;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- hw_revision = dp_catalog_hw_revision(dp_catalog);
+ hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
if (hw_revision >= DP_HW_VERSION_1_2)
mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
else
mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
}
-void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog,
u32 rate, u32 stream_rate_khz,
bool is_ycbcr_420)
{
@@ -478,8 +478,8 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
u32 const link_rate_hbr3 = 810000;
unsigned long den, num;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (rate == link_rate_hbr3)
pixel_div = 6;
@@ -522,22 +522,22 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
nvid *= 3;
drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
- dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
- dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+ msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+ msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+ msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
}
-int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog,
u32 state_bit)
{
int bit, ret;
u32 data;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
bit = BIT(state_bit - 1);
drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
- dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+ msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit);
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
@@ -554,25 +554,25 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
}
/**
- * dp_catalog_hw_revision() - retrieve DP hw revision
+ * msm_dp_catalog_hw_revision() - retrieve DP hw revision
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* Return: DP controller hw revision
*
*/
-u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog)
{
- const struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_ahb(catalog, REG_DP_HW_VERSION);
+ return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION);
}
/**
- * dp_catalog_ctrl_reset() - reset DP controller
+ * msm_dp_catalog_ctrl_reset() - reset DP controller
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* return: void
*
@@ -581,28 +581,28 @@ u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
* NOTE: reset DP controller will also clear any pending HPD related interrupts
*
*/
-void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog)
{
u32 sw_reset;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+ sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET);
sw_reset |= DP_SW_RESET;
- dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
usleep_range(1000, 1100); /* h/w recommended delay */
sw_reset &= ~DP_SW_RESET;
- dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
}
-bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog)
{
u32 data;
int ret;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
/* Poll for mainlink ready status */
ret = readl_poll_timeout(catalog->io.link.base +
@@ -617,96 +617,96 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
return true;
}
-void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (enable) {
- dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS,
DP_INTERRUPT_STATUS1_MASK);
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
DP_INTERRUPT_STATUS2_MASK);
} else {
- dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
}
}
-void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
u32 intr_mask, bool en)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+ u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
config = (en ? config | intr_mask : config & ~intr_mask);
drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
intr_mask, config);
- dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
-void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+ u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
/* Configure REFTIMER and enable it */
reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
- dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
/* Enable HPD */
- dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
}
-void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+ u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
- dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
- dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
}
-static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
+static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog)
{
/* trigger sdp */
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
}
-void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 config;
/* enable PSR1 function */
- config = dp_read_link(catalog, REG_PSR_CONFIG);
+ config = msm_dp_read_link(catalog, REG_PSR_CONFIG);
config |= PSR1_SUPPORTED;
- dp_write_link(catalog, REG_PSR_CONFIG, config);
+ msm_dp_write_link(catalog, REG_PSR_CONFIG, config);
- dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
- dp_catalog_enable_sdp(catalog);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
+ msm_dp_catalog_enable_sdp(catalog);
}
-void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter)
+void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 cmd;
- cmd = dp_read_link(catalog, REG_PSR_CMD);
+ cmd = msm_dp_read_link(catalog, REG_PSR_CMD);
cmd &= ~(PSR_ENTER | PSR_EXIT);
@@ -715,17 +715,17 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter)
else
cmd |= PSR_EXIT;
- dp_catalog_enable_sdp(catalog);
- dp_write_link(catalog, REG_PSR_CMD, cmd);
+ msm_dp_catalog_enable_sdp(catalog);
+ msm_dp_write_link(catalog, REG_PSR_CMD, cmd);
}
-u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 status;
- status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
@@ -733,16 +733,16 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
return status;
}
-u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
int isr, mask;
- isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+ isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
(isr & DP_DP_HPD_INT_MASK));
- mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+ mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
/*
* We only want to return interrupts that are unmasked to the caller.
@@ -754,115 +754,115 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
return isr & (mask | ~DP_DP_HPD_INT_MASK);
}
-u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
intr_ack = (intr & DP_INTERRUPT_STATUS4)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
return intr;
}
-int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
intr &= ~DP_INTERRUPT_STATUS2_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS2)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
intr_ack | DP_INTERRUPT_STATUS2_MASK);
return intr;
}
-void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+ msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL,
DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
usleep_range(1000, 1100); /* h/w recommended delay */
- dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+ msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
}
-void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
u32 pattern)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 value = 0x0;
/* Make sure to clear the current pattern before starting a new one */
- dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
break;
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
value &= ~(1 << 16);
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
break;
case DP_PHY_TEST_PATTERN_PRBS7:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_PRBS7);
break;
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
/* 00111110000011111000001111100000 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
0x3E0F83E0);
/* 00001111100000111110000011111000 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
0x0F83E0F8);
/* 1111100000111110 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
0x0000F83E);
break;
case DP_PHY_TEST_PATTERN_CP2520:
- value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
value = DP_HBR2_ERM_PATTERN;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value |= DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
break;
case DP_PHY_TEST_PATTERN_SEL_MASK:
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
DP_MAINLINK_CTRL_ENABLE);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
@@ -872,94 +872,94 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
}
}
-u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+ return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY);
}
/* panel related catalog functions */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 dp_active)
+int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 msm_dp_active)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 reg;
- dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
- dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
- dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
- dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active);
+ msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
+ msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
- reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
+ reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
- if (dp_catalog->wide_bus_en)
+ if (msm_dp_catalog->wide_bus_en)
reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
else
reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
- DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg);
+ DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg);
- dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
return 0;
}
-static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 header[2];
u32 val;
int i;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+ msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
- dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
- dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
(vsc_sdp->db[i + 3] << 24));
- dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
}
}
-static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog)
+static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 hw_revision;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- hw_revision = dp_catalog_hw_revision(dp_catalog);
+ hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
}
}
-void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 cfg, cfg2, misc;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
cfg |= GEN0_SDP_EN;
- dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
cfg2 |= GENERIC0_SDPSIZE_VALID;
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
- dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp);
+ msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp);
/* indicates presence of VSC (BIT(6) of MISC1) */
misc |= DP_MISC1_VSC_SDP;
@@ -967,27 +967,27 @@ void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sd
drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
pr_debug("misc settings = 0x%x\n", misc);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
- dp_catalog_panel_update_sdp(dp_catalog);
+ msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
}
-void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 cfg, cfg2, misc;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
cfg &= ~GEN0_SDP_EN;
- dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
cfg2 &= ~GENERIC0_SDPSIZE_VALID;
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
/* switch back to MSA */
misc &= ~DP_MISC1_VSC_SDP;
@@ -995,16 +995,16 @@ void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
pr_debug("misc settings = 0x%x\n", misc);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
- dp_catalog_panel_update_sdp(dp_catalog);
+ msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
}
-void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
@@ -1036,49 +1036,49 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
- dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
hsync_period);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
- dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
- dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
-
- dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
DP_TPG_CHECKERED_RECT_PATTERN);
- dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
DP_TPG_VIDEO_CONFIG_BPP_8BIT |
DP_TPG_VIDEO_CONFIG_RGB);
- dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+ msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
DP_BIST_ENABLE_DPBIST_EN);
- dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+ msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
DP_TIMING_ENGINE_EN_EN);
drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
}
-void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
- dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
}
-static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
struct resource *res;
void __iomem *base;
@@ -1090,21 +1090,21 @@ static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *l
return base;
}
-static int dp_catalog_get_io(struct dp_catalog_private *catalog)
+static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog)
{
struct platform_device *pdev = to_platform_device(catalog->dev);
struct dss_io_data *dss = &catalog->io;
- dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len);
if (IS_ERR(dss->ahb.base))
return PTR_ERR(dss->ahb.base);
- dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len);
if (IS_ERR(dss->aux.base)) {
/*
* The initial binding had a single reg, but in order to
* support variation in the sub-region sizes this was split.
- * dp_ioremap() will fail with -EINVAL here if only a single
+ * msm_dp_ioremap() will fail with -EINVAL here if only a single
* reg is specified, so fill in the sub-region offsets and
* lengths based on this single region.
*/
@@ -1126,13 +1126,13 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog)
return PTR_ERR(dss->aux.base);
}
} else {
- dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len);
if (IS_ERR(dss->link.base)) {
DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
return PTR_ERR(dss->link.base);
}
- dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len);
if (IS_ERR(dss->p0.base)) {
DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
return PTR_ERR(dss->p0.base);
@@ -1142,9 +1142,9 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog)
return 0;
}
-struct dp_catalog *dp_catalog_get(struct device *dev)
+struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
int ret;
catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
@@ -1153,78 +1153,78 @@ struct dp_catalog *dp_catalog_get(struct device *dev)
catalog->dev = dev;
- ret = dp_catalog_get_io(catalog);
+ ret = msm_dp_catalog_get_io(catalog);
if (ret)
return ERR_PTR(ret);
- return &catalog->dp_catalog;
+ return &catalog->msm_dp_catalog;
}
-u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
sdp_map = catalog->audio_map;
- return dp_read_link(catalog, sdp_map[sdp][header]);
+ return msm_dp_read_link(catalog, sdp_map[sdp][header]);
}
-void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header,
+void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header,
u32 data)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
sdp_map = catalog->audio_map;
- dp_write_link(catalog, sdp_map[sdp][header], data);
+ msm_dp_write_link(catalog, sdp_map[sdp][header], data);
}
-void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select)
+void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 acr_ctrl;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
select, acr_ctrl);
- dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
-void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable)
+void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 audio_ctrl;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+ audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
if (enable)
audio_ctrl |= BIT(0);
@@ -1233,24 +1233,24 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable)
drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
- dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
/* make sure audio engine is disabled */
wmb();
}
-void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 sdp_cfg = 0;
u32 sdp_cfg2 = 0;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
/* AUDIO_STREAM_SDP_EN */
@@ -1264,9 +1264,9 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
- dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
- sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
/* IFRM_REGSRC -> Do not use reg values */
sdp_cfg2 &= ~BIT(0);
/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
@@ -1274,12 +1274,12 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
-void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
{
@@ -1309,27 +1309,27 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
},
};
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
catalog->audio_map = sdp_map;
}
-void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level)
+void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 mainlink_levels;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
@@ -1337,5 +1337,5 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_
"mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
mainlink_levels, safe_to_exit_level);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
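
Nearly every helper renamed in the dp_catalog.c hunks above follows the same accessor pattern: the public struct msm_dp_catalog is embedded in a driver-private struct msm_dp_catalog_private, and each function recovers the private state with container_of() before touching the AHB/AUX/link/p0 register regions. The sketch below is illustrative only: the private members and the to_private() helper are stand-ins, not the driver's actual definitions, and the real code uses the type-checked container_of() from <linux/container_of.h>.

#include <stddef.h>
#include <stdbool.h>

/* Simplified container_of(); the kernel macro adds type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msm_dp_catalog {
	bool wide_bus_en;			/* public part, as in dp_catalog.h */
};

struct msm_dp_catalog_private {
	void *dev;				/* stand-in for struct device *, io regions, ... */
	struct msm_dp_catalog msm_dp_catalog;	/* embedded public struct */
};

/* Recover the private wrapper from a pointer to its embedded member. */
static struct msm_dp_catalog_private *
to_private(struct msm_dp_catalog *msm_dp_catalog)
{
	return container_of(msm_dp_catalog,
			    struct msm_dp_catalog_private, msm_dp_catalog);
}
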
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 4679d50b8c73..e932b17eecbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -31,7 +31,7 @@
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
-enum dp_catalog_audio_sdp_type {
+enum msm_dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
DP_AUDIO_SDP_INFOFRAME,
@@ -40,89 +40,89 @@ enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_MAX,
};
-enum dp_catalog_audio_header_type {
+enum msm_dp_catalog_audio_header_type {
DP_AUDIO_SDP_HEADER_1,
DP_AUDIO_SDP_HEADER_2,
DP_AUDIO_SDP_HEADER_3,
DP_AUDIO_SDP_HEADER_MAX,
};
-struct dp_catalog {
+struct msm_dp_catalog {
bool wide_bus_en;
};
/* Debug module */
-void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state);
+void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state);
/* AUX APIs */
-u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data);
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data);
-int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
-int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
-void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
-void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog);
+int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data);
+int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data);
+int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read);
+int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
unsigned long wait_us);
-u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog);
/* DP Controller APIs */
-void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
-void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
-void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
-void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state);
+void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config);
+void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb);
+void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate,
u32 stream_rate_khz, bool is_ycbcr_420);
-int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
-u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
-bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern);
+u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog);
+bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
u32 intr_mask, bool en);
-void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
-u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
-u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
-int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
-u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
- u32 dp_tu, u32 valid_boundary,
+void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter);
+u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog);
+u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog);
+int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog);
+u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
+ u32 msm_dp_tu, u32 valid_boundary,
u32 valid_boundary2);
-void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
u32 pattern);
-u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog);
/* DP Panel APIs */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 dp_active);
-void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp);
-void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog);
-void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
-void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 msm_dp_active);
+void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
+void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode);
-void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
-struct dp_catalog *dp_catalog_get(struct device *dev);
+struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
/* DP Audio APIs */
-u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header);
-void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header,
+u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header);
+void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
+ enum msm_dp_catalog_audio_sdp_type sdp,
+ enum msm_dp_catalog_audio_header_type header,
u32 data);
-void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select);
-void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable);
-void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
-void dp_catalog_audio_init(struct dp_catalog *catalog);
-void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level);
+void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
+void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
+void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
+void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog);
+void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */
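
The header above now exports the renamed public entry points. As a quick orientation, a hypothetical caller could drive HPD bring-up through them as sketched below; the function name and the exact ordering are assumptions for illustration, since the real sequencing lives in dp_display.c and dp_ctrl.c rather than in this header.

#include <linux/device.h>
#include <linux/err.h>
#include "dp_catalog.h"

static int example_dp_hpd_bringup(struct device *dev)
{
	struct msm_dp_catalog *catalog;

	catalog = msm_dp_catalog_get(dev);	/* devm-allocated, maps the DP reg regions */
	if (IS_ERR(catalog))
		return PTR_ERR(catalog);

	/* The controller reset also clears pending HPD-related interrupts. */
	msm_dp_catalog_ctrl_reset(catalog);
	msm_dp_catalog_ctrl_enable_irq(catalog, true);
	msm_dp_catalog_ctrl_hpd_enable(catalog);

	return 0;
}
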
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index f342fc5ae41e..bc2ca8133b79 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -40,7 +40,7 @@ enum {
DP_TRAINING_2,
};
-struct dp_tu_calc_input {
+struct msm_dp_tu_calc_input {
u64 lclk; /* 162, 270, 540 and 810 */
u64 pclk_khz; /* in KHz */
u64 hactive; /* active h-width */
@@ -55,7 +55,7 @@ struct dp_tu_calc_input {
int num_of_dsc_slices; /* number of slices per line */
};
-struct dp_vc_tu_mapping_table {
+struct msm_dp_vc_tu_mapping_table {
u32 vic;
u8 lanes;
u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
@@ -69,14 +69,14 @@ struct dp_vc_tu_mapping_table {
u8 tu_size_minus1;
};
-struct dp_ctrl_private {
- struct dp_ctrl dp_ctrl;
+struct msm_dp_ctrl_private {
+ struct msm_dp_ctrl msm_dp_ctrl;
struct drm_device *drm_dev;
struct device *dev;
struct drm_dp_aux *aux;
- struct dp_panel *panel;
- struct dp_link *link;
- struct dp_catalog *catalog;
+ struct msm_dp_panel *panel;
+ struct msm_dp_link *link;
+ struct msm_dp_catalog *catalog;
struct phy *phy;
@@ -99,8 +99,8 @@ struct dp_ctrl_private {
bool stream_clks_on;
};
-static int dp_aux_link_configure(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 values[2];
int err;
@@ -118,14 +118,14 @@ static int dp_aux_link_configure(struct drm_dp_aux *aux,
return 0;
}
-void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
reinit_completion(&ctrl->idle_comp);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
@@ -134,7 +134,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "mainlink off\n");
}
-static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
const u8 *dpcd = ctrl->panel->dpcd;
@@ -142,15 +142,15 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
- if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */
/* Scrambler reset enable */
if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
- tbd = dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->dp_mode.bpp);
+ tbd = msm_dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->msm_dp_mode.bpp);
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
@@ -170,24 +170,24 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
- dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+ msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
}
-static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl)
{
u32 cc, tb;
- dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
- dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
- dp_ctrl_config_ctrl(ctrl);
+ msm_dp_ctrl_config_ctrl(ctrl);
- tb = dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->dp_mode.bpp);
- cc = dp_link_get_colorimetry_config(ctrl->link);
- dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
- dp_panel_timing_cfg(ctrl->panel);
+ tb = msm_dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->msm_dp_mode.bpp);
+ cc = msm_dp_link_get_colorimetry_config(ctrl->link);
+ msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+ msm_dp_panel_timing_cfg(ctrl->panel);
}
/*
@@ -310,7 +310,7 @@ static int _tu_param_compare(s64 a, s64 b)
}
}
-static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+static void msm_dp_panel_update_tu_timings(struct msm_dp_tu_calc_input *in,
struct tu_algo_data *tu)
{
int nlanes = in->nlanes;
@@ -622,9 +622,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
}
}
-static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
- struct dp_tu_calc_input *in,
- struct dp_vc_tu_mapping_table *tu_table)
+static void _dp_ctrl_calc_tu(struct msm_dp_ctrl_private *ctrl,
+ struct msm_dp_tu_calc_input *in,
+ struct msm_dp_vc_tu_mapping_table *tu_table)
{
struct tu_algo_data *tu;
int compare_result_1, compare_result_2;
@@ -645,7 +645,7 @@ static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
if (!tu)
return;
- dp_panel_update_tu_timings(in, tu);
+ msm_dp_panel_update_tu_timings(in, tu);
tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
@@ -956,21 +956,21 @@ tu_size_calc:
kfree(tu);
}
-static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
- struct dp_vc_tu_mapping_table *tu_table)
+static void msm_dp_ctrl_calc_tu_parameters(struct msm_dp_ctrl_private *ctrl,
+ struct msm_dp_vc_tu_mapping_table *tu_table)
{
- struct dp_tu_calc_input in;
+ struct msm_dp_tu_calc_input in;
struct drm_display_mode *drm_mode;
- drm_mode = &ctrl->panel->dp_mode.drm_mode;
+ drm_mode = &ctrl->panel->msm_dp_mode.drm_mode;
in.lclk = ctrl->link->link_params.rate / 1000;
in.pclk_khz = drm_mode->clock;
in.hactive = drm_mode->hdisplay;
in.hporch = drm_mode->htotal - drm_mode->hdisplay;
in.nlanes = ctrl->link->link_params.num_lanes;
- in.bpp = ctrl->panel->dp_mode.bpp;
- in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444;
+ in.bpp = ctrl->panel->msm_dp_mode.bpp;
+ in.pixel_enc = ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420 ? 420 : 444;
in.dsc_en = 0;
in.async_en = 0;
in.fec_en = 0;
@@ -980,16 +980,16 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
_dp_ctrl_calc_tu(ctrl, &in, tu_table);
}
-static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl)
{
- u32 dp_tu = 0x0;
+ u32 msm_dp_tu = 0x0;
u32 valid_boundary = 0x0;
u32 valid_boundary2 = 0x0;
- struct dp_vc_tu_mapping_table tu_calc_table;
+ struct msm_dp_vc_tu_mapping_table tu_calc_table;
- dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+ msm_dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
- dp_tu |= tu_calc_table.tu_size_minus1;
+ msm_dp_tu |= tu_calc_table.tu_size_minus1;
valid_boundary |= tu_calc_table.valid_boundary_link;
valid_boundary |= (tu_calc_table.delay_start_link << 16);
@@ -1001,13 +1001,13 @@ static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
valid_boundary2 |= BIT(0);
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
- dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_tu, valid_boundary, valid_boundary2);
- dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
- dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+ msm_dp_tu, valid_boundary, valid_boundary2);
}
-static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1019,7 +1019,7 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
u8 v_level, u8 p_level)
{
union phy_configure_opts *phy_opts = &ctrl->phy_opts;
@@ -1034,9 +1034,9 @@ static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl,
return 0;
}
-static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
{
- struct dp_link *link = ctrl->link;
+ struct msm_dp_link *link = ctrl->link;
int ret = 0, lane, lane_cnt;
u8 buf[4];
u32 max_level_reached = 0;
@@ -1046,7 +1046,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev,
"voltage level: %d emphasis level: %d\n",
voltage_swing_level, pre_emphasis_level);
- ret = dp_ctrl_set_vx_px(ctrl,
+ ret = msm_dp_ctrl_set_vx_px(ctrl,
voltage_swing_level, pre_emphasis_level);
if (ret)
@@ -1083,7 +1083,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
return ret;
}
-static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
u8 pattern)
{
u8 buf;
@@ -1100,7 +1100,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
return ret == 1;
}
-static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
u8 *link_status)
{
int ret = 0, len;
@@ -1114,24 +1114,24 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
return ret;
}
-static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
- ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
- ret = dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
@@ -1140,7 +1140,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
for (tries = 0; tries < maximum_retries; tries++) {
drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = dp_ctrl_read_link_status(ctrl, link_status);
+ ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
@@ -1160,8 +1160,8 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
old_v_level = ctrl->link->phy_params.v_level;
}
- dp_link_adjust_levels(ctrl->link, link_status);
- ret = dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_adjust_levels(ctrl->link, link_status);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
}
@@ -1170,7 +1170,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_rate_down_shift(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1198,7 +1198,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
{
if (ctrl->link->link_params.num_lanes == 1)
@@ -1213,13 +1213,13 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
return 0;
}
-static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
{
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
}
-static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int tries = 0, ret = 0;
@@ -1228,7 +1228,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
@@ -1243,16 +1243,16 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
state_ctrl_bit = 2;
}
- ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = dp_ctrl_read_link_status(ctrl, link_status);
+ ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
@@ -1261,8 +1261,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return 0;
}
- dp_link_adjust_levels(ctrl->link, link_status);
- ret = dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_adjust_levels(ctrl->link, link_status);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
@@ -1271,24 +1271,24 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
u8 assr;
- struct dp_link_info link_info = {0};
+ struct msm_dp_link_info link_info = {0};
- dp_ctrl_config_ctrl(ctrl);
+ msm_dp_ctrl_config_ctrl(ctrl);
link_info.num_lanes = ctrl->link->link_params.num_lanes;
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- dp_link_reset_phy_params_vx_px(ctrl->link);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
- dp_aux_link_configure(ctrl->aux, &link_info);
+ msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
encoding[0] |= DP_SPREAD_AMP_0_5;
@@ -1302,7 +1302,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = dp_ctrl_link_train_1(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
goto end;
@@ -1311,7 +1311,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
/* print success info as this is a result of user initiated action */
drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
- ret = dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #2 failed. ret=%d\n", ret);
goto end;
@@ -1321,17 +1321,17 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
end:
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
return ret;
}
-static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
@@ -1342,17 +1342,17 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* a link training pattern, we have to first do soft reset.
*/
- ret = dp_ctrl_link_train(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train(ctrl, training_step);
return ret;
}
-int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl)
+int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret = 0;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->core_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n");
@@ -1374,11 +1374,11 @@ int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl)
return 0;
}
-void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks);
@@ -1391,12 +1391,12 @@ void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl)
ctrl->core_clks_on ? "on" : "off");
}
-static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
+static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret = 0;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->link_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n");
@@ -1406,7 +1406,7 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
if (!ctrl->core_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n");
- dp_ctrl_core_clk_enable(dp_ctrl);
+ msm_dp_ctrl_core_clk_enable(msm_dp_ctrl);
}
ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks);
@@ -1424,11 +1424,11 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
return 0;
}
-static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl)
+static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks);
@@ -1441,7 +1441,7 @@ static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl)
ctrl->core_clks_on ? "on" : "off");
}
-static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
struct phy *phy = ctrl->phy;
@@ -1455,7 +1455,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
phy_power_on(phy);
dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
- ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_link_clk_enable(&ctrl->msm_dp_ctrl);
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
@@ -1464,13 +1464,13 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
return ret;
}
-void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
+void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
/*
* all dp controller programmable registers will not
@@ -1478,28 +1478,28 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
* be reset to default value after DP_SW_RESET
* therefore interrupt mask bits have to be updated
* to enable/disable interrupts
*/
- dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
-void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl)
{
u8 cfg;
- struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
- struct dp_ctrl_private, dp_ctrl);
+ struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
+ struct msm_dp_ctrl_private, msm_dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
- dp_catalog_ctrl_config_psr(ctrl->catalog);
+ msm_dp_catalog_ctrl_config_psr(ctrl->catalog);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
}
-void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
+void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter)
{
- struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
- struct dp_ctrl_private, dp_ctrl);
+ struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
+ struct msm_dp_ctrl_private, msm_dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
@@ -1516,64 +1516,64 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
- dp_catalog_ctrl_set_psr(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timedout\n");
- dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
return;
}
- dp_ctrl_push_idle(dp_ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_ctrl_push_idle(msm_dp_ctrl);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
- dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
} else {
- dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
- dp_catalog_ctrl_set_psr(ctrl->catalog, false);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- dp_ctrl_wait4video_ready(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_ctrl_wait4video_ready(ctrl);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
}
}
-void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
{
struct phy *phy = ctrl->phy;
int ret = 0;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &ctrl->phy_opts);
/*
@@ -1583,13 +1583,13 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
* Disable and re-enable the mainlink clock since the
* link clock might have been adjusted as part of the
* link maintenance.
*/
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
/* hw recommended delay before re-enabling clocks */
msleep(20);
- ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
return ret;
@@ -1598,18 +1598,18 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
{
struct phy *phy;
phy = ctrl->phy;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
@@ -1622,30 +1622,30 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
return 0;
}
-static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
int training_step = DP_TRAINING_NONE;
- dp_ctrl_push_idle(&ctrl->dp_ctrl);
+ msm_dp_ctrl_push_idle(&ctrl->msm_dp_ctrl);
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
- ret = dp_ctrl_setup_main_link(ctrl, &training_step);
+ ret = msm_dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- ret = dp_ctrl_wait4video_ready(ctrl);
+ ret = msm_dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
-static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
{
bool success = false;
u32 pattern_sent = 0x0;
@@ -1653,17 +1653,17 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
- if (dp_ctrl_set_vx_px(ctrl,
+ if (msm_dp_ctrl_set_vx_px(ctrl,
ctrl->link->phy_params.v_level,
ctrl->link->phy_params.p_level)) {
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
- dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- dp_ctrl_update_vx_px(ctrl);
- dp_link_send_test_response(ctrl->link);
+ msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_send_test_response(ctrl->link);
- pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+ pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1697,7 +1697,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
-static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_process_phy_test_request(struct msm_dp_ctrl_private *ctrl)
{
int ret;
unsigned long pixel_rate;
@@ -1713,15 +1713,15 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- dp_ctrl_off(&ctrl->dp_ctrl);
+ msm_dp_ctrl_off(&ctrl->msm_dp_ctrl);
- ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_on_link(&ctrl->msm_dp_ctrl);
if (ret) {
DRM_ERROR("failed to enable DP link controller\n");
return ret;
}
- pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock;
ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret);
@@ -1739,49 +1739,49 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
ctrl->stream_clks_on = true;
}
- dp_ctrl_send_phy_test_pattern(ctrl);
+ msm_dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
}
-void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 sink_request = 0x0;
- if (!dp_ctrl) {
+ if (!msm_dp_ctrl) {
DRM_ERROR("invalid input\n");
return;
}
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
sink_request = ctrl->link->sink_request;
if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n");
- if (dp_ctrl_process_phy_test_request(ctrl)) {
+ if (msm_dp_ctrl_process_phy_test_request(ctrl)) {
DRM_ERROR("process phy_test_req failed\n");
return;
}
}
if (sink_request & DP_LINK_STATUS_UPDATED) {
- if (dp_ctrl_link_maintenance(ctrl)) {
+ if (msm_dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
if (sink_request & DP_TEST_LINK_TRAINING) {
- dp_link_send_test_response(ctrl->link);
- if (dp_ctrl_link_maintenance(ctrl)) {
+ msm_dp_link_send_test_response(ctrl->link);
+ if (msm_dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
}
-static bool dp_ctrl_clock_recovery_any_ok(
+static bool msm_dp_ctrl_clock_recovery_any_ok(
const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
@@ -1800,20 +1800,20 @@ static bool dp_ctrl_clock_recovery_any_ok(
return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
}
-static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
{
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
-int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
{
int rc = 0;
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
@@ -1821,15 +1821,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
unsigned int training_step;
unsigned long pixel_rate;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return -EINVAL;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
rate = ctrl->panel->link_info.rate;
- pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock;
- dp_ctrl_core_clk_enable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_core_clk_enable(&ctrl->msm_dp_ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
@@ -1840,7 +1840,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
- if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
pixel_rate >>= 1;
}
@@ -1848,32 +1848,32 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
pixel_rate);
- rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+ rc = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
return rc;
while (--link_train_max_retries) {
training_step = DP_TRAINING_NONE;
- rc = dp_ctrl_setup_main_link(ctrl, &training_step);
+ rc = msm_dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
/* training completed successfully */
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
- rc = dp_ctrl_link_rate_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
- if (dp_ctrl_clock_recovery_any_ok(link_status,
+ if (msm_dp_ctrl_clock_recovery_any_ok(link_status,
ctrl->link->link_params.num_lanes)) {
/*
* some lanes are ready,
* reduce lane number
*/
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) { /* lane == 1 already */
/* end with failure */
break;
@@ -1885,16 +1885,16 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
- rc = dp_ctrl_link_rate_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
else
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) {
/* end with failure */
@@ -1902,10 +1902,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
/* stop link training before start re training */
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
}
- rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
break;
@@ -1926,38 +1926,38 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
* link training failed
* end txing train pattern here
*/
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
- dp_ctrl_deinitialize_mainlink(ctrl);
+ msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
}
return rc;
}
-static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl)
{
int training_step = DP_TRAINING_NONE;
- return dp_ctrl_setup_main_link(ctrl, &training_step);
+ return msm_dp_ctrl_setup_main_link(ctrl, &training_step);
}
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
+int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
unsigned long pixel_rate;
unsigned long pixel_rate_orig;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return -EINVAL;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
- pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = pixel_rate_orig = ctrl->panel->msm_dp_mode.drm_mode.clock;
- if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (msm_dp_ctrl->wide_bus_en || ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
pixel_rate >>= 1;
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
@@ -1969,7 +1969,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on);
if (!ctrl->link_clks_on) { /* link clk is off */
- ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
goto end;
@@ -1993,11 +1993,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl->stream_clks_on = true;
}
- if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
- dp_ctrl_link_retrain(ctrl);
+ if (force_link_train || !msm_dp_ctrl_channel_eq_ok(ctrl))
+ msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
/*
* Set up transfer unit values and set controller state to send
@@ -2005,22 +2005,22 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
* video.
*/
reinit_completion(&ctrl->video_comp);
- dp_ctrl_configure_source_params(ctrl);
+ msm_dp_ctrl_configure_source_params(ctrl);
- dp_catalog_ctrl_config_msa(ctrl->catalog,
+ msm_dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
pixel_rate_orig,
- ctrl->panel->dp_mode.out_fmt_is_yuv_420);
+ ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420);
- dp_ctrl_setup_tr_unit(ctrl);
+ msm_dp_ctrl_setup_tr_unit(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- ret = dp_ctrl_wait4video_ready(ctrl);
+ ret = msm_dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
- mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
@@ -2028,20 +2028,20 @@ end:
return ret;
}
-void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
/* set dongle to D3 (power off) mode */
- dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
+ msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2049,7 +2049,7 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
}
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
@@ -2061,17 +2061,17 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -2082,19 +2082,19 @@ void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2102,26 +2102,26 @@ void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
}
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 isr;
irqreturn_t ret = IRQ_NONE;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return IRQ_NONE;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->panel->psr_cap.version) {
- isr = dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
+ isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
if (isr)
complete(&ctrl->psr_op_comp);
@@ -2136,7 +2136,7 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
- isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+ isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
@@ -2164,13 +2164,13 @@ static const char *ctrl_clks[] = {
"ctrl_link_iface",
};
-static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl)
+static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct device *dev;
int i, rc;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
dev = ctrl->dev;
ctrl->num_core_clks = ARRAY_SIZE(core_clks);
@@ -2204,12 +2204,12 @@ static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl)
return 0;
}
-struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
- struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_catalog *catalog,
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
+ struct msm_dp_panel *panel, struct drm_dp_aux *aux,
+ struct msm_dp_catalog *catalog,
struct phy *phy)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret;
if (!dev || !panel || !aux ||
@@ -2228,7 +2228,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
if (ret) {
dev_err(dev, "invalid DP OPP table in device tree\n");
/* caller do PTR_ERR(opp_table) */
- return (struct dp_ctrl *)ERR_PTR(ret);
+ return (struct msm_dp_ctrl *)ERR_PTR(ret);
}
/* OPP table is optional */
@@ -2248,11 +2248,11 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
ctrl->dev = dev;
ctrl->phy = phy;
- ret = dp_ctrl_clk_init(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl);
if (ret) {
dev_err(dev, "failed to init clocks\n");
return ERR_PTR(ret);
}
- return &ctrl->dp_ctrl;
+ return &ctrl->msm_dp_ctrl;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index ffcbd9a25748..b7abfedbf574 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -11,34 +11,34 @@
#include "dp_link.h"
#include "dp_catalog.h"
-struct dp_ctrl {
+struct msm_dp_ctrl {
bool wide_bus_en;
};
struct phy;
-int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
-void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_off(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
-irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
-struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
- struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_catalog *catalog,
+int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl);
+int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train);
+void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl);
+irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl);
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
+ struct msm_dp_panel *panel, struct drm_dp_aux *aux,
+ struct msm_dp_catalog *catalog,
struct phy *phy);
-void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
-void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
+void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
-void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable);
-void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl);
+void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl);
-int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl);
+int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl);
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index b8611f6d2296..22fd946ee201 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -17,15 +17,15 @@
#define DEBUG_NAME "msm_dp"
-struct dp_debug_private {
- struct dp_link *link;
- struct dp_panel *panel;
+struct msm_dp_debug_private {
+ struct msm_dp_link *link;
+ struct msm_dp_panel *panel;
struct drm_connector *connector;
};
-static int dp_debug_show(struct seq_file *seq, void *p)
+static int msm_dp_debug_show(struct seq_file *seq, void *p)
{
- struct dp_debug_private *debug = seq->private;
+ struct msm_dp_debug_private *debug = seq->private;
u64 lclk = 0;
u32 link_params_rate;
const struct drm_display_mode *drm_mode;
@@ -33,7 +33,7 @@ static int dp_debug_show(struct seq_file *seq, void *p)
if (!debug)
return -ENODEV;
- drm_mode = &debug->panel->dp_mode.drm_mode;
+ drm_mode = &debug->panel->msm_dp_mode.drm_mode;
seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
@@ -55,8 +55,8 @@ static int dp_debug_show(struct seq_file *seq, void *p)
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
seq_printf(seq, "\t\tactive_low = %dx%d\n",
- debug->panel->dp_mode.h_active_low,
- debug->panel->dp_mode.v_active_low);
+ debug->panel->msm_dp_mode.h_active_low,
+ debug->panel->msm_dp_mode.v_active_low);
seq_printf(seq, "\t\th_skew = %d\n",
drm_mode->hskew);
seq_printf(seq, "\t\trefresh rate = %d\n",
@@ -64,7 +64,7 @@ static int dp_debug_show(struct seq_file *seq, void *p)
seq_printf(seq, "\t\tpixel clock khz = %d\n",
drm_mode->clock);
seq_printf(seq, "\t\tbpp = %d\n",
- debug->panel->dp_mode.bpp);
+ debug->panel->msm_dp_mode.bpp);
/* Link Information */
seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
@@ -83,11 +83,11 @@ static int dp_debug_show(struct seq_file *seq, void *p)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_debug);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_debug);
-static int dp_test_data_show(struct seq_file *m, void *data)
+static int msm_dp_test_data_show(struct seq_file *m, void *data)
{
- const struct dp_debug_private *debug = m->private;
+ const struct msm_dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
u32 bpc;
@@ -98,18 +98,18 @@ static int dp_test_data_show(struct seq_file *m, void *data)
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
- dp_link_bit_depth_to_bpp(bpc) / 3);
+ msm_dp_link_bit_depth_to_bpp(bpc) / 3);
} else {
seq_puts(m, "0");
}
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_test_data);
-static int dp_test_type_show(struct seq_file *m, void *data)
+static int msm_dp_test_type_show(struct seq_file *m, void *data)
{
- const struct dp_debug_private *debug = m->private;
+ const struct msm_dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected)
@@ -119,15 +119,15 @@ static int dp_test_type_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_test_type);
-static ssize_t dp_test_active_write(struct file *file,
+static ssize_t msm_dp_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
- const struct dp_debug_private *debug;
+ const struct msm_dp_debug_private *debug;
const struct drm_connector *connector;
int val = 0;
@@ -164,9 +164,9 @@ static ssize_t dp_test_active_write(struct file *file,
return len;
}
-static int dp_test_active_show(struct seq_file *m, void *data)
+static int msm_dp_test_active_show(struct seq_file *m, void *data)
{
- struct dp_debug_private *debug = m->private;
+ struct msm_dp_debug_private *debug = m->private;
struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected) {
@@ -181,28 +181,28 @@ static int dp_test_active_show(struct seq_file *m, void *data)
return 0;
}
-static int dp_test_active_open(struct inode *inode,
+static int msm_dp_test_active_open(struct inode *inode,
struct file *file)
{
- return single_open(file, dp_test_active_show,
+ return single_open(file, msm_dp_test_active_show,
inode->i_private);
}
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
- .open = dp_test_active_open,
+ .open = msm_dp_test_active_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .write = dp_test_active_write
+ .write = msm_dp_test_active_write
};
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root, bool is_edp)
{
- struct dp_debug_private *debug;
+ struct msm_dp_debug_private *debug;
if (!dev || !panel || !link) {
DRM_ERROR("invalid input\n");
@@ -217,20 +217,20 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel,
debug->panel = panel;
debugfs_create_file("dp_debug", 0444, root,
- debug, &dp_debug_fops);
+ debug, &msm_dp_debug_fops);
if (!is_edp) {
- debugfs_create_file("msm_dp_test_active", 0444,
+ debugfs_create_file("dp_test_active", 0444,
root,
debug, &test_active_fops);
- debugfs_create_file("msm_dp_test_data", 0444,
+ debugfs_create_file("dp_test_data", 0444,
root,
- debug, &dp_test_data_fops);
+ debug, &msm_dp_test_data_fops);
- debugfs_create_file("msm_dp_test_type", 0444,
+ debugfs_create_file("dp_test_type", 0444,
root,
- debug, &dp_test_type_fops);
+ debug, &msm_dp_test_type_fops);
}
return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 7e1aa892fc09..6dc0ff4f0f65 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -12,7 +12,7 @@
#if defined(CONFIG_DEBUG_FS)
/**
- * dp_debug_get() - configure and get the DisplayPort debug module data
+ * msm_dp_debug_get() - configure and get the DisplayPort debug module data
*
* @dev: device instance of the caller
* @panel: instance of panel module
@@ -25,8 +25,8 @@
* This function sets up the debug module and provides a way
* for debugfs input to be communicated with existing modules
*/
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root,
bool is_edp);
@@ -34,8 +34,8 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel,
#else
static inline
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root,
bool is_edp)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index e1228fb093ee..aba925aab7ad 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -67,13 +67,13 @@ enum {
#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
-struct dp_event {
+struct msm_dp_event {
u32 event_id;
u32 data;
u32 delay;
};
-struct dp_display_private {
+struct msm_dp_display_private {
int irq;
unsigned int id;
@@ -85,14 +85,14 @@ struct dp_display_private {
struct drm_device *drm_dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_panel *panel;
- struct dp_ctrl *ctrl;
+ struct msm_dp_link *link;
+ struct msm_dp_panel *panel;
+ struct msm_dp_ctrl *ctrl;
- struct dp_display_mode dp_mode;
- struct msm_dp dp_display;
+ struct msm_dp_display_mode msm_dp_mode;
+ struct msm_dp msm_dp_display;
/* wait for audio signaling */
struct completion audio_comp;
@@ -104,12 +104,12 @@ struct dp_display_private {
u32 event_pndx;
u32 event_gndx;
struct task_struct *ev_tsk;
- struct dp_event event_list[DP_EVENT_Q_MAX];
+ struct msm_dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
bool wide_bus_supported;
- struct dp_audio *audio;
+ struct msm_dp_audio *audio;
};
struct msm_dp_desc {
@@ -118,25 +118,33 @@ struct msm_dp_desc {
bool wide_bus_supported;
};
-static const struct msm_dp_desc sc7180_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x22154000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x2215c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
+ {}
+};
+
+static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc7280_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc7280[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc8180x_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc8180x[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc8280xp_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc8280xp[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
@@ -148,12 +156,12 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
{}
};
-static const struct msm_dp_desc sm8650_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sm8650[] = {
{ .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc x1e80100_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_x1e80100[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
@@ -161,70 +169,71 @@ static const struct msm_dp_desc x1e80100_dp_descs[] = {
{}
};
-static const struct of_device_id dp_dt_match[] = {
- { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
- { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs },
- { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs },
- { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs },
- { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs },
- { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs },
- { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
- { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs },
+static const struct of_device_id msm_dp_dt_match[] = {
+ { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p },
+ { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 },
+ { .compatible = "qcom,sc7280-edp", .data = &msm_dp_desc_sc7280 },
+ { .compatible = "qcom,sc8180x-dp", .data = &msm_dp_desc_sc8180x },
+ { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
+ { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
+ { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
+ { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
+ { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
{}
};
-static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
+static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
- return container_of(dp, struct dp_display_private, dp_display);
+ return container_of(dp, struct msm_dp_display_private, msm_dp_display);
}
-static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event,
u32 data, u32 delay)
{
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
int pndx;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- pndx = dp_priv->event_pndx + 1;
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ pndx = msm_dp_priv->event_pndx + 1;
pndx %= DP_EVENT_Q_MAX;
- if (pndx == dp_priv->event_gndx) {
+ if (pndx == msm_dp_priv->event_gndx) {
pr_err("event_q is full: pndx=%d gndx=%d\n",
- dp_priv->event_pndx, dp_priv->event_gndx);
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ msm_dp_priv->event_pndx, msm_dp_priv->event_gndx);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return -EPERM;
}
- todo = &dp_priv->event_list[dp_priv->event_pndx++];
- dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
+ msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo->event_id = event;
todo->data = data;
todo->delay = delay;
- wake_up(&dp_priv->event_q);
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ wake_up(&msm_dp_priv->event_q);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return 0;
}
-static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event)
{
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
u32 gndx;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- if (dp_priv->event_pndx == dp_priv->event_gndx) {
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return -ENOENT;
}
- gndx = dp_priv->event_gndx;
- while (dp_priv->event_pndx != gndx) {
- todo = &dp_priv->event_list[gndx];
+ gndx = msm_dp_priv->event_gndx;
+ while (msm_dp_priv->event_pndx != gndx) {
+ todo = &msm_dp_priv->event_list[gndx];
if (todo->event_id == event) {
todo->event_id = EV_NO_EVENT; /* deleted */
todo->delay = 0;
@@ -232,60 +241,60 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
gndx++;
gndx %= DP_EVENT_Q_MAX;
}
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return 0;
}
-void dp_display_signal_audio_start(struct msm_dp *dp_display)
+void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
reinit_completion(&dp->audio_comp);
}
-void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
complete_all(&dp->audio_comp);
}
-static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv);
-static int dp_display_bind(struct device *dev, struct device *master,
+static int msm_dp_display_bind(struct device *dev, struct device *master,
void *data)
{
int rc = 0;
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
struct drm_device *drm = priv->dev;
- dp->dp_display.drm_dev = drm;
- priv->dp[dp->id] = &dp->dp_display;
+ dp->msm_dp_display.drm_dev = drm;
+ priv->dp[dp->id] = &dp->msm_dp_display;
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
- rc = dp_aux_register(dp->aux);
+ rc = msm_dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
goto end;
}
- rc = dp_register_audio_driver(dev, dp->audio);
+ rc = msm_dp_register_audio_driver(dev, dp->audio);
if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
goto end;
}
- rc = dp_hpd_event_thread_start(dp);
+ rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
goto end;
@@ -296,44 +305,44 @@ end:
return rc;
}
-static void dp_display_unbind(struct device *dev, struct device *master,
+static void msm_dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
- dp_unregister_audio_driver(dev, dp->audio);
- dp_aux_unregister(dp->aux);
+ msm_dp_unregister_audio_driver(dev, dp->audio);
+ msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
priv->dp[dp->id] = NULL;
}
-static const struct component_ops dp_display_comp_ops = {
- .bind = dp_display_bind,
- .unbind = dp_display_unbind,
+static const struct component_ops msm_dp_display_comp_ops = {
+ .bind = msm_dp_display_bind,
+ .unbind = msm_dp_display_unbind,
};
-static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+static void msm_dp_display_send_hpd_event(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
struct drm_connector *connector;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- connector = dp->dp_display.connector;
+ connector = dp->msm_dp_display.connector;
drm_helper_hpd_irq_event(connector->dev);
}
-static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.link_ready) ||
- (!hpd && !dp->dp_display.link_ready)) {
+ if ((hpd && dp->msm_dp_display.link_ready) ||
+ (!hpd && !dp->msm_dp_display.link_ready)) {
drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
(hpd ? "on" : "off"));
return 0;
@@ -342,139 +351,139 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
/* reset video pattern flag on disconnect */
if (!hpd) {
dp->panel->video_test = false;
- if (!dp->dp_display.is_edp)
- drm_dp_set_subconnector_property(dp->dp_display.connector,
+ if (!dp->msm_dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->msm_dp_display.connector,
connector_status_disconnected,
dp->panel->dpcd,
dp->panel->downstream_ports);
}
- dp->dp_display.link_ready = hpd;
+ dp->msm_dp_display.link_ready = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
- dp->dp_display.connector_type, hpd);
- dp_display_send_hpd_event(&dp->dp_display);
+ dp->msm_dp_display.connector_type, hpd);
+ msm_dp_display_send_hpd_event(&dp->msm_dp_display);
return 0;
}
-static int dp_display_process_hpd_high(struct dp_display_private *dp)
+static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
{
- struct drm_connector *connector = dp->dp_display.connector;
+ struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
- rc = dp_panel_read_sink_caps(dp->panel, connector);
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
if (rc)
goto end;
- dp_link_process_request(dp->link);
+ msm_dp_link_process_request(dp->link);
- if (!dp->dp_display.is_edp)
+ if (!dp->msm_dp_display.is_edp)
drm_dp_set_subconnector_property(connector,
connector_status_connected,
dp->panel->dpcd,
dp->panel->downstream_ports);
- dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
+ dp->msm_dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
dp->audio_supported = info->has_audio;
- dp_panel_handle_sink_request(dp->panel);
+ msm_dp_panel_handle_sink_request(dp->panel);
/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+ msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false);
- dp_link_reset_phy_params_vx_px(dp->link);
- rc = dp_ctrl_on_link(dp->ctrl);
+ msm_dp_link_reset_phy_params_vx_px(dp->link);
+ rc = msm_dp_ctrl_on_link(dp->ctrl);
if (rc) {
DRM_ERROR("failed to complete DP link training\n");
goto end;
}
- dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
end:
return rc;
}
-static void dp_display_host_phy_init(struct dp_display_private *dp)
+static void msm_dp_display_host_phy_init(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (!dp->phy_initialized) {
- dp_ctrl_phy_init(dp->ctrl);
+ msm_dp_ctrl_phy_init(dp->ctrl);
dp->phy_initialized = true;
}
}
-static void dp_display_host_phy_exit(struct dp_display_private *dp)
+static void msm_dp_display_host_phy_exit(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (dp->phy_initialized) {
- dp_ctrl_phy_exit(dp->ctrl);
+ msm_dp_ctrl_phy_exit(dp->ctrl);
dp->phy_initialized = false;
}
}
-static void dp_display_host_init(struct dp_display_private *dp)
+static void msm_dp_display_host_init(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- dp_ctrl_core_clk_enable(dp->ctrl);
- dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
- dp_aux_init(dp->aux);
+ msm_dp_ctrl_core_clk_enable(dp->ctrl);
+ msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ msm_dp_aux_init(dp->aux);
dp->core_initialized = true;
}
-static void dp_display_host_deinit(struct dp_display_private *dp)
+static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
- dp_aux_deinit(dp->aux);
- dp_ctrl_core_clk_disable(dp->ctrl);
+ msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ msm_dp_aux_deinit(dp->aux);
+ msm_dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
}
-static int dp_display_usbpd_configure_cb(struct device *dev)
+static int msm_dp_display_usbpd_configure_cb(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
- dp_display_host_phy_init(dp);
+ msm_dp_display_host_phy_init(dp);
- return dp_display_process_hpd_high(dp);
+ return msm_dp_display_process_hpd_high(dp);
}
-static int dp_display_notify_disconnect(struct device *dev)
+static int msm_dp_display_notify_disconnect(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
return 0;
}
-static void dp_display_handle_video_request(struct dp_display_private *dp)
+static void msm_dp_display_handle_video_request(struct msm_dp_display_private *dp)
{
if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
dp->panel->video_test = true;
- dp_link_send_test_response(dp->link);
+ msm_dp_link_send_test_response(dp->link);
}
}
-static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
+static int msm_dp_display_handle_port_status_changed(struct msm_dp_display_private *dp)
{
int rc = 0;
@@ -482,12 +491,12 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
}
} else {
if (dp->hpd_state == ST_DISCONNECTED) {
dp->hpd_state = ST_MAINLINK_READY;
- rc = dp_display_process_hpd_high(dp);
+ rc = msm_dp_display_process_hpd_high(dp);
if (rc)
dp->hpd_state = ST_DISCONNECTED;
}
@@ -496,7 +505,7 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
return rc;
}
-static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+static int msm_dp_display_handle_irq_hpd(struct msm_dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
@@ -510,48 +519,48 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
}
}
- dp_ctrl_handle_sink_request(dp->ctrl);
+ msm_dp_ctrl_handle_sink_request(dp->ctrl);
if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
- dp_display_handle_video_request(dp);
+ msm_dp_display_handle_video_request(dp);
return 0;
}
-static int dp_display_usbpd_attention_cb(struct device *dev)
+static int msm_dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
u32 sink_request;
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
/* check for any test request issued by sink */
- rc = dp_link_process_request(dp->link);
+ rc = msm_dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
- rc = dp_display_handle_port_status_changed(dp);
+ rc = msm_dp_display_handle_port_status_changed(dp);
else
- rc = dp_display_handle_irq_hpd(dp);
+ rc = msm_dp_display_handle_irq_hpd(dp);
}
return rc;
}
-static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
int ret;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
- dp_aux_enable_xfers(dp->aux, true);
+ msm_dp_aux_enable_xfers(dp->aux, true);
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
@@ -565,7 +574,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
if (state == ST_DISCONNECT_PENDING) {
/* wait until ST_DISCONNECTED */
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -577,7 +586,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return ret;
}
- ret = dp_display_usbpd_configure_cb(&pdev->dev);
+ ret = msm_dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
pm_runtime_put_sync(&pdev->dev);
@@ -586,60 +595,60 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
}
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
/* uevent will complete connection part */
return 0;
};
-static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
bool plugged)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display,
- struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display,
+ struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (dp_display->plugged_cb && dp_display->codec_dev &&
+ if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
dp->audio_supported)
- dp_display->plugged_cb(dp_display->codec_dev, plugged);
+ msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
}
-static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
- dp_aux_enable_xfers(dp->aux, false);
+ msm_dp_aux_enable_xfers(dp->aux, false);
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
/* unplugged, no more irq_hpd handle */
- dp_del_event(dp, EV_IRQ_HPD_INT);
+ msm_dp_del_event(dp, EV_IRQ_HPD_INT);
if (state == ST_DISCONNECTED) {
/* triggered by irq_hpd with sink_count = 0 */
if (dp->link->sink_count == 0) {
- dp_display_host_phy_exit(dp);
+ msm_dp_display_host_phy_exit(dp);
}
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_MAINLINK_READY) {
- dp_ctrl_off_link(dp->ctrl);
- dp_display_host_phy_exit(dp);
+ msm_dp_ctrl_off_link(dp->ctrl);
+ msm_dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
@@ -649,7 +658,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
if (state == ST_DISPLAY_OFF) {
dp->hpd_state = ST_DISCONNECTED;
@@ -658,10 +667,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
}
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(&dp->dp_display, false);
+ msm_dp_display_handle_plugged_change(&dp->msm_dp_display, false);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
/* uevent will complete disconnection part */
pm_runtime_put_sync(&pdev->dev);
@@ -669,7 +678,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
@@ -678,7 +687,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
@@ -687,33 +696,33 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
/* wait until ST_CONNECTED */
- dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
- dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev);
+ msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
return 0;
}
-static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+static void msm_dp_display_deinit_sub_modules(struct msm_dp_display_private *dp)
{
- dp_audio_put(dp->audio);
- dp_panel_put(dp->panel);
- dp_aux_put(dp->aux);
+ msm_dp_audio_put(dp->audio);
+ msm_dp_panel_put(dp->panel);
+ msm_dp_aux_put(dp->aux);
}
-static int dp_init_sub_modules(struct dp_display_private *dp)
+static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
{
int rc = 0;
- struct device *dev = &dp->dp_display.pdev->dev;
- struct dp_panel_in panel_in = {
+ struct device *dev = &dp->msm_dp_display.pdev->dev;
+ struct msm_dp_panel_in panel_in = {
.dev = dev,
};
struct phy *phy;
@@ -723,14 +732,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
return PTR_ERR(phy);
rc = phy_set_mode_ext(phy, PHY_MODE_DP,
- dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
+ dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
if (rc) {
DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
dp->catalog = NULL;
goto error;
}
- dp->catalog = dp_catalog_get(dev);
+ dp->catalog = msm_dp_catalog_get(dev);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
@@ -738,9 +747,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->aux = dp_aux_get(dev, dp->catalog,
+ dp->aux = msm_dp_aux_get(dev, dp->catalog,
phy,
- dp->dp_display.is_edp);
+ dp->msm_dp_display.is_edp);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -748,7 +757,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->link = dp_link_get(dev, dp->aux);
+ dp->link = msm_dp_link_get(dev, dp->aux);
if (IS_ERR(dp->link)) {
rc = PTR_ERR(dp->link);
DRM_ERROR("failed to initialize link, rc = %d\n", rc);
@@ -760,7 +769,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
panel_in.catalog = dp->catalog;
panel_in.link = dp->link;
- dp->panel = dp_panel_get(&panel_in);
+ dp->panel = msm_dp_panel_get(&panel_in);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -768,7 +777,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_link;
}
- dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+ dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
dp->catalog,
phy);
if (IS_ERR(dp->ctrl)) {
@@ -778,7 +787,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -789,51 +798,51 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
return rc;
error_ctrl:
- dp_panel_put(dp->panel);
+ msm_dp_panel_put(dp->panel);
error_link:
- dp_aux_put(dp->aux);
+ msm_dp_aux_put(dp->aux);
error:
return rc;
}
-static int dp_display_set_mode(struct msm_dp *dp_display,
- struct dp_display_mode *mode)
+static int msm_dp_display_set_mode(struct msm_dp *msm_dp_display,
+ struct msm_dp_display_mode *mode)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
- dp->panel->dp_mode.bpp = mode->bpp;
- dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
- dp_panel_init_panel_info(dp->panel);
+ drm_mode_copy(&dp->panel->msm_dp_mode.drm_mode, &mode->drm_mode);
+ dp->panel->msm_dp_mode.bpp = mode->bpp;
+ dp->panel->msm_dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
+ msm_dp_panel_init_panel_info(dp->panel);
return 0;
}
-static int dp_display_enable(struct dp_display_private *dp, bool force_link_train)
+static int msm_dp_display_enable(struct msm_dp_display_private *dp, bool force_link_train)
{
int rc = 0;
- struct msm_dp *dp_display = &dp->dp_display;
+ struct msm_dp *msm_dp_display = &dp->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count);
- if (dp_display->power_on) {
+ if (msm_dp_display->power_on) {
drm_dbg_dp(dp->drm_dev, "Link already setup, return\n");
return 0;
}
- rc = dp_ctrl_on_stream(dp->ctrl, force_link_train);
+ rc = msm_dp_ctrl_on_stream(dp->ctrl, force_link_train);
if (!rc)
- dp_display->power_on = true;
+ msm_dp_display->power_on = true;
return rc;
}
-static int dp_display_post_enable(struct msm_dp *dp_display)
+static int msm_dp_display_post_enable(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
u32 rate;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
rate = dp->link->link_params.rate;
@@ -843,85 +852,85 @@ static int dp_display_post_enable(struct msm_dp *dp_display)
}
/* signal the connect event late to synchronize video and display */
- dp_display_handle_plugged_change(dp_display, true);
+ msm_dp_display_handle_plugged_change(msm_dp_display, true);
- if (dp_display->psr_supported)
- dp_ctrl_config_psr(dp->ctrl);
+ if (msm_dp_display->psr_supported)
+ msm_dp_ctrl_config_psr(dp->ctrl);
return 0;
}
-static int dp_display_disable(struct dp_display_private *dp)
+static int msm_dp_display_disable(struct msm_dp_display_private *dp)
{
- struct msm_dp *dp_display = &dp->dp_display;
+ struct msm_dp *msm_dp_display = &dp->msm_dp_display;
- if (!dp_display->power_on)
+ if (!msm_dp_display->power_on)
return 0;
/* wait only if audio was enabled */
- if (dp_display->audio_enabled) {
+ if (msm_dp_display->audio_enabled) {
/* signal the disconnect event */
- dp_display_handle_plugged_change(dp_display, false);
+ msm_dp_display_handle_plugged_change(msm_dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
}
- dp_display->audio_enabled = false;
+ msm_dp_display->audio_enabled = false;
if (dp->link->sink_count == 0) {
/*
* irq_hpd with sink_count = 0
* hdmi unplugged out of dongle
*/
- dp_ctrl_off_link_stream(dp->ctrl);
+ msm_dp_ctrl_off_link_stream(dp->ctrl);
} else {
/*
* unplugged interrupt
* dongle unplugged out of DUT
*/
- dp_ctrl_off(dp->ctrl);
- dp_display_host_phy_exit(dp);
+ msm_dp_ctrl_off(dp->ctrl);
+ msm_dp_display_host_phy_exit(dp);
}
- dp_display->power_on = false;
+ msm_dp_display->power_on = false;
drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count);
return 0;
}
-int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev)
{
bool plugged;
- dp_display->plugged_cb = fn;
- dp_display->codec_dev = codec_dev;
- plugged = dp_display->link_ready;
- dp_display_handle_plugged_change(dp_display, plugged);
+ msm_dp_display->plugged_cb = fn;
+ msm_dp_display->codec_dev = codec_dev;
+ plugged = msm_dp_display->link_ready;
+ msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
return 0;
}
/**
- * dp_bridge_mode_valid - callback to determine if specified mode is valid
+ * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
-enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
const u32 num_components = 3, default_bpp = 24;
- struct dp_display_private *dp_display;
- struct dp_link_info *link_info;
+ struct msm_dp_display_private *msm_dp_display;
+ struct msm_dp_link_info *link_info;
u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
@@ -931,18 +940,18 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- link_info = &dp_display->panel->link_info;
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ link_info = &msm_dp_display->panel->link_info;
if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
- dp_display->panel->vsc_sdp_supported)
+ msm_dp_display->panel->vsc_sdp_supported)
mode_pclk_khz /= 2;
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
- mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+ mode_bpp = msm_dp_panel_get_mode_bpp(msm_dp_display->panel,
mode_bpp, mode_pclk_khz);
mode_rate_khz = mode_pclk_khz * mode_bpp;
@@ -954,50 +963,50 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-int dp_display_get_modes(struct msm_dp *dp)
+int msm_dp_display_get_modes(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_panel_get_modes(dp_display->panel,
+ return msm_dp_panel_get_modes(msm_dp_display->panel,
dp->connector);
}
-bool dp_display_check_video_test(struct msm_dp *dp)
+bool msm_dp_display_check_video_test(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_display->panel->video_test;
+ return msm_dp_display->panel->video_test;
}
-int dp_display_get_test_bpp(struct msm_dp *dp)
+int msm_dp_display_get_test_bpp(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_link_bit_depth_to_bpp(
- dp_display->link->test_video.test_bit_depth);
+ return msm_dp_link_bit_depth_to_bpp(
+ msm_dp_display->link->test_video.test_bit_depth);
}
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
/*
* if we are reading registers we need the link clocks to be on
@@ -1006,65 +1015,65 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
* power_on status before dumping DP registers to avoid crash due
* to unclocked access
*/
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
if (!dp->power_on) {
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- dp_catalog_snapshot(dp_display->catalog, disp_state);
+ msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
+void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- if (!dp_display) {
+ if (!msm_dp_display) {
DRM_ERROR("invalid params\n");
return;
}
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_ctrl_set_psr(dp->ctrl, enter);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ msm_dp_ctrl_set_psr(dp->ctrl, enter);
}
static int hpd_event_thread(void *data)
{
- struct dp_display_private *dp_priv;
+ struct msm_dp_display_private *msm_dp_priv;
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
int timeout_mode = 0;
- dp_priv = (struct dp_display_private *)data;
+ msm_dp_priv = (struct msm_dp_display_private *)data;
while (1) {
if (timeout_mode) {
- wait_event_timeout(dp_priv->event_q,
- (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ wait_event_timeout(msm_dp_priv->event_q,
+ (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) ||
kthread_should_stop(), EVENT_TIMEOUT);
} else {
- wait_event_interruptible(dp_priv->event_q,
- (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ wait_event_interruptible(msm_dp_priv->event_q,
+ (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) ||
kthread_should_stop());
}
if (kthread_should_stop())
break;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- todo = &dp_priv->event_list[dp_priv->event_gndx];
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx];
if (todo->delay) {
- struct dp_event *todo_next;
+ struct msm_dp_event *todo_next;
- dp_priv->event_gndx++;
- dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ msm_dp_priv->event_gndx++;
+ msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
/* re-enter delay event into q */
- todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
- dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
+ msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo_next->event_id = todo->event_id;
todo_next->data = todo->data;
todo_next->delay = todo->delay - 1;
@@ -1075,33 +1084,33 @@ static int hpd_event_thread(void *data)
/* switch to timeout mode */
timeout_mode = 1;
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
continue;
}
/* timeout with no events in q */
- if (dp_priv->event_pndx == dp_priv->event_gndx) {
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
continue;
}
- dp_priv->event_gndx++;
- dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ msm_dp_priv->event_gndx++;
+ msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
timeout_mode = 0;
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
switch (todo->event_id) {
case EV_HPD_PLUG_INT:
- dp_hpd_plug_handle(dp_priv, todo->data);
+ msm_dp_hpd_plug_handle(msm_dp_priv, todo->data);
break;
case EV_HPD_UNPLUG_INT:
- dp_hpd_unplug_handle(dp_priv, todo->data);
+ msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data);
break;
case EV_IRQ_HPD_INT:
- dp_irq_hpd_handle(dp_priv, todo->data);
+ msm_dp_irq_hpd_handle(msm_dp_priv, todo->data);
break;
case EV_USER_NOTIFICATION:
- dp_display_send_hpd_notification(dp_priv,
+ msm_dp_display_send_hpd_notification(msm_dp_priv,
todo->data);
break;
default:
@@ -1112,22 +1121,22 @@ static int hpd_event_thread(void *data)
return 0;
}
-static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
+static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv)
{
/* set event q to empty */
- dp_priv->event_gndx = 0;
- dp_priv->event_pndx = 0;
+ msm_dp_priv->event_gndx = 0;
+ msm_dp_priv->event_pndx = 0;
- dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
- if (IS_ERR(dp_priv->ev_tsk))
- return PTR_ERR(dp_priv->ev_tsk);
+ msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler");
+ if (IS_ERR(msm_dp_priv->ev_tsk))
+ return PTR_ERR(msm_dp_priv->ev_tsk);
return 0;
}
-static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
{
- struct dp_display_private *dp = dev_id;
+ struct msm_dp_display_private *dp = dev_id;
irqreturn_t ret = IRQ_NONE;
u32 hpd_isr_status;
@@ -1136,43 +1145,43 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+ hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
- dp->dp_display.connector_type, hpd_isr_status);
+ dp->msm_dp_display.connector_type, hpd_isr_status);
/* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
- dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+ msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
}
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
ret = IRQ_HANDLED;
}
/* DP controller isr */
- ret |= dp_ctrl_isr(dp->ctrl);
+ ret |= msm_dp_ctrl_isr(dp->ctrl);
/* DP aux isr */
- ret |= dp_aux_isr(dp->aux);
+ ret |= msm_dp_aux_isr(dp->aux);
return ret;
}
-static int dp_display_request_irq(struct dp_display_private *dp)
+static int msm_dp_display_request_irq(struct msm_dp_display_private *dp)
{
int rc = 0;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
dp->irq = platform_get_irq(pdev, 0);
if (dp->irq < 0) {
@@ -1180,7 +1189,7 @@ static int dp_display_request_irq(struct dp_display_private *dp)
return dp->irq;
}
- rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler,
+ rc = devm_request_irq(&pdev->dev, dp->irq, msm_dp_display_irq_handler,
IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN,
"dp_display_isr", dp);
@@ -1193,7 +1202,7 @@ static int dp_display_request_irq(struct dp_display_private *dp)
return 0;
}
-static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev)
+static const struct msm_dp_desc *msm_dp_display_get_desc(struct platform_device *pdev)
{
const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev);
struct resource *res;
@@ -1212,7 +1221,7 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde
return NULL;
}
-static int dp_display_probe_tail(struct device *dev)
+static int msm_dp_display_probe_tail(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
int ret;
@@ -1232,19 +1241,19 @@ static int dp_display_probe_tail(struct device *dev)
return ret;
}
- ret = component_add(dev, &dp_display_comp_ops);
+ ret = component_add(dev, &msm_dp_display_comp_ops);
if (ret)
DRM_ERROR("component add failed, rc=%d\n", ret);
return ret;
}
-static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
+static int msm_dp_auxbus_done_probe(struct drm_dp_aux *aux)
{
- return dp_display_probe_tail(aux->dev);
+ return msm_dp_display_probe_tail(aux->dev);
}
-static int dp_display_get_connector_type(struct platform_device *pdev,
+static int msm_dp_display_get_connector_type(struct platform_device *pdev,
const struct msm_dp_desc *desc)
{
struct device_node *node = pdev->dev.of_node;
@@ -1263,10 +1272,10 @@ static int dp_display_get_connector_type(struct platform_device *pdev,
return connector_type;
}
-static int dp_display_probe(struct platform_device *pdev)
+static int msm_dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
const struct msm_dp_desc *desc;
if (!pdev || !pdev->dev.of_node) {
@@ -1278,18 +1287,18 @@ static int dp_display_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
- desc = dp_display_get_desc(pdev);
+ desc = msm_dp_display_get_desc(pdev);
if (!desc)
return -EINVAL;
- dp->dp_display.pdev = pdev;
+ dp->msm_dp_display.pdev = pdev;
dp->id = desc->id;
- dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc);
+ dp->msm_dp_display.connector_type = msm_dp_display_get_connector_type(pdev, desc);
dp->wide_bus_supported = desc->wide_bus_supported;
- dp->dp_display.is_edp =
- (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+ dp->msm_dp_display.is_edp =
+ (dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
- rc = dp_init_sub_modules(dp);
+ rc = msm_dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
return -EPROBE_DEFER;
@@ -1301,28 +1310,28 @@ static int dp_display_probe(struct platform_device *pdev)
spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
- dp->dp_display.dp_audio = dp->audio;
+ dp->msm_dp_display.msm_dp_audio = dp->audio;
init_completion(&dp->audio_comp);
- platform_set_drvdata(pdev, &dp->dp_display);
+ platform_set_drvdata(pdev, &dp->msm_dp_display);
rc = devm_pm_runtime_enable(&pdev->dev);
if (rc)
goto err;
- rc = dp_display_request_irq(dp);
+ rc = msm_dp_display_request_irq(dp);
if (rc)
goto err;
- if (dp->dp_display.is_edp) {
- rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe);
+ if (dp->msm_dp_display.is_edp) {
+ rc = devm_of_dp_aux_populate_bus(dp->aux, msm_dp_auxbus_done_probe);
if (rc) {
DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc);
goto err;
}
} else {
- rc = dp_display_probe_tail(&pdev->dev);
+ rc = msm_dp_display_probe_tail(&pdev->dev);
if (rc)
goto err;
}
@@ -1330,70 +1339,70 @@ static int dp_display_probe(struct platform_device *pdev)
return rc;
err:
- dp_display_deinit_sub_modules(dp);
+ msm_dp_display_deinit_sub_modules(dp);
return rc;
}
-static void dp_display_remove(struct platform_device *pdev)
+static void msm_dp_display_remove(struct platform_device *pdev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
- component_del(&pdev->dev, &dp_display_comp_ops);
- dp_display_deinit_sub_modules(dp);
+ component_del(&pdev->dev, &msm_dp_display_comp_ops);
+ msm_dp_display_deinit_sub_modules(dp);
platform_set_drvdata(pdev, NULL);
}
-static int dp_pm_runtime_suspend(struct device *dev)
+static int msm_dp_pm_runtime_suspend(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
disable_irq(dp->irq);
- if (dp->dp_display.is_edp) {
- dp_display_host_phy_exit(dp);
- dp_catalog_ctrl_hpd_disable(dp->catalog);
+ if (dp->msm_dp_display.is_edp) {
+ msm_dp_display_host_phy_exit(dp);
+ msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
}
- dp_display_host_deinit(dp);
+ msm_dp_display_host_deinit(dp);
return 0;
}
-static int dp_pm_runtime_resume(struct device *dev)
+static int msm_dp_pm_runtime_resume(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
/*
* for eDP, host controller, HPD block and PHY are enabled here
* but with HPD irq disabled
*
* for DP, only host controller is enabled here.
- * HPD block is enabled at dp_bridge_hpd_enable()
+ * HPD block is enabled at msm_dp_bridge_hpd_enable()
* PHY will be enabled at plugin handler later
*/
- dp_display_host_init(dp);
- if (dp->dp_display.is_edp) {
- dp_catalog_ctrl_hpd_enable(dp->catalog);
- dp_display_host_phy_init(dp);
+ msm_dp_display_host_init(dp);
+ if (dp->msm_dp_display.is_edp) {
+ msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_display_host_phy_init(dp);
}
enable_irq(dp->irq);
return 0;
}
-static const struct dev_pm_ops dp_pm_ops = {
- SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL)
+static const struct dev_pm_ops msm_dp_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dp_pm_runtime_suspend, msm_dp_pm_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
-static struct platform_driver dp_display_driver = {
- .probe = dp_display_probe,
- .remove_new = dp_display_remove,
+static struct platform_driver msm_dp_display_driver = {
+ .probe = msm_dp_display_probe,
+ .remove_new = msm_dp_display_remove,
.driver = {
.name = "msm-dp-display",
- .of_match_table = dp_dt_match,
+ .of_match_table = msm_dp_dt_match,
.suppress_bind_attrs = true,
- .pm = &dp_pm_ops,
+ .pm = &msm_dp_pm_ops,
},
};
@@ -1401,7 +1410,7 @@ int __init msm_dp_register(void)
{
int ret;
- ret = platform_driver_register(&dp_display_driver);
+ ret = platform_driver_register(&msm_dp_display_driver);
if (ret)
DRM_ERROR("Dp display driver register failed");
@@ -1410,294 +1419,294 @@ int __init msm_dp_register(void)
void __exit msm_dp_unregister(void)
{
- platform_driver_unregister(&dp_display_driver);
+ platform_driver_unregister(&msm_dp_display_driver);
}
-bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
+bool msm_dp_is_yuv_420_enabled(const struct msm_dp *msm_dp_display,
const struct drm_display_mode *mode)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
const struct drm_display_info *info;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- info = &dp_display->connector->display_info;
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ info = &msm_dp_display->connector->display_info;
return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode);
}
-bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
+bool msm_dp_needs_periph_flush(const struct msm_dp *msm_dp_display,
const struct drm_display_mode *mode)
{
- return msm_dp_is_yuv_420_enabled(dp_display, mode);
+ return msm_dp_is_yuv_420_enabled(msm_dp_display, mode);
}
-bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+bool msm_dp_wide_bus_available(const struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- if (dp->dp_mode.out_fmt_is_yuv_420)
+ if (dp->msm_dp_mode.out_fmt_is_yuv_420)
return false;
return dp->wide_bus_supported;
}
-void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
+void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *root, bool is_edp)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
struct device *dev;
int rc;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- dev = &dp->dp_display.pdev->dev;
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ dev = &dp->msm_dp_display.pdev->dev;
- rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp);
+ rc = msm_dp_debug_init(dev, dp->panel, dp->link, dp->msm_dp_display.connector, root, is_edp);
if (rc)
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
}
-int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct drm_encoder *encoder, bool yuv_supported)
{
- struct dp_display_private *dp_priv;
+ struct msm_dp_display_private *msm_dp_priv;
int ret;
- dp_display->drm_dev = dev;
+ msm_dp_display->drm_dev = dev;
- dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
+ msm_dp_priv = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- ret = dp_bridge_init(dp_display, dev, encoder);
+ ret = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
return ret;
}
- dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported);
- if (IS_ERR(dp_display->connector)) {
- ret = PTR_ERR(dp_display->connector);
+ msm_dp_display->connector = msm_dp_drm_connector_init(msm_dp_display, encoder);
+ if (IS_ERR(msm_dp_display->connector)) {
+ ret = PTR_ERR(msm_dp_display->connector);
DRM_DEV_ERROR(dev->dev,
"failed to create dp connector: %d\n", ret);
- dp_display->connector = NULL;
+ msm_dp_display->connector = NULL;
return ret;
}
- dp_priv->panel->connector = dp_display->connector;
+ msm_dp_priv->panel->connector = msm_dp_display->connector;
return 0;
}
-void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
int rc = 0;
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
u32 state;
bool force_link_train = false;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- if (!dp_display->dp_mode.drm_mode.clock) {
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ if (!msm_dp_display->msm_dp_mode.drm_mode.clock) {
DRM_ERROR("invalid params\n");
return;
}
if (dp->is_edp)
- dp_hpd_plug_handle(dp_display, 0);
+ msm_dp_hpd_plug_handle(msm_dp_display, 0);
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
DRM_ERROR("failed to pm_runtime_resume\n");
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+ rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode);
if (rc) {
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state == ST_DISPLAY_OFF) {
- dp_display_host_phy_init(dp_display);
+ msm_dp_display_host_phy_init(msm_dp_display);
force_link_train = true;
}
- dp_display_enable(dp_display, force_link_train);
+ msm_dp_display_enable(msm_dp_display, force_link_train);
- rc = dp_display_post_enable(dp);
+ rc = msm_dp_display_post_enable(dp);
if (rc) {
DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
- dp_display_disable(dp_display);
+ msm_dp_display_disable(msm_dp_display);
}
/* completed connection */
- dp_display->hpd_state = ST_CONNECTED;
+ msm_dp_display->hpd_state = ST_CONNECTED;
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
- struct dp_display_private *dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- dp_ctrl_push_idle(dp_display->ctrl);
+ msm_dp_ctrl_push_idle(msm_dp_display->ctrl);
}
-void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
u32 state;
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
if (dp->is_edp)
- dp_hpd_unplug_handle(dp_display, 0);
+ msm_dp_hpd_unplug_handle(msm_dp_display, 0);
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
dp->connector_type, state);
- dp_display_disable(dp_display);
+ msm_dp_display_disable(msm_dp_display);
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
- dp_display->hpd_state = ST_DISCONNECTED;
+ msm_dp_display->hpd_state = ST_DISCONNECTED;
} else {
- dp_display->hpd_state = ST_DISPLAY_OFF;
+ msm_dp_display->hpd_state = ST_DISPLAY_OFF;
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
pm_runtime_put_sync(&dp->pdev->dev);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
- struct dp_display_private *dp_display;
- struct dp_panel *dp_panel;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *msm_dp_display;
+ struct msm_dp_panel *msm_dp_panel;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- dp_panel = dp_display->panel;
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ msm_dp_panel = msm_dp_display->panel;
- memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+ memset(&msm_dp_display->msm_dp_mode, 0x0, sizeof(struct msm_dp_display_mode));
- if (dp_display_check_video_test(dp))
- dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+ if (msm_dp_display_check_video_test(dp))
+ msm_dp_display->msm_dp_mode.bpp = msm_dp_display_get_test_bpp(dp);
else /* Default num_components per pixel = 3 */
- dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+ msm_dp_display->msm_dp_mode.bpp = dp->connector->display_info.bpc * 3;
- if (!dp_display->dp_mode.bpp)
- dp_display->dp_mode.bpp = 24; /* Default bpp */
+ if (!msm_dp_display->msm_dp_mode.bpp)
+ msm_dp_display->msm_dp_mode.bpp = 24; /* Default bpp */
- drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+ drm_mode_copy(&msm_dp_display->msm_dp_mode.drm_mode, adjusted_mode);
- dp_display->dp_mode.v_active_low =
- !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+ msm_dp_display->msm_dp_mode.v_active_low =
+ !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
- dp_display->dp_mode.h_active_low =
- !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+ msm_dp_display->msm_dp_mode.h_active_low =
+ !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
- dp_display->dp_mode.out_fmt_is_yuv_420 =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 =
drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) &&
- dp_panel->vsc_sdp_supported;
+ msm_dp_panel->vsc_sdp_supported;
/* populate wide_bus_support to different layers */
- dp_display->ctrl->wide_bus_en =
- dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
- dp_display->catalog->wide_bus_en =
- dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
+ msm_dp_display->ctrl->wide_bus_en =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
+ msm_dp_display->catalog->wide_bus_en =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
}
-void dp_bridge_hpd_enable(struct drm_bridge *bridge)
+void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
/*
* this is for external DP with hpd irq enabled case,
- * step-1: dp_pm_runtime_resume() enable dp host only
+ * step-1: msm_dp_pm_runtime_resume() enable dp host only
* step-2: enable hpd block and have hpd irq enabled here
* step-3: waiting for plugin irq while phy is not initialized
* step-4: DP PHY is initialized at plugin handler before link training
*
*/
mutex_lock(&dp->event_mutex);
- if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) {
+ if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) {
DRM_ERROR("failed to resume power\n");
mutex_unlock(&dp->event_mutex);
return;
}
- dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
+ msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
- dp_display->internal_hpd = true;
+ msm_dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
}
-void dp_bridge_hpd_disable(struct drm_bridge *bridge)
+void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
mutex_lock(&dp->event_mutex);
/* disable HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
- dp_catalog_ctrl_hpd_disable(dp->catalog);
+ msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+ msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
- dp_display->internal_hpd = false;
+ msm_dp_display->internal_hpd = false;
- pm_runtime_put_sync(&dp_display->pdev->dev);
+ pm_runtime_put_sync(&msm_dp_display->pdev->dev);
mutex_unlock(&dp->event_mutex);
}
-void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
/* Without next_bridge interrupts are handled by the DP core directly */
- if (dp_display->internal_hpd)
+ if (msm_dp_display->internal_hpd)
return;
- if (!dp_display->link_ready && status == connector_status_connected)
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- else if (dp_display->link_ready && status == connector_status_disconnected)
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ if (!msm_dp_display->link_ready && status == connector_status_connected)
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ else if (msm_dp_display->link_ready && status == connector_status_disconnected)
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
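For reference, every helper above that takes the public struct msm_dp recovers the driver-private wrapper with container_of() on the embedded msm_dp_display member. The stand-alone sketch below shows that idiom with simplified stand-in structs; the field layouts are illustrative only and are not the driver's real definitions.

#include <stddef.h>
#include <stdio.h>

/* Local copy of the kernel's container_of() idiom, for illustration only. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins; the real msm_dp structs carry many more fields. */
struct msm_dp {
	int connector_type;
};

struct msm_dp_display_private {
	int hpd_state;
	struct msm_dp msm_dp_display;	/* public struct embedded in private */
};

int main(void)
{
	struct msm_dp_display_private priv = { .hpd_state = 2 };
	struct msm_dp *dp = &priv.msm_dp_display;	/* what callers hold */

	/* Walk back from the embedded member to the enclosing wrapper,
	 * exactly as the renamed msm_dp_display_* helpers do. */
	struct msm_dp_display_private *back =
		container_of(dp, struct msm_dp_display_private, msm_dp_display);

	printf("hpd_state=%d\n", back->hpd_state);	/* prints 2 */
	return 0;
}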
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ec7fa67e0569..ecbc2d92f546 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -27,18 +27,18 @@ struct msm_dp {
hdmi_codec_plugged_cb plugged_cb;
- struct dp_audio *dp_audio;
+ struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
-int dp_display_get_modes(struct msm_dp *dp_display);
-bool dp_display_check_video_test(struct msm_dp *dp_display);
-int dp_display_get_test_bpp(struct msm_dp *dp_display);
-void dp_display_signal_audio_start(struct msm_dp *dp_display);
-void dp_display_signal_audio_complete(struct msm_dp *dp_display);
-void dp_display_set_psr(struct msm_dp *dp, bool enter);
-void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp);
+int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
+bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
+int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
+void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display);
+void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display);
+void msm_dp_display_set_psr(struct msm_dp *dp, bool enter);
+void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *dentry, bool is_edp);
#endif /* _DP_DISPLAY_H_ */
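As a usage sketch of the plugged-callback hook declared above: an audio glue layer could register an hdmi_codec_plugged_cb (typedef from <sound/hdmi-codec.h>) through the renamed msm_dp_display_set_plugged_cb(). The caller below is hypothetical — my_codec_plugged() and my_codec_bind() are invented names, not part of this patch — and assumes the caller already holds a valid struct msm_dp pointer.

#include <linux/device.h>
#include <sound/hdmi-codec.h>
#include "dp_display.h"

/* Hypothetical callback: invoked with the codec device and the current
 * plugged state whenever the DP sink connects or disconnects. */
static void my_codec_plugged(struct device *dev, bool plugged)
{
	dev_dbg(dev, "DP audio sink %s\n", plugged ? "plugged" : "unplugged");
}

static int my_codec_bind(struct msm_dp *dp, struct device *codec_dev)
{
	/* Registers the callback; the current link_ready state is then
	 * propagated via msm_dp_display_handle_plugged_change(). */
	return msm_dp_display_set_plugged_cb(dp, my_codec_plugged, codec_dev);
}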
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 1b9be5bd97f1..d3e241ea6941 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -14,15 +14,15 @@
#include "dp_drm.h"
/**
- * dp_bridge_detect - callback to determine if connector is connected
+ * msm_dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
{
struct msm_dp *dp;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
(dp->link_ready) ? "true" : "false");
@@ -31,14 +31,14 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
connector_status_disconnected;
}
-static int dp_bridge_atomic_check(struct drm_bridge *bridge,
+static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct msm_dp *dp;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
(dp->link_ready) ? "true" : "false");
@@ -62,12 +62,12 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
/**
- * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * msm_dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
* @bridge: Pointer to drm bridge
* @connector: Pointer to drm connector structure
* Returns: Number of modes added
*/
-static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
+static int msm_dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
int rc = 0;
struct msm_dp *dp;
@@ -75,11 +75,11 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
if (!connector)
return 0;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
/* pluggable case assumes EDID is read when HPD */
if (dp->link_ready) {
- rc = dp_display_get_modes(dp);
+ rc = msm_dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
return rc;
@@ -90,37 +90,37 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
return rc;
}
-static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+static void msm_dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
{
- struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display;
- dp_display_debugfs_init(dp, root, false);
+ msm_dp_display_debugfs_init(dp, root, false);
}
-static const struct drm_bridge_funcs dp_bridge_ops = {
+static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_enable = dp_bridge_atomic_enable,
- .atomic_disable = dp_bridge_atomic_disable,
- .atomic_post_disable = dp_bridge_atomic_post_disable,
- .mode_set = dp_bridge_mode_set,
- .mode_valid = dp_bridge_mode_valid,
- .get_modes = dp_bridge_get_modes,
- .detect = dp_bridge_detect,
- .atomic_check = dp_bridge_atomic_check,
- .hpd_enable = dp_bridge_hpd_enable,
- .hpd_disable = dp_bridge_hpd_disable,
- .hpd_notify = dp_bridge_hpd_notify,
- .debugfs_init = dp_bridge_debugfs_init,
+ .atomic_enable = msm_dp_bridge_atomic_enable,
+ .atomic_disable = msm_dp_bridge_atomic_disable,
+ .atomic_post_disable = msm_dp_bridge_atomic_post_disable,
+ .mode_set = msm_dp_bridge_mode_set,
+ .mode_valid = msm_dp_bridge_mode_valid,
+ .get_modes = msm_dp_bridge_get_modes,
+ .detect = msm_dp_bridge_detect,
+ .atomic_check = msm_dp_bridge_atomic_check,
+ .hpd_enable = msm_dp_bridge_hpd_enable,
+ .hpd_disable = msm_dp_bridge_hpd_disable,
+ .hpd_notify = msm_dp_bridge_hpd_notify,
+ .debugfs_init = msm_dp_bridge_debugfs_init,
};
-static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
+static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(drm_bridge)->msm_dp_display;
if (WARN_ON(!conn_state))
return -ENODEV;
@@ -136,18 +136,18 @@ static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
return 0;
}
-static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
/*
* Check the old state of the crtc to determine if the panel
- * was put into psr state previously by the edp_bridge_atomic_disable.
+ * was put into psr state previously by the msm_edp_bridge_atomic_disable.
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
@@ -159,21 +159,21 @@ static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, false);
+ msm_dp_display_set_psr(dp, false);
return;
}
- dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
}
-static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state,
drm_bridge->encoder);
@@ -194,24 +194,24 @@ static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
* If old crtc state is active, then this is a display disable
* call while the sink is in psr state. So, exit psr here.
* The eDP controller will be disabled in the
- * edp_bridge_atomic_post_disable function.
+ * msm_edp_bridge_atomic_post_disable function.
*
* We observed sink is stuck in self refresh if psr exit is skipped
* when display disable occurs while the sink is in psr state.
*/
if (new_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, true);
+ msm_dp_display_set_psr(dp, true);
return;
} else if (old_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, false);
+ msm_dp_display_set_psr(dp, false);
return;
}
out:
- dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
}
-static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
@@ -228,29 +228,29 @@ static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
return;
/*
- * Self refresh mode is already set in edp_bridge_atomic_disable.
+ * Self refresh mode is already set in msm_edp_bridge_atomic_disable.
*/
if (new_crtc_state->self_refresh_active)
return;
- dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
}
/**
- * edp_bridge_mode_valid - callback to determine if specified mode is valid
+ * msm_edp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
-static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
+static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
@@ -268,42 +268,43 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+static void msm_edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
{
- struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display;
- dp_display_debugfs_init(dp, root, true);
+ msm_dp_display_debugfs_init(dp, root, true);
}
-static const struct drm_bridge_funcs edp_bridge_ops = {
- .atomic_enable = edp_bridge_atomic_enable,
- .atomic_disable = edp_bridge_atomic_disable,
- .atomic_post_disable = edp_bridge_atomic_post_disable,
- .mode_set = dp_bridge_mode_set,
- .mode_valid = edp_bridge_mode_valid,
+static const struct drm_bridge_funcs msm_edp_bridge_ops = {
+ .atomic_enable = msm_edp_bridge_atomic_enable,
+ .atomic_disable = msm_edp_bridge_atomic_disable,
+ .atomic_post_disable = msm_edp_bridge_atomic_post_disable,
+ .mode_set = msm_dp_bridge_mode_set,
+ .mode_valid = msm_edp_bridge_mode_valid,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_check = edp_bridge_atomic_check,
- .debugfs_init = edp_bridge_debugfs_init,
+ .atomic_check = msm_edp_bridge_atomic_check,
+ .debugfs_init = msm_edp_bridge_debugfs_init,
};
-int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder)
+int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder, bool yuv_supported)
{
int rc;
- struct msm_dp_bridge *dp_bridge;
+ struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
- if (!dp_bridge)
+ msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
+ if (!msm_dp_bridge)
return -ENOMEM;
- dp_bridge->dp_display = dp_display;
+ msm_dp_bridge->msm_dp_display = msm_dp_display;
- bridge = &dp_bridge->bridge;
- bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops;
- bridge->type = dp_display->connector_type;
+ bridge = &msm_dp_bridge->bridge;
+ bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
+ bridge->type = msm_dp_display->connector_type;
+ bridge->ycbcr_420_allowed = yuv_supported;
/*
* Many ops only make sense for DP. Why?
@@ -316,7 +317,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
* allows the panel driver to properly power itself on to read the
* modes.
*/
- if (!dp_display->is_edp) {
+ if (!msm_dp_display->is_edp) {
bridge->ops =
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
@@ -337,9 +338,9 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
return rc;
}
- if (dp_display->next_bridge) {
+ if (msm_dp_display->next_bridge) {
rc = drm_bridge_attach(encoder,
- dp_display->next_bridge, bridge,
+ msm_dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
@@ -351,21 +352,18 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
}
/* connector initialization */
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
- bool yuv_supported)
+struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
- connector = drm_bridge_connector_init(dp_display->drm_dev, encoder);
+ connector = drm_bridge_connector_init(msm_dp_display->drm_dev, encoder);
if (IS_ERR(connector))
return connector;
- if (!dp_display->is_edp)
+ if (!msm_dp_display->is_edp)
drm_connector_attach_dp_subconnector_property(connector);
- if (yuv_supported)
- connector->ycbcr_420_allowed = true;
-
drm_connector_attach_encoder(connector, encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 45e57ac25a4d..8eae2f74839f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -14,31 +14,32 @@
struct msm_dp_bridge {
struct drm_bridge bridge;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
};
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
- bool yuv_supported);
-int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder);
+struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display,
+ struct drm_encoder *encoder);
+int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder,
+ bool yuv_supported);
-void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void dp_bridge_hpd_enable(struct drm_bridge *bridge);
-void dp_bridge_hpd_disable(struct drm_bridge *bridge);
-void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge);
+void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge);
+void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status);
#endif /* _DP_DRM_H_ */
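
The to_dp_bridge() lookups throughout this patch rely on the usual container_of idiom: struct msm_dp_bridge embeds a struct drm_bridge, and the macro recovers the wrapper (and its msm_dp_display back-pointer) from the embedded member that DRM hands to each callback. A minimal, self-contained sketch of that idiom follows; the struct contents and the offsetof-based macro are illustrative stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdio.h>

/* illustrative stand-ins for the kernel types used by this patch */
struct drm_bridge { int type; };
struct msm_dp { int is_edp; };

struct msm_dp_bridge {
	struct drm_bridge bridge;      /* embedded base object passed to callbacks */
	struct msm_dp *msm_dp_display; /* driver-private back-pointer */
};

/* same effect as the kernel's container_of(): subtract the member offset */
#define to_dp_bridge(x) \
	((struct msm_dp_bridge *)((char *)(x) - offsetof(struct msm_dp_bridge, bridge)))

int main(void)
{
	struct msm_dp display = { .is_edp = 1 };
	struct msm_dp_bridge wrapper = { .bridge = { .type = 0 }, .msm_dp_display = &display };
	struct drm_bridge *bridge = &wrapper.bridge; /* what a bridge callback receives */

	/* a callback only sees *bridge, yet can still reach the msm_dp instance */
	struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display;
	printf("is_edp=%d\n", dp->is_edp);
	return 0;
}

The same embed-and-recover pattern is what keeps the rename mechanical: only the wrapper field and helper names change, not the object layout.
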
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index d8967615d84d..1a1fbb2d7d4f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -28,25 +28,25 @@ enum audio_pattern_type {
AUDIO_TEST_PATTERN_SAWTOOTH = 0x01,
};
-struct dp_link_request {
+struct msm_dp_link_request {
u32 test_requested;
u32 test_link_rate;
u32 test_lane_count;
};
-struct dp_link_private {
+struct msm_dp_link_private {
u32 prev_sink_count;
struct drm_device *drm_dev;
struct drm_dp_aux *aux;
- struct dp_link dp_link;
+ struct msm_dp_link msm_dp_link;
- struct dp_link_request request;
+ struct msm_dp_link_request request;
struct mutex psm_mutex;
u8 link_status[DP_LINK_STATUS_SIZE];
};
-static int dp_aux_link_power_up(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_power_up(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 value;
ssize_t len;
@@ -73,8 +73,8 @@ static int dp_aux_link_power_up(struct drm_dp_aux *aux,
return 0;
}
-static int dp_aux_link_power_down(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_power_down(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 value;
int err;
@@ -96,7 +96,7 @@ static int dp_aux_link_power_down(struct drm_dp_aux *aux,
return 0;
}
-static int dp_link_get_period(struct dp_link_private *link, int const addr)
+static int msm_dp_link_get_period(struct msm_dp_link_private *link, int const addr)
{
int ret = 0;
u8 data;
@@ -122,19 +122,19 @@ exit:
return ret;
}
-static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_channel_period(struct msm_dp_link_private *link)
{
int ret = 0;
- struct dp_link_test_audio *req = &link->dp_link.test_audio;
+ struct msm_dp_link_test_audio *req = &link->msm_dp_link.test_audio;
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_1 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
if (ret == -EINVAL)
goto exit;
@@ -142,42 +142,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret);
/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_3 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_4 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_5 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_6 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_7 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
if (ret == -EINVAL)
goto exit;
@@ -187,7 +187,7 @@ exit:
return ret;
}
-static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_pattern_type(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -208,13 +208,13 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
goto exit;
}
- link->dp_link.test_audio.test_audio_pattern_type = data;
+ link->msm_dp_link.test_audio.test_audio_pattern_type = data;
drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data);
exit:
return ret;
}
-static int dp_link_parse_audio_mode(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_mode(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -248,8 +248,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link)
goto exit;
}
- link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
- link->dp_link.test_audio.test_audio_channel_count = channel_count;
+ link->msm_dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+ link->msm_dp_link.test_audio.test_audio_channel_count = channel_count;
drm_dbg_dp(link->drm_dev,
"sampling_rate = 0x%x, channel_count = 0x%x\n",
sampling_rate, channel_count);
@@ -257,25 +257,25 @@ exit:
return ret;
}
-static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_pattern_params(struct msm_dp_link_private *link)
{
int ret = 0;
- ret = dp_link_parse_audio_mode(link);
+ ret = msm_dp_link_parse_audio_mode(link);
if (ret)
goto exit;
- ret = dp_link_parse_audio_pattern_type(link);
+ ret = msm_dp_link_parse_audio_pattern_type(link);
if (ret)
goto exit;
- ret = dp_link_parse_audio_channel_period(link);
+ ret = msm_dp_link_parse_audio_channel_period(link);
exit:
return ret;
}
-static bool dp_link_is_video_pattern_valid(u32 pattern)
+static bool msm_dp_link_is_video_pattern_valid(u32 pattern)
{
switch (pattern) {
case DP_NO_TEST_PATTERN:
@@ -289,12 +289,12 @@ static bool dp_link_is_video_pattern_valid(u32 pattern)
}
/**
- * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * msm_dp_link_is_bit_depth_valid() - validates the bit depth requested
* @tbd: bit depth requested by the sink
*
* Returns true if the requested bit depth is supported.
*/
-static bool dp_link_is_bit_depth_valid(u32 tbd)
+static bool msm_dp_link_is_bit_depth_valid(u32 tbd)
{
/* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
switch (tbd) {
@@ -307,7 +307,7 @@ static bool dp_link_is_bit_depth_valid(u32 tbd)
}
}
-static int dp_link_parse_timing_params1(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params1(struct msm_dp_link_private *link,
int addr, int len, u32 *val)
{
u8 bp[2];
@@ -328,7 +328,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link,
return 0;
}
-static int dp_link_parse_timing_params2(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params2(struct msm_dp_link_private *link,
int addr, int len,
u32 *val1, u32 *val2)
{
@@ -351,7 +351,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link,
return 0;
}
-static int dp_link_parse_timing_params3(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params3(struct msm_dp_link_private *link,
int addr, u32 *val)
{
u8 bp;
@@ -369,13 +369,13 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link,
}
/**
- * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * msm_dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
* @link: Display Port Driver data
*
* Returns 0 if it successfully parses the video link pattern and the link
 * bit depth requested by the sink, and if the values parsed are valid.
*/
-static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+static int msm_dp_link_parse_video_pattern_params(struct msm_dp_link_private *link)
{
int ret = 0;
ssize_t rlen;
@@ -388,13 +388,13 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
return rlen;
}
- if (!dp_link_is_video_pattern_valid(bp)) {
+ if (!msm_dp_link_is_video_pattern_valid(bp)) {
DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
- link->dp_link.test_video.test_video_pattern = bp;
+ link->msm_dp_link.test_video.test_video_pattern = bp;
/* Read the requested color bit depth and dynamic range (Byte 0x232) */
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
@@ -404,88 +404,88 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
}
/* Dynamic Range */
- link->dp_link.test_video.test_dyn_range =
+ link->msm_dp_link.test_video.test_dyn_range =
(bp & DP_TEST_DYNAMIC_RANGE_CEA);
/* Color bit depth */
bp &= DP_TEST_BIT_DEPTH_MASK;
- if (!dp_link_is_bit_depth_valid(bp)) {
+ if (!msm_dp_link_is_bit_depth_valid(bp)) {
DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
- link->dp_link.test_video.test_bit_depth = bp;
+ link->msm_dp_link.test_video.test_bit_depth = bp;
/* resolution timing params */
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
- &link->dp_link.test_video.test_h_total);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+ &link->msm_dp_link.test_video.test_h_total);
if (ret) {
DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
- &link->dp_link.test_video.test_v_total);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+ &link->msm_dp_link.test_video.test_v_total);
if (ret) {
DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
- &link->dp_link.test_video.test_h_start);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+ &link->msm_dp_link.test_video.test_h_start);
if (ret) {
DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
- &link->dp_link.test_video.test_v_start);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+ &link->msm_dp_link.test_video.test_v_start);
if (ret) {
DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
- &link->dp_link.test_video.test_hsync_pol,
- &link->dp_link.test_video.test_hsync_width);
+ ret = msm_dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+ &link->msm_dp_link.test_video.test_hsync_pol,
+ &link->msm_dp_link.test_video.test_hsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
- &link->dp_link.test_video.test_vsync_pol,
- &link->dp_link.test_video.test_vsync_width);
+ ret = msm_dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+ &link->msm_dp_link.test_video.test_vsync_pol,
+ &link->msm_dp_link.test_video.test_vsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
- &link->dp_link.test_video.test_h_width);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+ &link->msm_dp_link.test_video.test_h_width);
if (ret) {
DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
- &link->dp_link.test_video.test_v_height);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+ &link->msm_dp_link.test_video.test_v_height);
if (ret) {
DRM_ERROR("failed to parse test_v_height\n");
return ret;
}
- ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
- &link->dp_link.test_video.test_rr_d);
- link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+ ret = msm_dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+ &link->msm_dp_link.test_video.test_rr_d);
+ link->msm_dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
if (ret) {
DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
return ret;
}
- ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
- &link->dp_link.test_video.test_rr_n);
+ ret = msm_dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+ &link->msm_dp_link.test_video.test_rr_n);
if (ret) {
DRM_ERROR("failed to parse test_rr_n\n");
return ret;
@@ -505,34 +505,34 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
"TEST_V_HEIGHT = %d\n"
"TEST_REFRESH_DENOMINATOR = %d\n"
"TEST_REFRESH_NUMERATOR = %d\n",
- link->dp_link.test_video.test_video_pattern,
- link->dp_link.test_video.test_dyn_range,
- link->dp_link.test_video.test_bit_depth,
- link->dp_link.test_video.test_h_total,
- link->dp_link.test_video.test_v_total,
- link->dp_link.test_video.test_h_start,
- link->dp_link.test_video.test_v_start,
- link->dp_link.test_video.test_hsync_pol,
- link->dp_link.test_video.test_hsync_width,
- link->dp_link.test_video.test_vsync_pol,
- link->dp_link.test_video.test_vsync_width,
- link->dp_link.test_video.test_h_width,
- link->dp_link.test_video.test_v_height,
- link->dp_link.test_video.test_rr_d,
- link->dp_link.test_video.test_rr_n);
+ link->msm_dp_link.test_video.test_video_pattern,
+ link->msm_dp_link.test_video.test_dyn_range,
+ link->msm_dp_link.test_video.test_bit_depth,
+ link->msm_dp_link.test_video.test_h_total,
+ link->msm_dp_link.test_video.test_v_total,
+ link->msm_dp_link.test_video.test_h_start,
+ link->msm_dp_link.test_video.test_v_start,
+ link->msm_dp_link.test_video.test_hsync_pol,
+ link->msm_dp_link.test_video.test_hsync_width,
+ link->msm_dp_link.test_video.test_vsync_pol,
+ link->msm_dp_link.test_video.test_vsync_width,
+ link->msm_dp_link.test_video.test_h_width,
+ link->msm_dp_link.test_video.test_v_height,
+ link->msm_dp_link.test_video.test_rr_d,
+ link->msm_dp_link.test_video.test_rr_n);
return ret;
}
/**
- * dp_link_parse_link_training_params() - parses link training parameters from
+ * msm_dp_link_parse_link_training_params() - parses link training parameters from
* DPCD
* @link: Display Port Driver data
*
* Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
 * count (Byte 0x220), and if these parsed values are valid.
*/
-static int dp_link_parse_link_training_params(struct dp_link_private *link)
+static int msm_dp_link_parse_link_training_params(struct msm_dp_link_private *link)
{
u8 bp;
ssize_t rlen;
@@ -571,13 +571,13 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
}
/**
- * dp_link_parse_phy_test_params() - parses the phy link parameters
+ * msm_dp_link_parse_phy_test_params() - parses the phy link parameters
* @link: Display Port Driver data
*
* Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being
* requested.
*/
-static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+static int msm_dp_link_parse_phy_test_params(struct msm_dp_link_private *link)
{
u8 data;
ssize_t rlen;
@@ -589,7 +589,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
return rlen;
}
- link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+ link->msm_dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data);
@@ -608,12 +608,12 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
}
/**
- * dp_link_is_video_audio_test_requested() - checks for audio/video link request
+ * msm_dp_link_is_video_audio_test_requested() - checks for audio/video link request
* @link: link requested by the sink
*
* Returns true if the requested link is a permitted audio/video link.
*/
-static bool dp_link_is_video_audio_test_requested(u32 link)
+static bool msm_dp_link_is_video_audio_test_requested(u32 link)
{
u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
DP_TEST_LINK_AUDIO_PATTERN |
@@ -624,13 +624,13 @@ static bool dp_link_is_video_audio_test_requested(u32 link)
}
/**
- * dp_link_parse_request() - parses link request parameters from sink
+ * msm_dp_link_parse_request() - parses link request parameters from sink
* @link: Display Port Driver data
*
* Parses the DPCD to check if an automated link is requested (Byte 0x201),
* and what type of link automation is being requested (Byte 0x218).
*/
-static int dp_link_parse_request(struct dp_link_private *link)
+static int msm_dp_link_parse_request(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -672,27 +672,27 @@ static int dp_link_parse_request(struct dp_link_private *link)
drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data);
link->request.test_requested = data;
if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
- ret = dp_link_parse_phy_test_params(link);
+ ret = msm_dp_link_parse_phy_test_params(link);
if (ret)
goto end;
- ret = dp_link_parse_link_training_params(link);
+ ret = msm_dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
- ret = dp_link_parse_link_training_params(link);
+ ret = msm_dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
- if (dp_link_is_video_audio_test_requested(
+ if (msm_dp_link_is_video_audio_test_requested(
link->request.test_requested)) {
- ret = dp_link_parse_video_pattern_params(link);
+ ret = msm_dp_link_parse_video_pattern_params(link);
if (ret)
goto end;
- ret = dp_link_parse_audio_pattern_params(link);
+ ret = msm_dp_link_parse_audio_pattern_params(link);
}
end:
/*
@@ -700,29 +700,29 @@ end:
* a DP_TEST_NAK.
*/
if (ret) {
- link->dp_link.test_response = DP_TEST_NAK;
+ link->msm_dp_link.test_response = DP_TEST_NAK;
} else {
if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
- link->dp_link.test_response = DP_TEST_ACK;
+ link->msm_dp_link.test_response = DP_TEST_ACK;
else
- link->dp_link.test_response =
+ link->msm_dp_link.test_response =
DP_TEST_EDID_CHECKSUM_WRITE;
}
return ret;
}
-static int dp_link_parse_sink_status_field(struct dp_link_private *link)
+static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
int len;
- link->prev_sink_count = link->dp_link.sink_count;
+ link->prev_sink_count = link->msm_dp_link.sink_count;
len = drm_dp_read_sink_count(link->aux);
if (len < 0) {
DRM_ERROR("DP parse sink count failed\n");
return len;
}
- link->dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = len;
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
@@ -731,11 +731,11 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link)
return len;
}
- return dp_link_parse_request(link);
+ return msm_dp_link_parse_request(link);
}
/**
- * dp_link_process_link_training_request() - processes new training requests
+ * msm_dp_link_process_link_training_request() - processes new training requests
* @link: Display Port link data
*
* This function will handle new link training requests that are initiated by
@@ -745,7 +745,7 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link)
* The function will return 0 if a link training request has been processed,
* otherwise it will return -EINVAL.
*/
-static int dp_link_process_link_training_request(struct dp_link_private *link)
+static int msm_dp_link_process_link_training_request(struct msm_dp_link_private *link)
{
if (link->request.test_requested != DP_TEST_LINK_TRAINING)
return -EINVAL;
@@ -756,49 +756,49 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_link_rate,
link->request.test_lane_count);
- link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->msm_dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
}
-bool dp_link_send_test_response(struct dp_link *dp_link)
+bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
- dp_link->test_response);
+ msm_dp_link->test_response);
return ret == 1;
}
-int dp_link_psm_config(struct dp_link *dp_link,
- struct dp_link_info *link_info, bool enable)
+int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link,
+ struct msm_dp_link_info *link_info, bool enable)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid params\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
mutex_lock(&link->psm_mutex);
if (enable)
- ret = dp_aux_link_power_down(link->aux, link_info);
+ ret = msm_dp_aux_link_power_down(link->aux, link_info);
else
- ret = dp_aux_link_power_up(link->aux, link_info);
+ ret = msm_dp_aux_link_power_up(link->aux, link_info);
if (ret)
DRM_ERROR("Failed to %s low power mode\n", enable ?
@@ -808,24 +808,24 @@ int dp_link_psm_config(struct dp_link *dp_link,
return ret;
}
-bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
checksum);
return ret == 1;
}
-static void dp_link_parse_vx_px(struct dp_link_private *link)
+static void msm_dp_link_parse_vx_px(struct msm_dp_link_private *link)
{
drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_voltage(link->link_status, 0),
@@ -845,31 +845,31 @@ static void dp_link_parse_vx_px(struct dp_link_private *link)
*/
drm_dbg_dp(link->drm_dev,
"Current: v_level = 0x%x, p_level = 0x%x\n",
- link->dp_link.phy_params.v_level,
- link->dp_link.phy_params.p_level);
- link->dp_link.phy_params.v_level =
+ link->msm_dp_link.phy_params.v_level,
+ link->msm_dp_link.phy_params.p_level);
+ link->msm_dp_link.phy_params.v_level =
drm_dp_get_adjust_request_voltage(link->link_status, 0);
- link->dp_link.phy_params.p_level =
+ link->msm_dp_link.phy_params.p_level =
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
- link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ link->msm_dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
drm_dbg_dp(link->drm_dev,
"Requested: v_level = 0x%x, p_level = 0x%x\n",
- link->dp_link.phy_params.v_level,
- link->dp_link.phy_params.p_level);
+ link->msm_dp_link.phy_params.v_level,
+ link->msm_dp_link.phy_params.p_level);
}
/**
- * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * msm_dp_link_process_phy_test_pattern_request() - process new phy link requests
* @link: Display Port Driver data
*
* This function will handle new phy link pattern requests that are initiated
* by the sink. The function will return 0 if a phy link pattern has been
* processed, otherwise it will return -EINVAL.
*/
-static int dp_link_process_phy_test_pattern_request(
- struct dp_link_private *link)
+static int msm_dp_link_process_phy_test_pattern_request(
+ struct msm_dp_link_private *link)
{
if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
drm_dbg_dp(link->drm_dev, "no phy test\n");
@@ -886,24 +886,24 @@ static int dp_link_process_phy_test_pattern_request(
drm_dbg_dp(link->drm_dev,
"Current: rate = 0x%x, lane count = 0x%x\n",
- link->dp_link.link_params.rate,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.rate,
+ link->msm_dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"Requested: rate = 0x%x, lane count = 0x%x\n",
link->request.test_link_rate,
link->request.test_lane_count);
- link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->msm_dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
- dp_link_parse_vx_px(link);
+ msm_dp_link_parse_vx_px(link);
return 0;
}
-static bool dp_link_read_psr_error_status(struct dp_link_private *link)
+static bool msm_dp_link_read_psr_error_status(struct msm_dp_link_private *link)
{
u8 status;
@@ -921,7 +921,7 @@ static bool dp_link_read_psr_error_status(struct dp_link_private *link)
return true;
}
-static bool dp_link_psr_capability_changed(struct dp_link_private *link)
+static bool msm_dp_link_psr_capability_changed(struct msm_dp_link_private *link)
{
u8 status;
@@ -941,7 +941,7 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
}
/**
- * dp_link_process_link_status_update() - processes link status updates
+ * msm_dp_link_process_link_status_update() - processes link status updates
* @link: Display Port link module data
*
* This function will check for changes in the link status, e.g. clock
@@ -951,13 +951,13 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 * The function will return 0 if a link status update has been processed,
* otherwise it will return -EINVAL.
*/
-static int dp_link_process_link_status_update(struct dp_link_private *link)
+static int msm_dp_link_process_link_status_update(struct msm_dp_link_private *link)
{
bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.num_lanes);
bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"channel_eq_done = %d, clock_recovery_done = %d\n",
@@ -970,7 +970,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
}
/**
- * dp_link_process_ds_port_status_change() - process port status changes
+ * msm_dp_link_process_ds_port_status_change() - process port status changes
* @link: Display Port Driver data
*
* This function will handle downstream port updates that are initiated by
@@ -980,122 +980,122 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
* The function will return 0 if a downstream port update has been
* processed, otherwise it will return -EINVAL.
*/
-static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+static int msm_dp_link_process_ds_port_status_change(struct msm_dp_link_private *link)
{
if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
DP_DOWNSTREAM_PORT_STATUS_CHANGED)
goto reset;
- if (link->prev_sink_count == link->dp_link.sink_count)
+ if (link->prev_sink_count == link->msm_dp_link.sink_count)
return -EINVAL;
reset:
/* reset prev_sink_count */
- link->prev_sink_count = link->dp_link.sink_count;
+ link->prev_sink_count = link->msm_dp_link.sink_count;
return 0;
}
-static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+static bool msm_dp_link_is_video_pattern_requested(struct msm_dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
&& !(link->request.test_requested &
DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
}
-static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+static bool msm_dp_link_is_audio_pattern_requested(struct msm_dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
}
-static void dp_link_reset_data(struct dp_link_private *link)
+static void msm_dp_link_reset_data(struct msm_dp_link_private *link)
{
- link->request = (const struct dp_link_request){ 0 };
- link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
- link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
- link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
- link->dp_link.phy_params.phy_test_pattern_sel = 0;
- link->dp_link.sink_request = 0;
- link->dp_link.test_response = 0;
+ link->request = (const struct msm_dp_link_request){ 0 };
+ link->msm_dp_link.test_video = (const struct msm_dp_link_test_video){ 0 };
+ link->msm_dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+ link->msm_dp_link.test_audio = (const struct msm_dp_link_test_audio){ 0 };
+ link->msm_dp_link.phy_params.phy_test_pattern_sel = 0;
+ link->msm_dp_link.sink_request = 0;
+ link->msm_dp_link.test_response = 0;
}
/**
- * dp_link_process_request() - handle HPD IRQ transition to HIGH
- * @dp_link: pointer to link module data
+ * msm_dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @msm_dp_link: pointer to link module data
*
* This function will handle the HPD IRQ state transitions from LOW to HIGH
* (including cases when there are back to back HPD IRQ HIGH) indicating
* the start of a new link training request or sink status update.
*/
-int dp_link_process_request(struct dp_link *dp_link)
+int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link)
{
int ret = 0;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
- dp_link_reset_data(link);
+ msm_dp_link_reset_data(link);
- ret = dp_link_parse_sink_status_field(link);
+ ret = msm_dp_link_parse_sink_status_field(link);
if (ret)
return ret;
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
- dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
- } else if (!dp_link_process_ds_port_status_change(link)) {
- dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
- } else if (!dp_link_process_link_training_request(link)) {
- dp_link->sink_request |= DP_TEST_LINK_TRAINING;
- } else if (!dp_link_process_phy_test_pattern_request(link)) {
- dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
- } else if (dp_link_read_psr_error_status(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+ } else if (!msm_dp_link_process_ds_port_status_change(link)) {
+ msm_dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+ } else if (!msm_dp_link_process_link_training_request(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+ } else if (!msm_dp_link_process_phy_test_pattern_request(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+ } else if (msm_dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
- } else if (dp_link_psr_capability_changed(link)) {
+ } else if (msm_dp_link_psr_capability_changed(link)) {
drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
- ret = dp_link_process_link_status_update(link);
+ ret = msm_dp_link_process_link_status_update(link);
if (!ret) {
- dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ msm_dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
} else {
- if (dp_link_is_video_pattern_requested(link)) {
+ if (msm_dp_link_is_video_pattern_requested(link)) {
ret = 0;
- dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ msm_dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
}
- if (dp_link_is_audio_pattern_requested(link)) {
- dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ if (msm_dp_link_is_audio_pattern_requested(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
ret = -EINVAL;
}
}
}
drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
- dp_link->sink_request);
+ msm_dp_link->sink_request);
return ret;
}
-int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link)
{
u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/*
* Unless a video pattern CTS test is ongoing, use RGB_VESA
* Only RGB_VESA and RGB_CEA supported for now
*/
- if (dp_link_is_video_pattern_requested(link)) {
- if (link->dp_link.test_video.test_dyn_range &
+ if (msm_dp_link_is_video_pattern_requested(link)) {
+ if (link->msm_dp_link.test_video.test_dyn_range &
DP_TEST_DYNAMIC_RANGE_CEA)
cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
}
@@ -1103,22 +1103,22 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
return cc;
}
-int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status)
{
int i;
u8 max_p_level;
int v_max = 0, p_max = 0;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/* use the max level across lanes */
- for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+ for (i = 0; i < msm_dp_link->link_params.num_lanes; i++) {
u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
i);
@@ -1131,56 +1131,56 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
p_max = data_p;
}
- dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
- dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ msm_dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ msm_dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/**
* Adjust the voltage swing and pre-emphasis level combination to within
* the allowable range.
*/
- if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
+ if (msm_dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested vSwingLevel=%d, change to %d\n",
- dp_link->phy_params.v_level,
+ msm_dp_link->phy_params.v_level,
DP_TRAIN_LEVEL_MAX);
- dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
+ msm_dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
}
- if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
+ if (msm_dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
- dp_link->phy_params.p_level,
+ msm_dp_link->phy_params.p_level,
DP_TRAIN_LEVEL_MAX);
- dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
+ msm_dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
}
- max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
- if (dp_link->phy_params.p_level > max_p_level) {
+ max_p_level = DP_TRAIN_LEVEL_MAX - msm_dp_link->phy_params.v_level;
+ if (msm_dp_link->phy_params.p_level > max_p_level) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
- dp_link->phy_params.p_level,
+ msm_dp_link->phy_params.p_level,
max_p_level);
- dp_link->phy_params.p_level = max_p_level;
+ msm_dp_link->phy_params.p_level = max_p_level;
}
drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
- dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+ msm_dp_link->phy_params.v_level, msm_dp_link->phy_params.p_level);
return 0;
}
-void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link)
{
- dp_link->phy_params.v_level = 0;
- dp_link->phy_params.p_level = 0;
+ msm_dp_link->phy_params.v_level = 0;
+ msm_dp_link->phy_params.p_level = 0;
}
-u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp)
{
u32 tbd;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/*
* Few simplistic rules and assumptions made here:
@@ -1209,10 +1209,10 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
return tbd;
}
-struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux)
{
- struct dp_link_private *link;
- struct dp_link *dp_link;
+ struct msm_dp_link_private *link;
+ struct msm_dp_link *msm_dp_link;
if (!dev || !aux) {
DRM_ERROR("invalid input\n");
@@ -1226,7 +1226,7 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
link->aux = aux;
mutex_init(&link->psm_mutex);
- dp_link = &link->dp_link;
+ msm_dp_link = &link->msm_dp_link;
- return dp_link;
+ return msm_dp_link;
}
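
For orientation, here is a hypothetical caller sketch showing how the renamed link API is meant to be driven from an HPD-IRQ path: obtain the module once with msm_dp_link_get(), then on each IRQ call msm_dp_link_process_request() and dispatch on the sink_request bits it fills in. It assumes the driver's dp_link.h plus the DPCD defines from drm_dp.h, and it illustrates the flow above; it is not the actual handler in dp_display.c.

/* illustrative only: locking and error paths trimmed */
static void example_handle_irq_hpd(struct msm_dp_link *link)
{
	if (msm_dp_link_process_request(link))
		return;                  /* nothing actionable was parsed */

	if (link->sink_request & DP_TEST_LINK_TRAINING) {
		/* sink asked for retraining; link->link_params now holds the
		 * requested rate and lane count parsed from DPCD */
	} else if (link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
		/* program the pattern in link->phy_params.phy_test_pattern_sel
		 * together with the requested vx/px levels */
	} else if (link->sink_request & DP_LINK_STATUS_UPDATED) {
		/* clock recovery / channel EQ degraded: retrain at current params */
	}
}
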
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 5846337bb56f..8db5d5698a97 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -12,7 +12,7 @@
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
-struct dp_link_info {
+struct msm_dp_link_info {
unsigned char revision;
unsigned int rate;
unsigned int num_lanes;
@@ -21,7 +21,7 @@ struct dp_link_info {
#define DP_TRAIN_LEVEL_MAX 3
-struct dp_link_test_video {
+struct msm_dp_link_test_video {
u32 test_video_pattern;
u32 test_bit_depth;
u32 test_dyn_range;
@@ -39,7 +39,7 @@ struct dp_link_test_video {
u32 test_rr_n;
};
-struct dp_link_test_audio {
+struct msm_dp_link_test_audio {
u32 test_audio_sampling_rate;
u32 test_audio_channel_count;
u32 test_audio_pattern_type;
@@ -53,21 +53,21 @@ struct dp_link_test_audio {
u32 test_audio_period_ch_8;
};
-struct dp_link_phy_params {
+struct msm_dp_link_phy_params {
u32 phy_test_pattern_sel;
u8 v_level;
u8 p_level;
};
-struct dp_link {
+struct msm_dp_link {
u32 sink_request;
u32 test_response;
u8 sink_count;
- struct dp_link_test_video test_video;
- struct dp_link_test_audio test_audio;
- struct dp_link_phy_params phy_params;
- struct dp_link_info link_params;
+ struct msm_dp_link_test_video test_video;
+ struct msm_dp_link_test_audio test_audio;
+ struct msm_dp_link_phy_params phy_params;
+ struct msm_dp_link_info link_params;
};
/**
@@ -78,7 +78,7 @@ struct dp_link {
 * given bit depth value. This function assumes that bit depth has
* already been validated.
*/
-static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd)
{
/*
* Few simplistic rules and assumptions made here:
@@ -99,22 +99,22 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
}
}
-void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
-u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
-int dp_link_process_request(struct dp_link *dp_link);
-int dp_link_get_colorimetry_config(struct dp_link *dp_link);
-int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
-bool dp_link_send_test_response(struct dp_link *dp_link);
-int dp_link_psm_config(struct dp_link *dp_link,
- struct dp_link_info *link_info, bool enable);
-bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link);
+u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp);
+int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status);
+bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link,
+ struct msm_dp_link_info *link_info, bool enable);
+bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum);
/**
- * dp_link_get() - get the functionalities of dp test module
+ * msm_dp_link_get() - get the functionalities of dp test module
 *
- * return: a pointer to dp_link struct
+ * return: a pointer to msm_dp_link struct
*/
-struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux);
#endif /* _DP_LINK_H_ */
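
The TEST_BIT_DEPTH plumbing above boils down to simple arithmetic: the DPCD field encodes bits per colour component, and bpp is that value times three. A standalone sketch of the mapping follows; the DPCD encodings are assumed to match include/drm/display/drm_dp.h, and this is an analogue of msm_dp_link_bit_depth_to_bpp() (whose body is elided from the hunk above), not a copy of it.

#include <stdio.h>

/* DPCD TEST_BIT_DEPTH encodings, assumed to match drm_dp.h */
#define DP_TEST_BIT_DEPTH_6   (0 << 5)
#define DP_TEST_BIT_DEPTH_8   (1 << 5)
#define DP_TEST_BIT_DEPTH_10  (2 << 5)

/* bits per component -> bits per pixel: three colour components per pixel */
static unsigned int example_bit_depth_to_bpp(unsigned int tbd)
{
	switch (tbd) {
	case DP_TEST_BIT_DEPTH_6:  return 18;
	case DP_TEST_BIT_DEPTH_8:  return 24;
	case DP_TEST_BIT_DEPTH_10: return 30;
	default:                   return 0; /* unknown/unsupported depth */
	}
}

int main(void)
{
	printf("8 bpc -> %u bpp\n", example_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8));
	return 0;
}

This is also why msm_dp_panel_get_mode_bpp() in dp_panel.c below prefers the requested test depth while a video-pattern test is active: during CTS the test depth, not the EDID-derived value, supplies the mode's bpp.
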
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 6ff6c9ef351f..5d7eaa31bf31 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -14,52 +14,52 @@
#define DP_MAX_NUM_DP_LANES 4
#define DP_LINK_RATE_HBR2 540000 /* kHz */
-struct dp_panel_private {
+struct msm_dp_panel_private {
struct device *dev;
struct drm_device *drm_dev;
- struct dp_panel dp_panel;
+ struct msm_dp_panel msm_dp_panel;
struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_catalog *catalog;
+ struct msm_dp_link *link;
+ struct msm_dp_catalog *catalog;
bool panel_on;
};
-static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
+static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
{
ssize_t rlen;
- struct dp_panel *dp_panel;
+ struct msm_dp_panel *msm_dp_panel;
- dp_panel = &panel->dp_panel;
+ msm_dp_panel = &panel->msm_dp_panel;
/* edp sink */
- if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) {
+ if (msm_dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) {
rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT,
- &dp_panel->psr_cap, sizeof(dp_panel->psr_cap));
- if (rlen == sizeof(dp_panel->psr_cap)) {
+ &msm_dp_panel->psr_cap, sizeof(msm_dp_panel->psr_cap));
+ if (rlen == sizeof(msm_dp_panel->psr_cap)) {
drm_dbg_dp(panel->drm_dev,
"psr version: 0x%x, psr_cap: 0x%x\n",
- dp_panel->psr_cap.version,
- dp_panel->psr_cap.capabilities);
+ msm_dp_panel->psr_cap.version,
+ msm_dp_panel->psr_cap.capabilities);
} else
DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen);
}
}
-static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
int rc;
- struct dp_panel_private *panel;
- struct dp_link_info *link_info;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- dpcd = dp_panel->dpcd;
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ dpcd = msm_dp_panel->dpcd;
rc = drm_dp_read_dpcd_caps(panel->aux, dpcd);
if (rc)
return rc;
- dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd);
- link_info = &dp_panel->link_info;
+ msm_dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd);
+ link_info = &msm_dp_panel->link_info;
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
@@ -68,12 +68,12 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
link_info->num_lanes = drm_dp_max_lane_count(dpcd);
/* Limit data lanes from data-lanes of endpoint property of dtsi */
- if (link_info->num_lanes > dp_panel->max_dp_lanes)
- link_info->num_lanes = dp_panel->max_dp_lanes;
+ if (link_info->num_lanes > msm_dp_panel->max_dp_lanes)
+ link_info->num_lanes = msm_dp_panel->max_dp_lanes;
/* Limit link rate from link-frequencies of endpoint property of dtsi */
- if (link_info->rate > dp_panel->max_dp_link_rate)
- link_info->rate = dp_panel->max_dp_link_rate;
+ if (link_info->rate > msm_dp_panel->max_dp_link_rate)
+ link_info->rate = msm_dp_panel->max_dp_link_rate;
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
@@ -82,21 +82,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
- dp_panel_read_psr_cap(panel);
+ msm_dp_panel_read_psr_cap(panel);
return rc;
}
-static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+static u32 msm_dp_panel_get_supported_bpp(struct msm_dp_panel *msm_dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
- const struct dp_link_info *link_info;
+ const struct msm_dp_link_info *link_info;
const u32 max_supported_bpp = 30, min_supported_bpp = 18;
u32 bpp, data_rate_khz;
bpp = min(mode_edid_bpp, max_supported_bpp);
- link_info = &dp_panel->link_info;
+ link_info = &msm_dp_panel->link_info;
data_rate_khz = link_info->num_lanes * link_info->rate * 8;
do {
@@ -108,39 +108,39 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
return min_supported_bpp;
}
-int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector)
{
int rc, bw_code;
int count;
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel || !connector) {
+ if (!msm_dp_panel || !connector) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n",
- dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate);
+ msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate);
- rc = dp_panel_read_dpcd(dp_panel);
+ rc = msm_dp_panel_read_dpcd(msm_dp_panel);
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
- bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ bw_code = drm_dp_link_rate_to_bw_code(msm_dp_panel->link_info.rate);
if (!is_link_rate_valid(bw_code) ||
- !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
- (bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
- dp_panel->link_info.num_lanes);
+ !is_lane_count_valid(msm_dp_panel->link_info.num_lanes) ||
+ (bw_code > msm_dp_panel->max_bw_code)) {
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", msm_dp_panel->link_info.rate,
+ msm_dp_panel->link_info.num_lanes);
return -EINVAL;
}
- if (drm_dp_is_branch(dp_panel->dpcd)) {
+ if (drm_dp_is_branch(msm_dp_panel->dpcd)) {
count = drm_dp_read_sink_count(panel->aux);
if (!count) {
panel->link->sink_count = 0;
@@ -148,21 +148,21 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
}
}
- rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd,
- dp_panel->downstream_ports);
+ rc = drm_dp_read_downstream_info(panel->aux, msm_dp_panel->dpcd,
+ msm_dp_panel->downstream_ports);
if (rc)
return rc;
- drm_edid_free(dp_panel->drm_edid);
+ drm_edid_free(msm_dp_panel->drm_edid);
- dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc);
+ msm_dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc);
- drm_edid_connector_update(connector, dp_panel->drm_edid);
+ drm_edid_connector_update(connector, msm_dp_panel->drm_edid);
- if (!dp_panel->drm_edid) {
+ if (!msm_dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
- if (!dp_catalog_link_is_connected(panel->catalog)) {
+ if (!msm_dp_catalog_link_is_connected(panel->catalog)) {
rc = -ETIMEDOUT;
goto end;
}
@@ -172,87 +172,87 @@ end:
return rc;
}
-u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
u32 bpp;
- if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+ if (!msm_dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
return 0;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- if (dp_panel->video_test)
- bpp = dp_link_bit_depth_to_bpp(
+ if (msm_dp_panel->video_test)
+ bpp = msm_dp_link_bit_depth_to_bpp(
panel->link->test_video.test_bit_depth);
else
- bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+ bpp = msm_dp_panel_get_supported_bpp(msm_dp_panel, mode_edid_bpp,
mode_pclk_khz);
return bpp;
}
-int dp_panel_get_modes(struct dp_panel *dp_panel,
+int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector)
{
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- if (dp_panel->drm_edid)
+ if (msm_dp_panel->drm_edid)
return drm_edid_connector_add_modes(connector);
return 0;
}
-static u8 dp_panel_get_edid_checksum(const struct edid *edid)
+static u8 msm_dp_panel_get_edid_checksum(const struct edid *edid)
{
edid += edid->extensions;
return edid->checksum;
}
-void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
/* FIXME: get rid of drm_edid_raw() */
- const struct edid *edid = drm_edid_raw(dp_panel->drm_edid);
+ const struct edid *edid = drm_edid_raw(msm_dp_panel->drm_edid);
u8 checksum;
if (edid)
- checksum = dp_panel_get_edid_checksum(edid);
+ checksum = msm_dp_panel_get_edid_checksum(edid);
else
- checksum = dp_panel->connector->real_edid_checksum;
+ checksum = msm_dp_panel->connector->real_edid_checksum;
- dp_link_send_edid_checksum(panel->link, checksum);
- dp_link_send_test_response(panel->link);
+ msm_dp_link_send_edid_checksum(panel->link, checksum);
+ msm_dp_link_send_test_response(panel->link);
}
}
-void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
if (!panel->panel_on) {
@@ -262,31 +262,31 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
}
if (!enable) {
- dp_catalog_panel_tpg_disable(catalog);
+ msm_dp_catalog_panel_tpg_disable(catalog);
return;
}
drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
- dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+ msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
}
-static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
+static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
- struct dp_display_mode *dp_mode;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_display_mode *msm_dp_mode;
struct drm_dp_vsc_sdp vsc_sdp_data;
struct dp_sdp vsc_sdp;
ssize_t len;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
- dp_mode = &dp_panel->dp_mode;
+ msm_dp_mode = &msm_dp_panel->msm_dp_mode;
memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
@@ -300,7 +300,7 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT;
/* VSC SDP Payload for DB17 */
- vsc_sdp_data.bpc = dp_mode->bpp / 3;
+ vsc_sdp_data.bpc = msm_dp_mode->bpp / 3;
vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA;
/* VSC SDP Payload for DB18 */
@@ -312,36 +312,36 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
return len;
}
- dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+ msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
return 0;
}
-void dp_panel_dump_regs(struct dp_panel *dp_panel)
+void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
- dp_catalog_dump_regs(catalog);
+ msm_dp_catalog_dump_regs(catalog);
}
-int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
{
u32 data, total_ver, total_hor;
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
struct drm_display_mode *drm_mode;
u32 width_blanking;
u32 sync_start;
- u32 dp_active;
+ u32 msm_dp_active;
u32 total;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
- drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+ drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
@@ -371,9 +371,9 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data = drm_mode->vsync_end - drm_mode->vsync_start;
data <<= 16;
- data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+ data |= (panel->msm_dp_panel.msm_dp_mode.v_active_low << 31);
data |= drm_mode->hsync_end - drm_mode->hsync_start;
- data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+ data |= (panel->msm_dp_panel.msm_dp_mode.h_active_low << 15);
width_blanking = data;
@@ -381,26 +381,26 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data <<= 16;
data |= drm_mode->hdisplay;
- dp_active = data;
+ msm_dp_active = data;
- dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active);
+ msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active);
- if (dp_panel->dp_mode.out_fmt_is_yuv_420)
- dp_panel_setup_vsc_sdp_yuv_420(dp_panel);
+ if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420)
+ msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel);
panel->panel_on = true;
return 0;
}
-int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel)
{
struct drm_display_mode *drm_mode;
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- drm_mode = &dp_panel->dp_mode.drm_mode;
+ drm_mode = &msm_dp_panel->msm_dp_mode.drm_mode;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
/*
* print resolution info as this is a result
@@ -421,18 +421,18 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
drm_mode->vsync_end - drm_mode->vsync_start);
drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n",
drm_mode->clock);
- drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+ drm_dbg_dp(panel->drm_dev, "bpp = %d\n", msm_dp_panel->msm_dp_mode.bpp);
- dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
- dp_panel->dp_mode.drm_mode.clock);
+ msm_dp_panel->msm_dp_mode.bpp = msm_dp_panel_get_mode_bpp(msm_dp_panel, msm_dp_panel->msm_dp_mode.bpp,
+ msm_dp_panel->msm_dp_mode.drm_mode.clock);
drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
- dp_panel->dp_mode.bpp);
+ msm_dp_panel->msm_dp_mode.bpp);
return 0;
}
-static u32 dp_panel_link_frequencies(struct device_node *of_node)
+static u32 msm_dp_panel_link_frequencies(struct device_node *of_node)
{
struct device_node *endpoint;
u64 frequency = 0;
@@ -456,17 +456,17 @@ static u32 dp_panel_link_frequencies(struct device_node *of_node)
return frequency;
}
-static int dp_panel_parse_dt(struct dp_panel *dp_panel)
+static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
struct device_node *of_node;
int cnt;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
of_node = panel->dev->of_node;
/*
- * data-lanes is the property of dp_out endpoint
+ * data-lanes is the property of msm_dp_out endpoint
*/
cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
if (cnt < 0) {
@@ -475,21 +475,21 @@ static int dp_panel_parse_dt(struct dp_panel *dp_panel)
}
if (cnt > 0)
- dp_panel->max_dp_lanes = cnt;
+ msm_dp_panel->max_dp_lanes = cnt;
else
- dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
+ msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
- dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node);
- if (!dp_panel->max_dp_link_rate)
- dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
+ msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node);
+ if (!msm_dp_panel->max_dp_link_rate)
+ msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
return 0;
}
-struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in)
{
- struct dp_panel_private *panel;
- struct dp_panel *dp_panel;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_panel *msm_dp_panel;
int ret;
if (!in->dev || !in->catalog || !in->aux || !in->link) {
@@ -506,20 +506,20 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
panel->catalog = in->catalog;
panel->link = in->link;
- dp_panel = &panel->dp_panel;
- dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ msm_dp_panel = &panel->msm_dp_panel;
+ msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
- ret = dp_panel_parse_dt(dp_panel);
+ ret = msm_dp_panel_parse_dt(msm_dp_panel);
if (ret)
return ERR_PTR(ret);
- return dp_panel;
+ return msm_dp_panel;
}
-void dp_panel_put(struct dp_panel *dp_panel)
+void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel)
{
- if (!dp_panel)
+ if (!msm_dp_panel)
return;
- drm_edid_free(dp_panel->drm_edid);
+ drm_edid_free(msm_dp_panel->drm_edid);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 6722e3923fa5..0e944db3adf2 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -13,7 +13,7 @@
struct edid;
-struct dp_display_mode {
+struct msm_dp_display_mode {
struct drm_display_mode drm_mode;
u32 bpp;
u32 h_active_low;
@@ -21,28 +21,28 @@ struct dp_display_mode {
bool out_fmt_is_yuv_420;
};
-struct dp_panel_in {
+struct msm_dp_panel_in {
struct device *dev;
struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_catalog *catalog;
+ struct msm_dp_link *link;
+ struct msm_dp_catalog *catalog;
};
-struct dp_panel_psr {
+struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
};
-struct dp_panel {
+struct msm_dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- struct dp_link_info link_info;
+ struct msm_dp_link_info link_info;
const struct drm_edid *drm_edid;
struct drm_connector *connector;
- struct dp_display_mode dp_mode;
- struct dp_panel_psr psr_cap;
+ struct msm_dp_display_mode msm_dp_mode;
+ struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
@@ -52,18 +52,18 @@ struct dp_panel {
u32 max_bw_code;
};
-int dp_panel_init_panel_info(struct dp_panel *dp_panel);
-int dp_panel_deinit(struct dp_panel *dp_panel);
-int dp_panel_timing_cfg(struct dp_panel *dp_panel);
-void dp_panel_dump_regs(struct dp_panel *dp_panel);
-int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
+void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
-u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
-int dp_panel_get_modes(struct dp_panel *dp_panel,
+int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
-void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
-void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
+void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
/**
* is_link_rate_valid() - validates the link rate
@@ -80,7 +80,7 @@ static inline bool is_link_rate_valid(u32 bw_code)
}
/**
- * dp_link_is_lane_count_valid() - validates the lane count
+ * msm_dp_link_is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
* Returns true if the requested lane count is supported.
@@ -92,6 +92,6 @@ static inline bool is_lane_count_valid(u32 lane_count)
lane_count == 4);
}
-struct dp_panel *dp_panel_get(struct dp_panel_in *in);
-void dp_panel_put(struct dp_panel *dp_panel);
+struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in);
+void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
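Editor's note: the dp_panel changes above are a mechanical namespace rename (dp_* to msm_dp_*) of the types, helpers and members; behaviour is unchanged. As a minimal sketch of what a caller looks like after the rename — only the structure fields and function names come from the header above, the surrounding variables are assumptions:

	struct msm_dp_panel_in panel_in = {
		.dev = dev,		/* assumed: DP controller device */
		.aux = aux,		/* assumed: struct drm_dp_aux * */
		.link = link,		/* assumed: struct msm_dp_link * */
		.catalog = catalog,	/* assumed: struct msm_dp_catalog * */
	};
	struct msm_dp_panel *msm_dp_panel;

	msm_dp_panel = msm_dp_panel_get(&panel_in);
	if (IS_ERR(msm_dp_panel))
		return PTR_ERR(msm_dp_panel);

	/* ... use the panel ... */

	msm_dp_panel_put(msm_dp_panel);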
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
index da9207caf72d..2a40f07fe2d5 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.c
+++ b/drivers/gpu/drm/msm/dp/dp_utils.c
@@ -9,7 +9,7 @@
#define DP_SDP_HEADER_SIZE 8
-u8 dp_utils_get_g0_value(u8 data)
+u8 msm_dp_utils_get_g0_value(u8 data)
{
u8 c[4];
u8 g[4];
@@ -30,7 +30,7 @@ u8 dp_utils_get_g0_value(u8 data)
return ret_data;
}
-u8 dp_utils_get_g1_value(u8 data)
+u8 msm_dp_utils_get_g1_value(u8 data)
{
u8 c[4];
u8 g[4];
@@ -51,7 +51,7 @@ u8 dp_utils_get_g1_value(u8 data)
return ret_data;
}
-u8 dp_utils_calculate_parity(u32 data)
+u8 msm_dp_utils_calculate_parity(u32 data)
{
u8 x0 = 0;
u8 x1 = 0;
@@ -65,8 +65,8 @@ u8 dp_utils_calculate_parity(u32 data)
iData = (data >> i * 4) & 0xF;
ci = iData ^ x1;
- x1 = x0 ^ dp_utils_get_g1_value(ci);
- x0 = dp_utils_get_g0_value(ci);
+ x1 = x0 ^ msm_dp_utils_get_g1_value(ci);
+ x0 = msm_dp_utils_get_g0_value(ci);
}
parity_byte = x1 | (x0 << 4);
@@ -74,7 +74,7 @@ u8 dp_utils_calculate_parity(u32 data)
return parity_byte;
}
-ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
+ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
{
size_t length;
@@ -83,14 +83,14 @@ ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_b
return -ENOSPC;
header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
- FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) |
+ FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
- FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1));
+ FIELD_PREP(PARITY_1_MASK, msm_dp_utils_calculate_parity(sdp_header->HB1));
header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) |
- FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) |
+ FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
- FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3));
+ FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
return length;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
index 7c056d9798dc..88d53157f5b5 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.h
+++ b/drivers/gpu/drm/msm/dp/dp_utils.h
@@ -28,9 +28,9 @@
#define HEADER_3_MASK GENMASK(23, 16)
#define PARITY_3_MASK GENMASK(31, 24)
-u8 dp_utils_get_g0_value(u8 data);
-u8 dp_utils_get_g1_value(u8 data);
-u8 dp_utils_calculate_parity(u32 data);
-ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
+u8 msm_dp_utils_get_g0_value(u8 data);
+u8 msm_dp_utils_get_g1_value(u8 data);
+u8 msm_dp_utils_calculate_parity(u32 data);
+ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
#endif /* _DP_UTILS_H_ */
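Editor's note: the dp_utils helpers keep their behaviour and only gain the msm_ prefix; msm_dp_utils_calculate_parity() derives the per-byte parity nibbles and msm_dp_utils_pack_sdp_header() folds the four SDP header bytes plus parity into two 32-bit words. A small kernel-style sketch of a caller, assuming it sits next to dp_utils.h; the wrapper itself is not part of the patch:

	#include <drm/display/drm_dp_helper.h>
	#include "dp_utils.h"

	/* Serialize the SDP header of @sdp into the two words that the
	 * catalog code programs into the hardware header registers. */
	static int example_pack_sdp_header(struct dp_sdp *sdp, u32 header_buff[2])
	{
		ssize_t len = msm_dp_utils_pack_sdp_header(&sdp->sdp_header, header_buff);

		return len < 0 ? (int)len : 0;
	}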
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index e6ffaf92d26d..a719fd33d9d8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -157,9 +157,8 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
#define HDMI_MHZ_TO_HZ ((u64)1000000)
static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
{
- u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
- 9, 10, 12, 15, 25};
- u32 const band_list[] = {0, 1, 2, 3};
+ static const u32 ratio_list[] = {1, 2, 3, 4, 5, 6, 9, 10, 12, 15, 25};
+ static const u32 band_list[] = {0, 1, 2, 3};
u32 const sz_ratio = ARRAY_SIZE(ratio_list);
u32 const sz_band = ARRAY_SIZE(band_list);
u32 const cmp_cnt = 1024;
@@ -270,7 +269,7 @@ find_optimal_index:
case 25:
found_hsclk_divsel = 14;
break;
- };
+ }
pd->vco_freq = found_vco_freq;
pd->tx_band_sel = found_tx_band_sel;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8c13b08708d2..c2dd8ef6d6dc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
+#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -291,7 +292,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
drm_kms_helper_poll_init(ddev);
- msm_fbdev_setup(ddev);
+ drm_client_setup(ddev, NULL);
}
return 0;
@@ -902,6 +903,7 @@ static const struct drm_driver msm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
+ MSM_FBDEV_DRIVER_OPS,
.show_fdinfo = msm_show_fdinfo,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
@@ -983,6 +985,10 @@ module_param(prefer_mdp5, bool, 0444);
/* list all platforms supported by both mdp5 and dpu drivers */
static const char *const msm_mdp5_dpu_migration[] = {
+ "qcom,msm8917-mdp5",
+ "qcom,msm8937-mdp5",
+ "qcom,msm8953-mdp5",
+ "qcom,msm8996-mdp5",
"qcom,sdm630-mdp5",
"qcom,sdm660-mdp5",
NULL,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2e28a1344636..d8c9a1b19263 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -36,6 +36,9 @@
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -49,7 +52,6 @@ struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
-#define MAX_BRIDGES 8
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
@@ -68,23 +70,6 @@ enum msm_dsi_controller {
};
#define MSM_GPU_MAX_RINGS 4
-#define MAX_H_TILES_PER_DISPLAY 2
-
-/**
- * struct msm_display_topology - defines a display topology pipeline
- * @num_lm: number of layer mixers used
- * @num_intf: number of interfaces the panel is mounted on
- * @num_dspp: number of dspp blocks used
- * @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
- */
-struct msm_display_topology {
- u32 num_lm;
- u32 num_intf;
- u32 num_dspp;
- u32 num_dsc;
- bool needs_cdm;
-};
/* Commit/Event thread specific structure */
struct msm_drm_thread {
@@ -290,11 +275,13 @@ struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
#ifdef CONFIG_DRM_FBDEV_EMULATION
-void msm_fbdev_setup(struct drm_device *dev);
+int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define MSM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = msm_fbdev_driver_fbdev_probe
#else
-static inline void msm_fbdev_setup(struct drm_device *dev)
-{
-}
+#define MSM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
struct hdmi;
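Editor's note: with CONFIG_DRM_FBDEV_EMULATION enabled, MSM_FBDEV_DRIVER_OPS expands to an .fbdev_probe initializer, so the driver structure in msm_drv.c ends up equivalent to the sketch below (other initializers elided, feature flags illustrative only); with emulation disabled the callback is simply left NULL:

	static const struct drm_driver example_driver = {
		/* ... feature flags, fops, ioctls as in msm_driver ... */
		MSM_FBDEV_DRIVER_OPS,	/* .fbdev_probe = msm_fbdev_driver_fbdev_probe */
	};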
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 030bedac632d..c62249b1ab3d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -65,8 +65,31 @@ static const struct fb_ops msm_fb_ops = {
.fb_destroy = msm_fbdev_fb_destroy,
};
-static int msm_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = {
+ .fb_dirty = msm_fbdev_fb_dirty,
+};
+
+int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -114,6 +137,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
DBG("fbi=%p, dev=%p", fbi, dev);
+ helper->funcs = &msm_fbdev_helper_funcs;
helper->fb = fb;
fbi->fbops = &msm_fb_ops;
@@ -138,119 +162,3 @@ fail:
drm_framebuffer_remove(fb);
return ret;
}
-
-static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
- struct drm_clip_rect *clip)
-{
- struct drm_device *dev = helper->dev;
- int ret;
-
- /* Call damage handlers only if necessary */
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty) {
- ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
- if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
- return ret;
- }
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
- .fb_probe = msm_fbdev_create,
- .fb_dirty = msm_fbdev_fb_dirty,
-};
-
-/*
- * struct drm_client
- */
-
-static void msm_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int msm_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int msm_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs msm_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = msm_fbdev_client_unregister,
- .restore = msm_fbdev_client_restore,
- .hotplug = msm_fbdev_client_hotplug,
-};
-
-/* initialize fbdev helper */
-void msm_fbdev_setup(struct drm_device *dev)
-{
- struct drm_fb_helper *helper;
- int ret;
-
- if (!fbdev)
- return;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- helper = kzalloc(sizeof(*helper), GFP_KERNEL);
- if (!helper)
- return;
- drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs);
-
- ret = drm_client_init(dev, &helper->client, "fbdev", &msm_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_fb_helper_unprepare;
- }
-
- drm_client_register(&helper->client);
-
- return;
-
-err_drm_fb_helper_unprepare:
- drm_fb_helper_unprepare(helper);
- kfree(helper);
-}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index a274b8466423..0d4a3744cfcb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 1f02bb9956be..7cabc8480d7c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -194,17 +194,6 @@ struct msm_gpu {
refcount_t sysprof_active;
/**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the last context to submit rendering,
- * and the one with current pgtables installed (for generations
- * that support per-context pgtables). Tracked by seqno rather
- * than pointer value to avoid dangling pointers, and cases where
- * a ctx can be freed and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
- /**
* lock:
*
* General lock for serializing all the gpu things.
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index ea70c1c32d94..6970b0f7f457 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ int ret;
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
@@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
mutex_init(&df->lock);
- dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
- DEV_PM_QOS_MIN_FREQUENCY, 0);
+ ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n");
+ return;
+ }
msm_devfreq_profile.initial_freq = gpu->fast_rate;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index ac40d857bc45..7f863282db0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume,
TP_printk("%u", __entry->dummy)
);
+TRACE_EVENT(msm_gpu_preemption_trigger,
+ TP_PROTO(int ring_id_from, int ring_id_to),
+ TP_ARGS(ring_id_from, ring_id_to),
+ TP_STRUCT__entry(
+ __field(int, ring_id_from)
+ __field(int, ring_id_to)
+ ),
+ TP_fast_assign(
+ __entry->ring_id_from = ring_id_from;
+ __entry->ring_id_to = ring_id_to;
+ ),
+ TP_printk("preempting %u -> %u",
+ __entry->ring_id_from,
+ __entry->ring_id_to)
+);
+
+TRACE_EVENT(msm_gpu_preemption_irq,
+ TP_PROTO(u32 ring_id),
+ TP_ARGS(ring_id),
+ TP_STRUCT__entry(
+ __field(u32, ring_id)
+ ),
+ TP_fast_assign(
+ __entry->ring_id = ring_id;
+ ),
+ TP_printk("preempted to %u", __entry->ring_id)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
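Editor's note: TRACE_EVENT(name, ...) generates a trace_name() helper, so the two new events are emitted from the preemption paths roughly as sketched below; the exact call sites live in a6xx_preempt.c and the ring variable names here are assumptions:

	/* when a switch from one ring to another is requested */
	trace_msm_gpu_preemption_trigger(cur_ring->id, new_ring->id);

	/* from the CP IRQ handler once the switch has completed */
	trace_msm_gpu_preemption_irq(new_ring->id);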
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index af6a6fcb1173..f3326d09bdbc 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -5,11 +5,11 @@
* Author: Rob Clark <[email protected]>
*/
+#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
@@ -237,7 +237,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
int ret;
/* the fw fb could be anywhere in memory */
- ret = drm_aperture_remove_framebuffers(drv);
+ ret = aperture_remove_all_conflicting_devices(drv->name);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 1e0c54de3716..e60162744c66 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -92,12 +92,6 @@ struct msm_kms_funcs {
* Format handling:
*/
- /* do format checking on format modified through fb_cmd2 modifiers */
- int (*check_modified_format)(const struct msm_kms *kms,
- const struct msm_format *msm_fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos);
-
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index faa88fd6eb4d..b7bd899ead44 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -19,13 +19,7 @@
#include "msm_mdss.h"
#include "msm_kms.h"
-#define HW_REV 0x0
-#define HW_INTR_STATUS 0x0010
-
-#define UBWC_DEC_HW_VERSION 0x58
-#define UBWC_STATIC 0x144
-#define UBWC_CTRL_2 0x150
-#define UBWC_PREDICTION_MODE 0x154
+#include <generated/mdss.xml.h>
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
@@ -83,7 +77,7 @@ static void msm_mdss_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
+ interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
@@ -173,7 +167,7 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
@@ -189,7 +183,7 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
if (data->ubwc_enc_version == UBWC_1_0)
value |= BIT(8);
- writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
@@ -200,21 +194,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
(data->highest_bank_bit & 0x7) << 4 |
(data->macrotile_mode & 0x1) << 12;
- writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
if (data->ubwc_enc_version == UBWC_3_0) {
- writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
} else {
if (data->ubwc_dec_version == UBWC_4_3)
- writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
+ writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
else
- writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
}
-#define MDSS_HW_MAJ_MIN GENMASK(31, 16)
+#define MDSS_HW_MAJ_MIN \
+ (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)
#define MDSS_HW_MSM8996 0x1007
#define MDSS_HW_MSM8937 0x100e
@@ -235,7 +230,7 @@ static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_m
if (!data)
return NULL;
- hw_rev = readl_relaxed(mdss->mmio + HW_REV);
+ hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
if (hw_rev == MDSS_HW_MSM8996 ||
@@ -334,9 +329,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
- readl_relaxed(msm_mdss->mmio + HW_REV));
+ readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
- readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION));
+ readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
break;
}
@@ -573,6 +568,16 @@ static const struct msm_mdss_data qcm2290_data = {
.reg_bus_bw = 76800,
};
+static const struct msm_mdss_data sa8775p_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = 4,
+ .ubwc_static = 1,
+ .highest_bank_bit = 0,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
+};
+
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
@@ -710,6 +715,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
+ { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 9d6655f96f0c..c803556a8f64 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -64,7 +64,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
char name[32];
int ret;
- /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 0d6beb8cd39a..d1e49f701c81 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -31,10 +31,12 @@ struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
/* Introduced on A7xx */
+ volatile uint32_t bv_rptr;
volatile uint32_t bv_fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
volatile u64 ttbr0;
+ volatile u32 context_idr;
};
struct msm_cp_state {
@@ -99,6 +101,22 @@ struct msm_ringbuffer {
* preemption. Can be aquired from irq context.
*/
spinlock_t preempt_lock;
+
+ /*
+ * Whether we skipped writing wptr and it needs to be updated in the
+ * future when the ring becomes current.
+ */
+ bool restore_wptr;
+
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit to this ring
+ * Tracked by seqno rather than pointer value to avoid dangling
+ * pointers, and cases where a ctx can be freed and a new one created
+ * with the same address.
+ */
+ int cur_ctx_seqno;
};
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
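Editor's note: moving cur_ctx_seqno from struct msm_gpu into struct msm_ringbuffer means "which context submitted last" is now tracked per ring, which is what ring-to-ring preemption needs. A sketch of the kind of check this enables — the helper is illustrative, not part of the patch:

	/* Does this submit come from a different context than the one whose
	 * state (e.g. pagetables) was last installed on @ring? */
	static bool example_ring_needs_ctx_switch(struct msm_ringbuffer *ring,
						  struct msm_file_private *ctx)
	{
		return ring->cur_ctx_seqno != ctx->seqno;
	}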
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 0e803125a325..2fc3eaf81f44 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -161,6 +161,8 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
+ extern int enable_preemption;
+ bool preemption_supported;
unsigned ring_nr;
int ret;
@@ -170,6 +172,11 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (!priv->gpu)
return -ENODEV;
+ preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+ if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+ return -EINVAL;
+
ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
if (ret)
return ret;
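Editor's note: the submitqueue change lets userspace opt a queue into preemption via MSM_SUBMITQUEUE_ALLOW_PREEMPT, with the kernel validating the flag against the ring setup. A minimal userspace-side sketch using libdrm, assuming an installed msm_drm.h UAPI header that already carries the new flag (error handling trimmed):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	static int create_preemptible_queue(int drm_fd, uint32_t prio, uint32_t *queue_id)
	{
		struct drm_msm_submitqueue req;

		memset(&req, 0, sizeof(req));
		req.flags = MSM_SUBMITQUEUE_ALLOW_PREEMPT;
		req.prio = prio;

		if (drmIoctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
			return -1;	/* the kernel rejects the flag if unsupported */

		*queue_id = req.id;
		return 0;
	}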
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 97608603ea62..2db425abf0f3 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -2358,7 +2358,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
</array>
- <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/>
+ <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL">
+ <bitfield name="STOP" pos="0" type="boolean"/>
+ <bitfield name="LEVEL" low="6" high="7"/>
+ <bitfield name="USES_GMEM" pos="8" type="boolean"/>
+ <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/>
+ </reg32>
<reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/>
<reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/>
<reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index cab01af55d22..55a35182858c 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -581,8 +581,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
and forcibly switch to the indicated context.
</doc>
<value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/>
- <!-- Note, kgsl calls this CP_SET_AMBLE: -->
- <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/>
+ <value name="CP_SET_AMBLE" value="0x55" variants="A6XX-"/>
<!--
Seems to always have the payload:
@@ -2013,42 +2012,38 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
-<domain name="CP_SET_CTXSWITCH_IB" width="32">
+<domain name="CP_SET_AMBLE" width="32">
<doc>
- Used by the userspace driver to set various IB's which are
- executed during context save/restore for handling
- state that isn't restored by the
- context switch routine itself.
- </doc>
- <enum name="ctxswitch_ib">
- <value name="RESTORE_IB" value="0">
+ Used by the userspace and kernel drivers to set various IB's
+ which are executed during context save/restore for handling
+ state that isn't restored by the context switch routine itself.
+ </doc>
+ <enum name="amble_type">
+ <value name="PREAMBLE_AMBLE_TYPE" value="0">
<doc>Executed unconditionally when switching back to the context.</doc>
</value>
- <value name="YIELD_RESTORE_IB" value="1">
+ <value name="BIN_PREAMBLE_AMBLE_TYPE" value="1">
<doc>
Executed when switching back after switching
away during execution of
- a CP_SET_MARKER packet with RM6_YIELD as the
- payload *and* the normal save routine was
- bypassed for a shorter one. I think this is
- connected to the "skipsaverestore" bit set by
- the kernel when preempting.
+ a CP_SET_MARKER packet with RM6_BIN_RENDER_END as the
+ payload *and* skipsaverestore is set. This is
+ expected to restore static register values not
+ saved when skipsaverestore is set.
</doc>
</value>
- <value name="SAVE_IB" value="2">
+ <value name="POSTAMBLE_AMBLE_TYPE" value="2">
<doc>
Executed when switching away from the context,
except for context switches initiated via
CP_YIELD.
</doc>
</value>
- <value name="RB_SAVE_IB" value="3">
+ <value name="KMD_AMBLE_TYPE" value="3">
<doc>
This can only be set by the RB (i.e. the kernel)
and executes with protected mode off, but
- is otherwise similar to SAVE_IB.
-
- Note, kgsl calls this CP_KMD_AMBLE_TYPE
+ is otherwise similar to POSTAMBLE_AMBLE_TYPE.
</doc>
</value>
</enum>
@@ -2060,7 +2055,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<reg32 offset="2" name="2">
<bitfield name="DWORDS" low="0" high="19" type="uint"/>
- <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/>
+ <bitfield name="TYPE" low="20" high="21" type="amble_type"/>
</reg32>
</domain>
diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml
index 92f3263af170..8c9c4af350aa 100644
--- a/drivers/gpu/drm/msm/registers/display/mdp5.xml
+++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml
@@ -9,22 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="VBIF" width="32">
</domain>
-<domain name="MDSS" width="32">
- <reg32 offset="0x00000" name="HW_VERSION">
- <bitfield name="STEP" low="0" high="15" type="uint"/>
- <bitfield name="MINOR" low="16" high="27" type="uint"/>
- <bitfield name="MAJOR" low="28" high="31" type="uint"/>
- </reg32>
-
- <reg32 offset="0x00010" name="HW_INTR_STATUS">
- <bitfield name="INTR_MDP" pos="0" type="boolean"/>
- <bitfield name="INTR_DSI0" pos="4" type="boolean"/>
- <bitfield name="INTR_DSI1" pos="5" type="boolean"/>
- <bitfield name="INTR_HDMI" pos="8" type="boolean"/>
- <bitfield name="INTR_EDP" pos="12" type="boolean"/>
- </reg32>
-</domain>
-
<domain name="MDP5" width="32">
<enum name="mdp5_intf_type">
diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml
new file mode 100644
index 000000000000..ac85caf1575c
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdss.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="MDSS" width="32">
+ <reg32 offset="0x00000" name="HW_VERSION">
+ <bitfield name="STEP" low="0" high="15" type="uint"/>
+ <bitfield name="MINOR" low="16" high="27" type="uint"/>
+ <bitfield name="MAJOR" low="28" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00010" name="HW_INTR_STATUS">
+ <bitfield name="INTR_MDP" pos="0" type="boolean"/>
+ <bitfield name="INTR_DSI0" pos="4" type="boolean"/>
+ <bitfield name="INTR_DSI1" pos="5" type="boolean"/>
+ <bitfield name="INTR_HDMI" pos="8" type="boolean"/>
+ <bitfield name="INTR_EDP" pos="12" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/>
+
+ <reg32 offset="0x00144" name="UBWC_STATIC"/>
+ <reg32 offset="0x00150" name="UBWC_CTRL_2"/>
+ <reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/>
+</domain>
+
+</database>