Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 62
1 file changed, 55 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 17cfad6424db..83c31b2ad865 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,7 +10,6 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
-#include <linux/nvmem-consumer.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -102,6 +101,7 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
+ bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
@@ -112,6 +112,15 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
return;
+ if (!sysprof) {
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
+
/* Execute the table update */
OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
@@ -138,6 +147,25 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
+
+ if (!sysprof) {
+ /*
+ * Wait for SRAM clear after the pgtable update, so the
+ * two can happen in parallel:
+ */
+ OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
+ OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
+ OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
+ OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
+
+ /* Re-enable protected mode: */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
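
A note on the sysprof gate introduced above: sysprof_active is a refcount that, judging by the "> 1" test, starts at one when the GPU is created, and each process that opts into system profiling holds an extra reference. Only when no profiler is attached does the context switch drop protected mode, kick REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, let the SMMU table update run in parallel, and then poll REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS before re-enabling protected mode. A minimal standalone C model of the predicate (the initial value of one and the counter handling are assumptions, not shown in this diff):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative model only: a plain counter standing in for the kernel's
	 * refcount_t sysprof_active, assumed to start at 1 at GPU init. */
	struct model_gpu {
		unsigned int sysprof_active;
	};

	/* Mirrors the "> 1" test in a6xx_set_pagetable(): at the baseline of 1
	 * there is no profiler, so perfcounter SRAM is re-initialised on every
	 * context switch; any opted-in profiler pushes the count above 1 and
	 * the re-init (and its CP_WAIT_REG_MEM poll) is skipped. */
	static bool sysprof_enabled(const struct model_gpu *gpu)
	{
		return gpu->sysprof_active > 1;
	}

	int main(void)
	{
		struct model_gpu gpu = { .sysprof_active = 1 };

		assert(!sysprof_enabled(&gpu));  /* default: clear counters per context */

		gpu.sysprof_active++;            /* a profiler opts in */
		assert(sysprof_enabled(&gpu));   /* counters preserved across contexts */

		printf("sysprof gate behaves as described\n");
		return 0;
	}
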
@@ -655,19 +683,23 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs = a6xx_protect;
- unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
-
- BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
- BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+ unsigned i, count, count_max;
if (adreno_is_a650(adreno_gpu)) {
regs = a650_protect;
count = ARRAY_SIZE(a650_protect);
count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
} else if (adreno_is_a660_family(adreno_gpu)) {
regs = a660_protect;
count = ARRAY_SIZE(a660_protect);
count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+ } else {
+ regs = a6xx_protect;
+ count = ARRAY_SIZE(a6xx_protect);
+ count_max = 32;
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
}
/*
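
The hunk above also moves each BUILD_BUG_ON() next to the protect table it guards, so the compile-time bound check lives in the branch that actually selects that table (and the a660 table gets a check of its own). A standalone sketch of the same pattern, using C11 _Static_assert in place of the kernel macro; the demo tables and limits are made up for illustration:

	#include <stddef.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* Hypothetical stand-ins for the a6xx/a650 protect tables in a6xx_gpu.c. */
	static const unsigned int demo_a6xx_protect[] = { 0x1, 0x2, 0x3 };
	static const unsigned int demo_a650_protect[] = { 0x1, 0x2, 0x3, 0x4 };

	static void pick_table(int is_a650, const unsigned int **regs,
			       size_t *count, size_t *count_max)
	{
		if (is_a650) {
			*regs = demo_a650_protect;
			*count = ARRAY_SIZE(demo_a650_protect);
			*count_max = 48;
			/* Compile-time check sits beside the table it covers,
			 * like the relocated BUILD_BUG_ON() in the patch. */
			_Static_assert(ARRAY_SIZE(demo_a650_protect) <= 48,
				       "protect table exceeds CP_PROTECT range");
		} else {
			*regs = demo_a6xx_protect;
			*count = ARRAY_SIZE(demo_a6xx_protect);
			*count_max = 32;
			_Static_assert(ARRAY_SIZE(demo_a6xx_protect) <= 32,
				       "protect table exceeds CP_PROTECT range");
		}
	}

	int main(void)
	{
		const unsigned int *regs;
		size_t count, count_max;

		pick_table(1, &regs, &count, &count_max);
		printf("first protected range 0x%x, %zu of %zu entries\n",
		       regs[0], count, count_max);
		return 0;
	}
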
@@ -1506,7 +1538,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
if (a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = NULL;
else
- a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -1736,6 +1768,18 @@ static u32 a618_get_speed_bin(u32 fuse)
return UINT_MAX;
}
+static u32 adreno_7c3_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 117)
+ return 0;
+ else if (fuse == 190)
+ return 1;
+
+ return UINT_MAX;
+}
+
static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
{
u32 val = UINT_MAX;
@@ -1743,6 +1787,9 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
val = a618_get_speed_bin(fuse);
+ if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev))
+ val = adreno_7c3_get_speed_bin(fuse);
+
if (val == UINT_MAX) {
DRM_DEV_ERROR(dev,
"missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
@@ -1759,7 +1806,7 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
u32 speedbin;
int ret;
- ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
+ ret = adreno_read_speedbin(dev, &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
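
The call-site change above replaces the direct nvmem_cell_read_variable_le_u32() call with an adreno_read_speedbin() helper, which is also why the <linux/nvmem-consumer.h> include disappears from this file in the first hunk. The helper's body is not part of this diff; a plausible sketch, assuming it is a thin wrapper in adreno_gpu.c around the same nvmem cell:

	#include <linux/device.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/types.h>

	/* Assumed shape of the shared helper: the nvmem lookup the a6xx code
	 * used to do inline, hoisted so other adreno code can reuse it. */
	int adreno_read_speedbin(struct device *dev, u32 *speedbin)
	{
		return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
	}
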
@@ -1786,6 +1833,7 @@ done:
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
+ .set_param = adreno_set_param,
.hw_init = a6xx_hw_init,
.pm_suspend = a6xx_pm_suspend,
.pm_resume = a6xx_pm_resume,
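
Hooking .set_param up to adreno_set_param() is what lets userspace change the sysprof state consulted in a6xx_set_pagetable(). A hedged sketch of how a profiler might opt in through the msm SET_PARAM path, assuming the MSM_PARAM_SYSPROF parameter from the msm uapi header and libdrm's drmCommandWrite(); treat the value semantics and any privilege requirements as documented in that header rather than in this diff:

	/* Hypothetical profiler-side snippet; build against libdrm. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	static int msm_sysprof_enable(int fd)
	{
		struct drm_msm_param req = {
			.pipe  = MSM_PIPE_3D0,
			.param = MSM_PARAM_SYSPROF,
			.value = 1,	/* assumed: "preserve perfcounters" mode */
		};

		/* Routed through the driver's new .set_param hook. */
		return drmCommandWrite(fd, DRM_MSM_SET_PARAM, &req, sizeof(req));
	}

	int main(void)
	{
		int fd = open("/dev/dri/renderD128", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (msm_sysprof_enable(fd))
			perror("MSM_PARAM_SYSPROF");
		close(fd);
		return 0;
	}
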