41 files changed, 736 insertions(+), 349 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 96ad79627dbb..e9af03113fc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1037,7 +1037,6 @@ struct amdgpu_uvd { bool use_ctx_buf; struct amd_sched_entity entity; uint32_t srbm_soft_reset; - bool is_powergated; }; /* @@ -1066,7 +1065,6 @@ struct amdgpu_vce { struct amd_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; - bool is_powergated; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index fa34dcae392f..d9e5aa4a79ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -834,16 +834,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/topaz_smc.bin"); break; case CHIP_TONGA: if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || - ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) + ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; case CHIP_FIJI: @@ -858,9 +860,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, ((adev->pdev->device == 0x67ff) && ((adev->pdev->revision == 0xcf) || (adev->pdev->revision == 0xef) || - (adev->pdev->revision == 0xff)))) + (adev->pdev->revision == 0xff)))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/polaris11_smc.bin"); } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); @@ -874,9 +877,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xe4) || (adev->pdev->revision == 0xe5) || (adev->pdev->revision == 0xe7) || - (adev->pdev->revision == 0xef))) + (adev->pdev->revision == 0xef))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/polaris10_smc.bin"); } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ec7037a48b6e..51d759463384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -504,13 +504,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, list_for_each_entry(entry, list, head) { struct amdgpu_bo *bo = container_of(entry->bo, struct amdgpu_bo, tbo); - - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - goto error; - - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) + if (amdgpu_gem_va_check(NULL, bo)) goto error; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 95e026a4a2de..346e80a7119b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1296,7 +1296,8 
@@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!adev->pm.dpm_enabled) return; - amdgpu_display_bandwidth_update(adev); + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1cf1d9d1aec1..5b24e89552ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, default: encoder->possible_crtcs = 0x3; break; + case 3: + encoder->possible_crtcs = 0x7; + break; case 4: encoder->possible_crtcs = 0xf; break; + case 5: + encoder->possible_crtcs = 0x1f; + break; case 6: encoder->possible_crtcs = 0x3f; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index e3589b55a1e1..1f9354541f29 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK | (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT)); WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); udelay(50); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 35f9cd83b821..67afc901905c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); } @@ -7260,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_ce_ib_state regular; struct amdgpu_ce_ib_state_chained_ib chained; - } ce_payload = {0}; + } ce_payload = {}; if (ring->adev->virt.chained_ib_support) { ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); @@ -7287,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_de_ib_state regular; struct amdgpu_de_ib_state_chained_ib chained; - } de_payload = {0}; + } de_payload = {}; gds_addr = csa_addr + 4096; if (ring->adev->virt.chained_ib_support) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f15df99f0a06..b34cefc7ebd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -401,7 +401,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) break; mdelay(1); } - break; + if 
(status & 2) + break; } for (i = 0; i < 10; ++i) { @@ -411,7 +412,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) break; mdelay(1); } - break; + if (status & 0xf) + break; } /* Stall UMC and register bus before resetting VCPU */ @@ -424,7 +426,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) break; mdelay(1); } - break; + if (status & 0x240) + break; } WREG32_P(0x3D49, 0, ~(1 << 2)); @@ -723,7 +726,8 @@ static int uvd_v4_2_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { - if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) { + if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); @@ -733,7 +737,8 @@ static int uvd_v4_2_set_powergating_state(void *handle, return 0; } else { if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { - if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 46e715193924..ad8c02e423d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -825,12 +825,10 @@ static int uvd_v5_0_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v5_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -844,7 +842,8 @@ static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index af83ab8c1250..18a6de4e1512 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1051,12 +1051,10 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v6_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -1070,7 +1068,8 @@ static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index a8c40eebdd78..93ec8815bb13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -510,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32(mmVCE_LMI_SWAP_CNTL, 0); WREG32(mmVCE_LMI_SWAP_CNTL1, 0); WREG32(mmVCE_LMI_VM_CTRL, 0); + WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000); + if (adev->asic_type >= CHIP_STONEY) { WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, 
(adev->vce.gpu_addr >> 8)); @@ -766,12 +768,10 @@ static int vce_v3_0_set_powergating_state(void *handle, ret = vce_v3_0_stop(adev); if (ret) goto out; - adev->vce.is_powergated = true; } else { ret = vce_v3_0_start(adev); if (ret) goto out; - adev->vce.is_powergated = false; } out: @@ -785,7 +785,8 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->vce.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 25882a4dea5d..34c6ff52710e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -5452,5 +5452,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_0_1_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h index a9ef1562f43b..66597c64f525 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h @@ -1121,5 +1121,6 @@ #define ixROM_SW_DATA_62 0xc060011c #define ixROM_SW_DATA_63 0xc0600120 #define ixROM_SW_DATA_64 0xc0600124 +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_1_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h index 2c997f7b5d13..fb06f2e2f6e6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h @@ -4860,5 +4860,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_1_1_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index 22dd4c2b7290..4446d43d2a8f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h @@ -1271,5 +1271,6 @@ #define ixROM_SW_DATA_62 0xc060011c #define ixROM_SW_DATA_63 0xc0600120 #define ixROM_SW_DATA_64 0xc0600124 +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_2_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 518fd02e9d35..627906674fe8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h @@ -5830,5 +5830,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_1_2_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h 
b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index eca2b851f25f..0333d880bc9e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -1244,5 +1244,5 @@ #define ixGC_CAC_ACC_CU14 0xc8 #define ixGC_CAC_ACC_CU15 0xc9 #define ixGC_CAC_OVRD_CU 0xe7 - +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_3_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 1ede9e274714..654c1093d362 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -6076,5 +6076,8 @@ #define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 #define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 #define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 + #endif /* SMU_7_1_3_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 1d26ae768147..17b9d41f3e87 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -171,6 +171,7 @@ struct cgs_firmware_info { uint32_t ucode_start_address; void *kptr; + bool is_kicker; }; struct cgs_mode_info { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 3341c0fbd069..1dc31aa72781 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -477,6 +477,151 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { { 0xFFFFFFFF } }; +static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x004c, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00d0, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0069, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x0048, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x005f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x007a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x001f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x002d, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x0088, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, 
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, 
GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + /* DIDT_TD */ + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 
ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + /* DIDT_TCP */ + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, 
DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } /* End of list */ +}; static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { @@ -630,7 +775,10 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + if (hwmgr->smumgr->is_kicker) + result = smu7_program_pt_config_registers(hwmgr, 
DIDTConfig_Polaris11_Kicker); + else + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); } else if (hwmgr->chip_id == CHIP_POLARIS12) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 9b6531bd6350..7c318a95e0c2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -137,6 +137,7 @@ struct pp_smumgr { uint32_t usec_timeout; bool reload_fw; const struct pp_smumgr_func *smumgr_funcs; + bool is_kicker; }; extern int smum_early_init(struct pp_instance *handle); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 0e26900e459e..c6c3c5751ac7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -494,6 +494,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct pp_smumgr *smumgr = hwmgr->smumgr; state->CcPwrDynRm = 0; state->CcPwrDynRm1 = 0; @@ -502,7 +503,10 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; + if (smumgr->is_kicker) + state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; + else + state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 6749fbe26c74..35ac27681415 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -533,6 +533,8 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) cgs_get_firmware_info(smumgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); + smumgr->is_kicker = info.is_kicker; + result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); return result; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c97588a28216..11f102e7ddfd 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -2046,6 +2046,112 @@ static void complete_crtc_signaling(struct drm_device *dev, kfree(fence_state); } +int drm_atomic_remove_fb(struct drm_framebuffer *fb) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_device *dev = fb->dev; + struct drm_atomic_state *state; + struct drm_plane *plane; + struct drm_connector *conn; + struct drm_connector_state *conn_state; + int i, ret = 0; + unsigned plane_mask, disable_crtcs = false; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; + +retry: + plane_mask = 0; + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret) + goto unlock; + + drm_for_each_plane(plane, dev) { + struct drm_plane_state *plane_state; + + if (plane->state->fb != fb) + continue; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = 
PTR_ERR(plane_state); + goto unlock; + } + + /* + * Some drivers do not support keeping crtc active with the + * primary plane disabled. If we fail to commit with -EINVAL + * then we will try to perform the same commit but with all + * crtc's disabled for primary planes as well. + */ + if (disable_crtcs && plane_state->crtc->primary == plane) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + + ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); + if (ret) + goto unlock; + + crtc_state->active = false; + ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); + if (ret) + goto unlock; + } + + drm_atomic_set_fb_for_plane(plane_state, NULL); + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret) + goto unlock; + + plane_mask |= BIT(drm_plane_index(plane)); + + plane->old_fb = plane->fb; + } + + /* This list is only not empty when disable_crtcs is set. */ + for_each_connector_in_state(state, conn, conn_state, i) { + ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); + + if (ret) + goto unlock; + } + + if (plane_mask) + ret = drm_atomic_commit(state); + +unlock: + if (plane_mask) + drm_atomic_clean_old_fb(dev, plane_mask, ret); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } + + drm_atomic_state_put(state); + + if (ret == -EINVAL && !disable_crtcs) { + disable_crtcs = true; + + state = drm_atomic_state_alloc(dev); + if (state) { + state->acquire_ctx = &ctx; + goto retry; + } + ret = -ENOMEM; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 955c5690bf64..e0678f8a51cf 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -183,6 +183,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_atomic_remove_fb(struct drm_framebuffer *fb); /* drm_plane.c */ diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 28a0108a1ab8..c0e593a7f9b4 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -773,6 +773,12 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) * in this manner. */ if (drm_framebuffer_read_refcount(fb) > 1) { + if (drm_drv_uses_atomic_modeset(dev)) { + int ret = drm_atomic_remove_fb(fb); + WARN(ret, "atomic remove_fb failed with %i\n", ret); + goto out; + } + drm_modeset_lock_all(dev); /* remove from any CRTC */ drm_for_each_crtc(crtc, dev) { @@ -790,6 +796,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) drm_modeset_unlock_all(dev); } +out: drm_framebuffer_unreference(fb); } EXPORT_SYMBOL(drm_framebuffer_remove); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c30d649cb147..b360e6251836 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -14,19 +14,19 @@ * GNU General Public License for more details. 
*/ -#include <asm/dma-iommu.h> - #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_of.h> #include <linux/dma-mapping.h> +#include <linux/dma-iommu.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/component.h> #include <linux/console.h> +#include <linux/iommu.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" @@ -50,28 +50,31 @@ static struct drm_driver rockchip_drm_driver; int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, struct device *dev) { - struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; + struct rockchip_drm_private *private = drm_dev->dev_private; int ret; if (!is_support_iommu) return 0; - ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); - if (ret) + ret = iommu_attach_device(private->domain, dev); + if (ret) { + dev_err(dev, "Failed to attach iommu device\n"); return ret; + } - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - - return arm_iommu_attach_device(dev, mapping); + return 0; } void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, struct device *dev) { + struct rockchip_drm_private *private = drm_dev->dev_private; + struct iommu_domain *domain = private->domain; + if (!is_support_iommu) return; - arm_iommu_detach_device(dev); + iommu_detach_device(domain, dev); } int rockchip_register_crtc_funcs(struct drm_crtc *crtc, @@ -123,11 +126,46 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, priv->crtc_funcs[pipe]->disable_vblank(crtc); } +static int rockchip_drm_init_iommu(struct drm_device *drm_dev) +{ + struct rockchip_drm_private *private = drm_dev->dev_private; + struct iommu_domain_geometry *geometry; + u64 start, end; + + if (!is_support_iommu) + return 0; + + private->domain = iommu_domain_alloc(&platform_bus_type); + if (!private->domain) + return -ENOMEM; + + geometry = &private->domain->geometry; + start = geometry->aperture_start; + end = geometry->aperture_end; + + DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n", + start, end); + drm_mm_init(&private->mm, start, end - start + 1); + mutex_init(&private->mm_lock); + + return 0; +} + +static void rockchip_iommu_cleanup(struct drm_device *drm_dev) +{ + struct rockchip_drm_private *private = drm_dev->dev_private; + + if (!is_support_iommu) + return; + + drm_mm_takedown(&private->mm); + iommu_domain_free(private->domain); +} + static int rockchip_drm_bind(struct device *dev) { struct drm_device *drm_dev; struct rockchip_drm_private *private; - struct dma_iommu_mapping *mapping = NULL; int ret; drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); @@ -151,38 +189,14 @@ static int rockchip_drm_bind(struct device *dev) rockchip_drm_mode_config_init(drm_dev); - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - if (!dev->dma_parms) { - ret = -ENOMEM; + ret = rockchip_drm_init_iommu(drm_dev); + if (ret) goto err_config_cleanup; - } - - if (is_support_iommu) { - /* TODO(djkurtz): fetch the mapping start/size from somewhere */ - mapping = arm_iommu_create_mapping(&platform_bus_type, - 0x00000000, - SZ_2G); - if (IS_ERR(mapping)) { - ret = PTR_ERR(mapping); - goto err_config_cleanup; - } - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) - goto err_release_mapping; - - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - - ret = arm_iommu_attach_device(dev, mapping); - if (ret) - goto err_release_mapping; - } /* Try to bind all sub drivers. 
*/ ret = component_bind_all(dev, drm_dev); if (ret) - goto err_detach_device; + goto err_iommu_cleanup; /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -207,8 +221,6 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_fbdev_fini; - if (is_support_iommu) - arm_iommu_release_mapping(mapping); return 0; err_fbdev_fini: rockchip_drm_fbdev_fini(drm_dev); @@ -217,12 +229,8 @@ err_vblank_cleanup: err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); component_unbind_all(dev, drm_dev); -err_detach_device: - if (is_support_iommu) - arm_iommu_detach_device(dev); -err_release_mapping: - if (is_support_iommu) - arm_iommu_release_mapping(mapping); +err_iommu_cleanup: + rockchip_iommu_cleanup(drm_dev); err_config_cleanup: drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; @@ -239,8 +247,7 @@ static void rockchip_drm_unbind(struct device *dev) drm_vblank_cleanup(drm_dev); drm_kms_helper_poll_fini(drm_dev); component_unbind_all(dev, drm_dev); - if (is_support_iommu) - arm_iommu_detach_device(dev); + rockchip_iommu_cleanup(drm_dev); drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; drm_dev_unregister(drm_dev); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index fb6226cf84b7..adc39302bec5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -30,6 +30,7 @@ struct drm_device; struct drm_connector; +struct iommu_domain; /* * Rockchip drm private crtc funcs. @@ -60,7 +61,10 @@ struct rockchip_drm_private { struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; struct drm_atomic_state *state; - + struct iommu_domain *domain; + /* protect drm_mm on multi-threads */ + struct mutex mm_lock; + struct drm_mm mm; struct list_head psr_list; spinlock_t psr_list_lock; }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b70f9423379c..df9e57064f19 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -16,11 +16,146 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> #include <drm/drm_vma_manager.h> +#include <linux/iommu.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" -static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, +static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + struct rockchip_drm_private *private = drm->dev_private; + int prot = IOMMU_READ | IOMMU_WRITE; + ssize_t ret; + + mutex_lock(&private->mm_lock); + + ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, + rk_obj->base.size, PAGE_SIZE, + 0, 0); + + mutex_unlock(&private->mm_lock); + if (ret < 0) { + DRM_ERROR("out of I/O virtual memory: %zd\n", ret); + return ret; + } + + rk_obj->dma_addr = rk_obj->mm.start; + + ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, + rk_obj->sgt->nents, prot); + if (ret < rk_obj->base.size) { + DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", + ret, rk_obj->base.size); + ret = -ENOMEM; + goto err_remove_node; + } + + rk_obj->size = ret; + + return 0; + +err_remove_node: + drm_mm_remove_node(&rk_obj->mm); + + return ret; +} + +static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + struct rockchip_drm_private *private = drm->dev_private; + + iommu_unmap(private->domain, 
rk_obj->dma_addr, rk_obj->size); + + mutex_lock(&private->mm_lock); + + drm_mm_remove_node(&rk_obj->mm); + + mutex_unlock(&private->mm_lock); + + return 0; +} + +static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + int ret, i; + struct scatterlist *s; + + rk_obj->pages = drm_gem_get_pages(&rk_obj->base); + if (IS_ERR(rk_obj->pages)) + return PTR_ERR(rk_obj->pages); + + rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; + + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + if (IS_ERR(rk_obj->sgt)) { + ret = PTR_ERR(rk_obj->sgt); + goto err_put_pages; + } + + /* + * Fake up the SG table so that dma_sync_sg_for_device() can be used + * to flush the pages associated with it. + * + * TODO: Replace this by drm_clflush_sg() once it can be implemented + * without relying on symbols that are not exported. + */ + for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + sg_dma_address(s) = sg_phys(s); + + dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, + DMA_TO_DEVICE); + + return 0; + +err_put_pages: + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); + return ret; +} + +static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj) +{ + sg_free_table(rk_obj->sgt); + kfree(rk_obj->sgt); + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); +} + +static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj, + bool alloc_kmap) +{ + int ret; + + ret = rockchip_gem_get_pages(rk_obj); + if (ret < 0) + return ret; + + ret = rockchip_gem_iommu_map(rk_obj); + if (ret < 0) + goto err_free; + + if (alloc_kmap) { + rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!rk_obj->kvaddr) { + DRM_ERROR("failed to vmap() buffer\n"); + ret = -ENOMEM; + goto err_unmap; + } + } + + return 0; + +err_unmap: + rockchip_gem_iommu_unmap(rk_obj); +err_free: + rockchip_gem_put_pages(rk_obj); + + return ret; +} + +static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj, bool alloc_kmap) { struct drm_gem_object *obj = &rk_obj->base; @@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, return 0; } -static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) +static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, + bool alloc_kmap) +{ + struct drm_gem_object *obj = &rk_obj->base; + struct drm_device *drm = obj->dev; + struct rockchip_drm_private *private = drm->dev_private; + + if (private->domain) + return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap); + else + return rockchip_gem_alloc_dma(rk_obj, alloc_kmap); +} + +static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj) +{ + vunmap(rk_obj->kvaddr); + rockchip_gem_iommu_unmap(rk_obj); + rockchip_gem_put_pages(rk_obj); +} + +static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj) { struct drm_gem_object *obj = &rk_obj->base; struct drm_device *drm = obj->dev; @@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) rk_obj->dma_attrs); } -static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) +{ + if (rk_obj->pages) + rockchip_gem_free_iommu(rk_obj); + else + rockchip_gem_free_dma(rk_obj); +} +static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, + struct vm_area_struct *vma) { + struct rockchip_gem_object 
*rk_obj = to_rockchip_obj(obj); + unsigned int i, count = obj->size >> PAGE_SHIFT; + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long uaddr = vma->vm_start; + unsigned long offset = vma->vm_pgoff; + unsigned long end = user_count + offset; int ret; + + if (user_count == 0) + return -ENXIO; + if (end > count) + return -ENXIO; + + for (i = offset; i < end; i++) { + ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]); + if (ret) + return ret; + uaddr += PAGE_SIZE; + } + + return 0; +} + +static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; + return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + obj->size, rk_obj->dma_attrs); +} + +static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int ret; + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + /* - * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear + * We allocated a struct page table for rk_obj, so clear * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). */ vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; - ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, - obj->size, rk_obj->dma_attrs); + if (rk_obj->pages) + ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); + else + ret = rockchip_drm_gem_object_mmap_dma(obj, vma); + if (ret) drm_gem_vm_close(vma); @@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) return rockchip_drm_gem_object_mmap(obj, vma); } +static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) +{ + drm_gem_object_release(&rk_obj->base); + kfree(rk_obj); +} + struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, bool alloc_kmap) @@ -117,7 +323,7 @@ struct rockchip_gem_object * obj = &rk_obj->base; - drm_gem_private_object_init(drm, obj, size); + drm_gem_object_init(drm, obj, size); ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); if (ret) @@ -126,7 +332,7 @@ struct rockchip_gem_object * return rk_obj; err_free_rk_obj: - kfree(rk_obj); + rockchip_gem_release_object(rk_obj); return ERR_PTR(ret); } @@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj; - drm_gem_free_mmap_offset(obj); - rk_obj = to_rockchip_obj(obj); rockchip_gem_free_buf(rk_obj); - kfree(rk_obj); + rockchip_gem_release_object(rk_obj); } /* @@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) struct sg_table *sgt; int ret; + if (rk_obj->pages) + return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return ERR_PTR(-ENOMEM); @@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + if (rk_obj->pages) + return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return NULL; @@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { - /* Nothing to do */ + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + + if (rk_obj->pages) { + vunmap(vaddr); + return; + } + + /* Nothing to do if allocated by DMA mapping API. 
*/ } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 18b3488db4ec..3f6ea4d18a5c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -23,7 +23,15 @@ struct rockchip_gem_object { void *kvaddr; dma_addr_t dma_addr; + /* Used when IOMMU is disabled */ unsigned long dma_attrs; + + /* Used when IOMMU is enabled */ + struct drm_mm_node mm; + unsigned long num_pages; + struct page **pages; + struct sg_table *sgt; + size_t size; }; struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index e992bed98dcb..d45a4335df5d 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -134,21 +134,6 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc) sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); } -static void sti_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - struct sti_mixer *mixer = to_sti_mixer(crtc); - - if (crtc->state->event) { - crtc->state->event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - - mixer->pending_event = crtc->state->event; - crtc->state->event = NULL; - } -} - static void sti_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -156,6 +141,8 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, struct sti_mixer *mixer = to_sti_mixer(crtc); struct sti_compositor *compo = dev_get_drvdata(mixer->dev); struct drm_plane *p; + struct drm_pending_vblank_event *event; + unsigned long flags; DRM_DEBUG_DRIVER("\n"); @@ -220,13 +207,24 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, break; } } + + event = crtc->state->event; + if (event) { + crtc->state->event = NULL; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } } static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, .mode_set_nofb = sti_crtc_mode_set_nofb, - .atomic_begin = sti_crtc_atomic_begin, .atomic_flush = sti_crtc_atomic_flush, }; @@ -250,7 +248,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb, struct sti_compositor *compo; struct drm_crtc *crtc = data; struct sti_mixer *mixer; - unsigned long flags; struct sti_private *priv; unsigned int pipe; @@ -267,14 +264,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb, drm_crtc_handle_vblank(crtc); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - if (mixer->pending_event) { - drm_crtc_send_vblank_event(crtc, mixer->pending_event); - drm_crtc_vblank_put(crtc); - mixer->pending_event = NULL; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - if (mixer->status == STI_MIXER_DISABLING) { struct drm_plane *p; @@ -317,19 +306,12 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe) struct sti_private *priv = drm_dev->dev_private; struct sti_compositor *compo = priv->compo; struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe]; - struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc; struct sti_vtg *vtg = compo->vtg[pipe]; DRM_DEBUG_DRIVER("\n"); if (sti_vtg_unregister_client(vtg, vtg_vblank_nb)) DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); - - /* free the resources of the pending requests */ - 
if (compo->mixer[pipe]->pending_event) { - drm_crtc_vblank_put(crtc); - compo->mixer[pipe]->pending_event = NULL; - } } static int sti_crtc_late_register(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 788feed208d7..e6c1646b9c53 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -58,7 +58,9 @@ static int sti_drm_fps_set(void *data, u64 val) list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { struct sti_plane *plane = to_sti_plane(p); + memset(&plane->fps_info, 0, sizeof(plane->fps_info)); plane->fps_info.output = (val >> i) & 1; + i++; } @@ -115,50 +117,13 @@ err: return ret; } -static void sti_atomic_schedule(struct sti_private *private, - struct drm_atomic_state *state) -{ - private->commit.state = state; - schedule_work(&private->commit.work); -} - -static void sti_atomic_complete(struct sti_private *private, - struct drm_atomic_state *state) -{ - struct drm_device *drm = private->drm_dev; - - /* - * Everything below can be run asynchronously without the need to grab - * any modeset locks at all under one condition: It must be guaranteed - * that the asynchronous work has either been cancelled (if the driver - * supports it, which at least requires that the framebuffers get - * cleaned up with drm_atomic_helper_cleanup_planes()) or completed - * before the new state gets committed on the software side with - * drm_atomic_helper_swap_state(). - * - * This scheme allows new atomic state updates to be prepared and - * checked in parallel to the asynchronous completion of the previous - * update. Which is important since compositors need to figure out the - * composition of the next frame right after having submitted the - * current layout. - */ - - drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_planes(drm, state, 0); - drm_atomic_helper_commit_modeset_enables(drm, state); - - drm_atomic_helper_wait_for_vblanks(drm, state); - - drm_atomic_helper_cleanup_planes(drm, state); - drm_atomic_state_put(state); -} - -static void sti_atomic_work(struct work_struct *work) +static void sti_drm_dbg_cleanup(struct drm_minor *minor) { - struct sti_private *private = container_of(work, - struct sti_private, commit.work); + drm_debugfs_remove_files(sti_drm_dbg_list, + ARRAY_SIZE(sti_drm_dbg_list), minor); - sti_atomic_complete(private, private->commit.state); + drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops, + 1, minor); } static int sti_atomic_check(struct drm_device *dev, @@ -181,38 +146,6 @@ static int sti_atomic_check(struct drm_device *dev, return ret; } -static int sti_atomic_commit(struct drm_device *drm, - struct drm_atomic_state *state, bool nonblock) -{ - struct sti_private *private = drm->dev_private; - int err; - - err = drm_atomic_helper_prepare_planes(drm, state); - if (err) - return err; - - /* serialize outstanding nonblocking commits */ - mutex_lock(&private->commit.lock); - flush_work(&private->commit.work); - - /* - * This is the point of no return - everything below never fails except - * when the hw goes bonghits. Which means we can commit the new state on - * the software side now. 
- */ - - drm_atomic_helper_swap_state(state, true); - - drm_atomic_state_get(state); - if (nonblock) - sti_atomic_schedule(private, state); - else - sti_atomic_complete(private, state); - - mutex_unlock(&private->commit.lock); - return 0; -} - static void sti_output_poll_changed(struct drm_device *ddev) { struct sti_private *private = ddev->dev_private; @@ -224,7 +157,7 @@ static const struct drm_mode_config_funcs sti_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = sti_output_poll_changed, .atomic_check = sti_atomic_check, - .atomic_commit = sti_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void sti_mode_config_init(struct drm_device *dev) @@ -304,9 +237,6 @@ static int sti_init(struct drm_device *ddev) dev_set_drvdata(ddev->dev, ddev); private->drm_dev = ddev; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, sti_atomic_work); - drm_mode_config_init(ddev); sti_mode_config_init(ddev); @@ -327,6 +257,7 @@ static void sti_cleanup(struct drm_device *ddev) drm_kms_helper_poll_fini(ddev); drm_vblank_cleanup(ddev); + component_unbind_all(ddev->dev, ddev); kfree(private); ddev->dev_private = NULL; } diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h index 4c75845cc9ab..6502ed2d3351 100644 --- a/drivers/gpu/drm/sti/sti_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h @@ -25,12 +25,6 @@ struct sti_private { struct drm_property *plane_zorder_property; struct drm_device *drm_dev; struct drm_fbdev_cma *fbdev; - - struct { - struct drm_atomic_state *state; - struct work_struct work; - struct mutex lock; - } commit; }; extern struct platform_driver sti_tvout_driver; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 877d053d86f4..86279f5022c2 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -610,7 +610,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_gdp *gdp = to_sti_gdp(plane); struct drm_crtc *crtc = state->crtc; - struct sti_compositor *compo = dev_get_drvdata(gdp->dev); struct drm_framebuffer *fb = state->fb; struct drm_crtc_state *crtc_state; struct sti_mixer *mixer; @@ -648,45 +647,30 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, return -EINVAL; } - if (!gdp->vtg) { - /* Register gdp callback */ - gdp->vtg = compo->vtg[mixer->id]; - if (sti_vtg_register_client(gdp->vtg, - &gdp->vtg_field_nb, crtc)) { - DRM_ERROR("Cannot register VTG notifier\n"); + /* Set gdp clock */ + if (mode->clock && gdp->clk_pix) { + struct clk *clkp; + int rate = mode->clock * 1000; + int res; + + /* + * According to the mixer used, the gdp pixel clock + * should have a different parent clock. + */ + if (mixer->id == STI_MIXER_MAIN) + clkp = gdp->clk_main_parent; + else + clkp = gdp->clk_aux_parent; + + if (clkp) + clk_set_parent(gdp->clk_pix, clkp); + + res = clk_set_rate(gdp->clk_pix, rate); + if (res < 0) { + DRM_ERROR("Cannot set rate (%dHz) for gdp\n", + rate); return -EINVAL; } - - /* Set and enable gdp clock */ - if (gdp->clk_pix) { - struct clk *clkp; - int rate = mode->clock * 1000; - int res; - - /* - * According to the mixer used, the gdp pixel clock - * should have a different parent clock. 
- */ - if (mixer->id == STI_MIXER_MAIN) - clkp = gdp->clk_main_parent; - else - clkp = gdp->clk_aux_parent; - - if (clkp) - clk_set_parent(gdp->clk_pix, clkp); - - res = clk_set_rate(gdp->clk_pix, rate); - if (res < 0) { - DRM_ERROR("Cannot set rate (%dHz) for gdp\n", - rate); - return -EINVAL; - } - - if (clk_prepare_enable(gdp->clk_pix)) { - DRM_ERROR("Failed to prepare/enable gdp\n"); - return -EINVAL; - } - } } DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", @@ -724,6 +708,31 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, if (!crtc || !fb) return; + if ((oldstate->fb == state->fb) && + (oldstate->crtc_x == state->crtc_x) && + (oldstate->crtc_y == state->crtc_y) && + (oldstate->crtc_w == state->crtc_w) && + (oldstate->crtc_h == state->crtc_h) && + (oldstate->src_x == state->src_x) && + (oldstate->src_y == state->src_y) && + (oldstate->src_w == state->src_w) && + (oldstate->src_h == state->src_h)) { + /* No change since last update, do not post cmd */ + DRM_DEBUG_DRIVER("No change, not posting cmd\n"); + plane->status = STI_PLANE_UPDATED; + return; + } + + if (!gdp->vtg) { + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); + struct sti_mixer *mixer = to_sti_mixer(crtc); + + /* Register gdp callback */ + gdp->vtg = compo->vtg[mixer->id]; + sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc); + clk_prepare_enable(gdp->clk_pix); + } + mode = &crtc->mode; dst_x = state->crtc_x; dst_y = state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index c9151849d604..ce2dcba679d5 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -95,7 +95,6 @@ #define HDMI_CFG_HDCP_EN BIT(2) #define HDMI_CFG_ESS_NOT_OESS BIT(3) #define HDMI_CFG_H_SYNC_POL_NEG BIT(4) -#define HDMI_CFG_SINK_TERM_DET_EN BIT(5) #define HDMI_CFG_V_SYNC_POL_NEG BIT(6) #define HDMI_CFG_422_EN BIT(8) #define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12) @@ -159,7 +158,6 @@ struct sti_hdmi_connector { struct drm_encoder *encoder; struct sti_hdmi *hdmi; struct drm_property *colorspace_property; - struct drm_property *hdmi_mode_property; }; #define to_sti_hdmi_connector(x) \ @@ -266,12 +264,9 @@ static void hdmi_config(struct sti_hdmi *hdmi) /* Select encryption type and the framing mode */ conf |= HDMI_CFG_ESS_NOT_OESS; - if (hdmi->hdmi_mode == HDMI_MODE_HDMI) + if (hdmi->hdmi_monitor) conf |= HDMI_CFG_HDMI_NOT_DVI; - /* Enable sink term detection */ - conf |= HDMI_CFG_SINK_TERM_DET_EN; - /* Set Hsync polarity */ if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) { DRM_DEBUG_DRIVER("H Sync Negative\n"); @@ -607,9 +602,6 @@ static void hdmi_dbg_cfg(struct seq_file *s, int val) tmp = val & HDMI_CFG_ESS_NOT_OESS; DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable"); seq_puts(s, "\t\t\t\t\t"); - tmp = val & HDMI_CFG_SINK_TERM_DET_EN; - DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable"); - seq_puts(s, "\t\t\t\t\t"); tmp = val & HDMI_CFG_H_SYNC_POL_NEG; DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal"); seq_puts(s, "\t\t\t\t\t"); @@ -977,6 +969,11 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) if (!edid) goto fail; + hdmi->hdmi_monitor = drm_detect_hdmi_monitor(edid); + DRM_DEBUG_KMS("%s : %dx%d cm\n", + (hdmi->hdmi_monitor ? 
"hdmi monitor" : "dvi monitor"), + edid->width_cm, edid->height_cm); + count = drm_add_edid_modes(connector, edid); drm_mode_connector_update_edid_property(connector, edid); drm_edid_to_eld(connector, edid); @@ -1060,19 +1057,6 @@ static void sti_hdmi_connector_init_property(struct drm_device *drm_dev, } hdmi_connector->colorspace_property = prop; drm_object_attach_property(&connector->base, prop, hdmi->colorspace); - - /* hdmi_mode property */ - hdmi->hdmi_mode = DEFAULT_HDMI_MODE; - prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode", - hdmi_mode_names, - ARRAY_SIZE(hdmi_mode_names)); - if (!prop) { - DRM_ERROR("fails to create colorspace property\n"); - return; - } - hdmi_connector->hdmi_mode_property = prop; - drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode); - } static int @@ -1090,11 +1074,6 @@ sti_hdmi_connector_set_property(struct drm_connector *connector, return 0; } - if (property == hdmi_connector->hdmi_mode_property) { - hdmi->hdmi_mode = val; - return 0; - } - DRM_ERROR("failed to set hdmi connector property\n"); return -EINVAL; } @@ -1114,11 +1093,6 @@ sti_hdmi_connector_get_property(struct drm_connector *connector, return 0; } - if (property == hdmi_connector->hdmi_mode_property) { - *val = hdmi->hdmi_mode; - return 0; - } - DRM_ERROR("failed to get hdmi connector property\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index 119bc3582ac7..407012350f1a 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -30,19 +30,6 @@ struct hdmi_audio_params { struct hdmi_audio_infoframe cea; }; -/* values for the framing mode property */ -enum sti_hdmi_modes { - HDMI_MODE_HDMI, - HDMI_MODE_DVI, -}; - -static const struct drm_prop_enum_list hdmi_mode_names[] = { - { HDMI_MODE_HDMI, "hdmi" }, - { HDMI_MODE_DVI, "dvi" }, -}; - -#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI - static const struct drm_prop_enum_list colorspace_mode_names[] = { { HDMI_COLORSPACE_RGB, "rgb" }, { HDMI_COLORSPACE_YUV422, "yuv422" }, @@ -73,7 +60,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = { * @reset: reset control of the hdmi phy * @ddc_adapt: i2c ddc adapter * @colorspace: current colorspace selected - * @hdmi_mode: select framing for HDMI or DVI + * @hdmi_monitor: true if HDMI monitor detected else DVI monitor assumed * @audio_pdev: ASoC hdmi-codec platform device * @audio: hdmi audio parameters. 
* @drm_connector: hdmi connector @@ -98,7 +85,7 @@ struct sti_hdmi { struct reset_control *reset; struct i2c_adapter *ddc_adapt; enum hdmi_colorspace colorspace; - enum sti_hdmi_modes hdmi_mode; + bool hdmi_monitor; struct platform_device *audio_pdev; struct hdmi_audio_params audio; struct drm_connector *drm_connector; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 4376fd8a8e52..66f843148ef7 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1037,9 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, src_w = state->src_w >> 16; src_h = state->src_h >> 16; - if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, - src_w, src_h, - dst_w, dst_h)) { + if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode, + src_w, src_h, + dst_w, dst_h)) { DRM_ERROR("Scaling beyond HW capabilities\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index 830a3c42d886..e64a00e61049 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -28,7 +28,6 @@ enum sti_mixer_status { * @regs: mixer registers * @id: id of the mixer * @drm_crtc: crtc object link to the mixer - * @pending_event: set if a flip event is pending on crtc * @status: to know the status of the mixer */ struct sti_mixer { @@ -36,7 +35,6 @@ struct sti_mixer { void __iomem *regs; int id; struct drm_crtc drm_crtc; - struct drm_pending_vblank_event *pending_event; enum sti_mixer_status status; }; diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index c3d9c8ae14af..943bce56692e 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -17,7 +17,6 @@ #include "sti_vtg.h" #define VTG_MODE_MASTER 0 -#define VTG_MODE_SLAVE_BY_EXT0 1 /* registers offset */ #define VTG_MODE 0x0000 @@ -132,7 +131,6 @@ struct sti_vtg_sync_params { * @irq_status: store the IRQ status value * @notifier_list: notifier callback * @crtc: the CRTC for vblank event - * @slave: slave vtg * @link: List node to link the structure in lookup list */ struct sti_vtg { @@ -144,7 +142,6 @@ struct sti_vtg { u32 irq_status; struct raw_notifier_head notifier_list; struct drm_crtc *crtc; - struct sti_vtg *slave; struct list_head link; }; @@ -166,10 +163,6 @@ struct sti_vtg *of_vtg_find(struct device_node *np) static void vtg_reset(struct sti_vtg *vtg) { - /* reset slave and then master */ - if (vtg->slave) - vtg_reset(vtg->slave); - writel(1, vtg->regs + VTG_DRST_AUTOC); } @@ -259,10 +252,6 @@ static void vtg_set_mode(struct sti_vtg *vtg, { unsigned int i; - if (vtg->slave) - vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0, - vtg->sync_params, mode); - /* Set the number of clock cycles per line */ writel(mode->htotal, vtg->regs + VTG_CLKLN); @@ -318,11 +307,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg, vtg_reset(vtg); - /* enable irq for the vtg vblank synchro */ - if (vtg->slave) - vtg_enable_irq(vtg->slave); - else - vtg_enable_irq(vtg); + vtg_enable_irq(vtg); } /** @@ -365,18 +350,12 @@ u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x) int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb, struct drm_crtc *crtc) { - if (vtg->slave) - return sti_vtg_register_client(vtg->slave, nb, crtc); - vtg->crtc = crtc; return raw_notifier_chain_register(&vtg->notifier_list, nb); } int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb) { - if (vtg->slave) - return sti_vtg_unregister_client(vtg->slave, nb); - return 
raw_notifier_chain_unregister(&vtg->notifier_list, nb); } @@ -434,29 +413,20 @@ static int vtg_probe(struct platform_device *pdev) return -ENOMEM; } - np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); - if (np) { - vtg->slave = of_vtg_find(np); - of_node_put(np); + vtg->irq = platform_get_irq(pdev, 0); + if (vtg->irq < 0) { + DRM_ERROR("Failed to get VTG interrupt\n"); + return vtg->irq; + } - if (!vtg->slave) - return -EPROBE_DEFER; - } else { - vtg->irq = platform_get_irq(pdev, 0); - if (vtg->irq < 0) { - DRM_ERROR("Failed to get VTG interrupt\n"); - return vtg->irq; - } - - RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list); - - ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, - vtg_irq_thread, IRQF_ONESHOT, - dev_name(dev), vtg); - if (ret < 0) { - DRM_ERROR("Failed to register VTG interrupt\n"); - return ret; - } + RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list); + + ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, + vtg_irq_thread, IRQF_ONESHOT, + dev_name(dev), vtg); + if (ret < 0) { + DRM_ERROR("Failed to register VTG interrupt\n"); + return ret; } vtg_register(vtg); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 7e75fa053473..a8e8db024980 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -42,7 +42,7 @@ #include <linux/spinlock.h> #include <linux/reservation.h> -#define TTM_MAX_BO_PRIORITY 16 +#define TTM_MAX_BO_PRIORITY 16U struct ttm_backend_func { /** diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f330ba4547cf..900129c8f6cf 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -109,6 +109,7 @@ header-y += dlm_netlink.h header-y += dlm_plock.h header-y += dm-ioctl.h header-y += dm-log-userspace.h +header-y += dma-buf.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h
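
For reference, the sti_crtc.c hunks above drop the driver-private pending_event bookkeeping and instead complete the page-flip event at the end of .atomic_flush, which is the pattern most atomic drivers use. The sketch below is illustrative only and not part of the patch: the function name is made up, the hardware programming is elided, and it assumes the DRM helpers of this kernel generation (the vblank declarations later moved to <drm/drm_vblank.h>).

	#include <linux/spinlock.h>
	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	/*
	 * Minimal sketch of the event handling now done at the end of
	 * sti_crtc_atomic_flush(): take the event out of the new CRTC state,
	 * arm it on the next vblank if a vblank reference can be taken,
	 * otherwise send it immediately.
	 */
	static void example_crtc_atomic_flush(struct drm_crtc *crtc,
					      struct drm_crtc_state *old_state)
	{
		struct drm_pending_vblank_event *event = crtc->state->event;
		unsigned long flags;

		/* ... program the hardware from crtc->state here ... */

		if (!event)
			return;

		crtc->state->event = NULL;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);	/* fires on next vblank */
		else
			drm_crtc_send_vblank_event(crtc, event);	/* vblank off: complete now */
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

Because the event is armed while holding a vblank reference, the vblank core drops that reference when it delivers the event, which is why the explicit drm_crtc_vblank_put() that sti_crtc_vblank_cb() used to perform is no longer needed.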
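
The sti_drv.c change replaces the driver's hand-rolled nonblocking commit (the commit.work worker, commit.lock and commit.state fields) with the generic helper. Roughly, the wiring ends up looking like the sketch below; this is illustrative only, the structure name is made up, and the driver in the patch keeps its own sti_atomic_check() wrapper rather than using drm_atomic_helper_check() directly.

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_fb_cma_helper.h>

	/*
	 * Sketch of mode_config funcs once the private commit worker is gone:
	 * drm_atomic_helper_commit() already provides both the blocking and the
	 * nonblocking paths that sti_atomic_commit()/sti_atomic_work()
	 * implemented by hand.
	 */
	static const struct drm_mode_config_funcs example_mode_config_funcs = {
		.fb_create	= drm_fb_cma_create,
		.atomic_check	= drm_atomic_helper_check,
		.atomic_commit	= drm_atomic_helper_commit,
	};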
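
In sti_gdp.c, the pixel-clock programming moves out of the one-shot VTG registration path: the parent and rate are now chosen in atomic_check for every mode, while clk_prepare_enable() is deferred to the first atomic_update. A compressed, illustrative sketch of the clock side follows, with placeholder handles standing in for gdp->clk_pix, gdp->clk_main_parent and gdp->clk_aux_parent.

	#include <linux/clk.h>
	#include <drm/drm_modes.h>

	/*
	 * Illustrative only: re-parent the pixel clock for the mixer in use and
	 * set its rate from the mode. mode->clock is expressed in kHz.
	 */
	static int example_set_gdp_pixclk(struct clk *clk_pix, struct clk *parent,
					  const struct drm_display_mode *mode)
	{
		unsigned long rate = mode->clock * 1000;
		int ret;

		if (parent) {
			ret = clk_set_parent(clk_pix, parent);
			if (ret)
				return ret;
		}

		ret = clk_set_rate(clk_pix, rate);
		if (ret < 0)
			return ret;

		/* the patch only does this once, on the first plane update */
		return clk_prepare_enable(clk_pix);
	}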
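
Finally, sti_hdmi.c stops exposing a manual "hdmi_mode" property and instead infers HDMI versus DVI signalling from the sink's EDID with drm_detect_hdmi_monitor(), caching the result for hdmi_config() to decide on HDMI_CFG_HDMI_NOT_DVI. A self-contained sketch of that pattern in a .get_modes() callback is below; the example_hdmi structure and its fields are placeholders, and drm_mode_connector_update_edid_property() is the helper name of this kernel generation (later renamed drm_connector_update_edid_property()).

	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include <linux/slab.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_edid.h>

	struct example_hdmi {
		struct drm_connector	connector;
		struct i2c_adapter	*ddc;
		bool			is_hdmi;	/* plays the role of sti_hdmi.hdmi_monitor */
	};

	static int example_hdmi_get_modes(struct drm_connector *connector)
	{
		struct example_hdmi *h =
			container_of(connector, struct example_hdmi, connector);
		struct edid *edid;
		int count;

		edid = drm_get_edid(connector, h->ddc);
		if (!edid)
			return 0;

		/* true only when the EDID carries the HDMI vendor-specific block */
		h->is_hdmi = drm_detect_hdmi_monitor(edid);

		count = drm_add_edid_modes(connector, edid);
		drm_mode_connector_update_edid_property(connector, edid);

		kfree(edid);
		return count;
	}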