From b89659b783291f9e91dd2d91fc34fc6354116bfe Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 3 Mar 2020 19:23:24 +0800 Subject: drm/amdgpu: amends feature bits for MM bandwidth mgr Signed-off-by: Monk Liu Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f0128f745bd2..0a95b137eadb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -83,6 +83,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, /* VRAM LOST by GIM */ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, + /* MM bandwidth */ + AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8, /* PP ONE VF MODE in GIM */ AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), }; -- cgit From 3aa0115d238c71423d0e212138678a8cf51d4361 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 4 Mar 2020 14:02:55 +0800 Subject: drm/amdgpu: cleanup all virtualization detection routine we need to move virt detection much earlier because: 1) HW team confirms us that RCC_IOV_FUNC_IDENTIFIER will always be at DE5 (dw) mmio offset from vega10, this way there is no need to implement detect_hw_virt() routine in each nbio/chip file. for VI SRIOV chip (tonga & fiji), the BIF_IOV_FUNC_IDENTIFIER is at 0x1503 2) we need to acknowledged we are SRIOV VF before we do IP discovery because the IP discovery content will be updated by host everytime after it recieved a new coming "REQ_GPU_INIT_DATA" request from guest (there will be patches for this new handshake soon). Signed-off-by: Monk Liu Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 33 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 6 ++++ drivers/gpu/drm/amd/amdgpu/cik.c | 8 ------ drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 18 ------------ drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 18 ------------ drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 7 ----- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 18 ------------ drivers/gpu/drm/amd/amdgpu/nv.c | 2 -- drivers/gpu/drm/amd/amdgpu/si.c | 8 ------ drivers/gpu/drm/amd/amdgpu/soc15.c | 1 - drivers/gpu/drm/amd/amdgpu/vi.c | 24 ---------------- .../amd/include/asic_reg/nbif/nbif_6_1_offset.h | 2 ++ .../amd/include/asic_reg/nbio/nbio_7_0_offset.h | 2 ++ .../amd/include/asic_reg/nbio/nbio_7_4_offset.h | 2 ++ 16 files changed, 48 insertions(+), 105 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f422ef58b4d8..449720086fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3055,6 +3055,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) adev->enable_mes = true; + /* detect hw virtualization here */ + amdgpu_detect_virtualization(adev); + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { r = amdgpu_discovery_init(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 919bd566ba3c..edaac242ff85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs { u32 *flags); void 
(*ih_control)(struct amdgpu_device *adev); void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); void (*remap_hdp_registers)(struct amdgpu_device *adev); void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index adc813cde8e2..43a1ee332727 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -287,3 +287,36 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) } } } + +void amdgpu_detect_virtualization(struct amdgpu_device *adev) +{ + uint32_t reg; + + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_FIJI: + reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + break; + case CHIP_VEGA10: + case CHIP_VEGA20: + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_ARCTURUS: + reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER); + break; + default: /* other chip doesn't support SRIOV */ + reg = 0; + break; + } + + if (reg & 1) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + + if (!reg) { + if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 0a95b137eadb..74f9843fce82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -30,6 +30,11 @@ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +/* all asic after AI use this offset */ +#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 +/* tonga/fiji use this offset */ +#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 + struct amdgpu_mm_table { struct amdgpu_bo *bo; uint32_t *cpu_addr; @@ -305,4 +310,5 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, unsigned int key, unsigned int chksum); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_detect_virtualization(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 006f21ef7ddf..db68ffa27984 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1811,12 +1811,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } -static void cik_detect_hw_virtualization(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) { @@ -2179,8 +2173,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block = int cik_set_ip_blocks(struct amdgpu_device *adev) { - cik_detect_hw_virtualization(adev); - switch (adev->asic_type) { case CHIP_BONAIRE: amdgpu_device_ip_block_add(adev, &cik_common_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index f3a3fe746222..cbcf04578b99 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = { .ref_and_mask_sdma1 = 
BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v2_3_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_clockgating_state = nbio_v2_3_get_clockgating_state, .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, - .detect_hw_virt = nbio_v2_3_detect_hw_virt, .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 635d9e1fc0a3..7b2fb050407d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK }; -static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v6_1_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_clockgating_state = nbio_v6_1_get_clockgating_state, .ih_control = nbio_v6_1_ih_control, .init_registers = nbio_v6_1_init_registers, - .detect_hw_virt = nbio_v6_1_detect_hw_virt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index d6cbf26074bc..d34628e113fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { @@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { .get_clockgating_state = nbio_v7_0_get_clockgating_state, .ih_control = nbio_v7_0_ih_control, .init_registers = nbio_v7_0_init_registers, - .detect_hw_virt = nbio_v7_0_detect_hw_virt, .remap_hdp_registers = nbio_v7_0_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 149d386590df..41c53c149852 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, }; -static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) 
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { @@ -561,7 +544,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_clockgating_state = nbio_v7_4_get_clockgating_state, .ih_control = nbio_v7_4_ih_control, .init_registers = nbio_v7_4_init_registers, - .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 033cbbca2072..a67d78d7eeeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -465,8 +465,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.funcs = &nbio_v2_3_funcs; adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - adev->nbio.funcs->detect_hw_virt(adev); - if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_nv_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4d415bfdb42f..153db3f763bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) return 0; } -static void si_detect_hw_virtualization(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) { @@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block = int si_set_ip_blocks(struct amdgpu_device *adev) { - si_detect_hw_virtualization(adev); - switch (adev->asic_type) { case CHIP_VERDE: case CHIP_TAHITI: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index a40499d51c93..a8c90d83a9ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -712,7 +712,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df.funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); - adev->nbio.funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_ai_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 78b35901643b..0a90c296409b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static void vi_detect_hw_virtualization(struct amdgpu_device *adev) -{ - uint32_t reg = 0; - - if (adev->asic_type == CHIP_TONGA || - adev->asic_type == CHIP_FIJI) { - reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - /* bit0: 0 means pf and 1 means vf */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - /* bit31: 0 means disable IOV and 1 means enable */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - } - - if (reg == 0) { - if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static const struct amdgpu_allowed_register_entry 
vi_allowed_read_registers[] = { {mmGRBM_STATUS}, {mmGRBM_STATUS2}, @@ -1730,9 +1709,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block = int vi_set_ip_blocks(struct amdgpu_device *adev) { - /* in early init stage, vbios code won't work */ - vi_detect_hw_virtualization(adev); - if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_vi_virt_ops; diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h index 68d0ffad28c7..92fd27c26a77 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h @@ -1162,8 +1162,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0 #define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate #define mmRCC_CONFIG_RESERVED_BASE_IDX 0 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0 +#endif // addressBlock: syshub_mmreg_ind_syshubdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h index 435462294fbc..a7cd760ebf8f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h @@ -4251,8 +4251,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2 #define mmRCC_CONFIG_RESERVED 0x00c4 #define mmRCC_CONFIG_RESERVED_BASE_IDX 2 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 +#endif // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index ce5830ebe095..0c5a08bc034a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2687,8 +2687,10 @@ #define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2 #define mmRCC_CONFIG_RESERVED 0x00c4 #define mmRCC_CONFIG_RESERVED_BASE_IDX 2 +#ifndef mmRCC_IOV_FUNC_IDENTIFIER #define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 +#endif // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 -- cgit From aa53bc2edb66624ac05902910c41d8b4f685b8bc Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 4 Mar 2020 11:38:36 +0800 Subject: drm/amdgpu: introduce new request and its function 1) modify xgpu_nv_send_access_requests to support new idh request 2) introduce new function: req_gpu_init_data() which is used to notify host to prepare vbios/ip-discovery/pfvf exchange Signed-off-by: Monk Liu Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 13 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++ drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 48 ++++++++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 2 +- 4 files changed, 57 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index d791bfe5ae68..4d06c79065bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -153,6 +153,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev) return 0; } +void amdgpu_virt_request_init_data(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + + if (virt->ops && virt->ops->req_init_data) + virt->ops->req_init_data(adev); + + if 
(adev->virt.req_init_data_ver > 0) + DRM_INFO("host supports REQ_INIT_DATA handshake\n"); + else + DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n"); +} + /** * amdgpu_virt_wait_reset() - wait for reset gpu completed * @amdgpu: amdgpu device. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 74f9843fce82..f6ae3c656304 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -59,6 +59,7 @@ struct amdgpu_vf_error_buffer { struct amdgpu_virt_ops { int (*req_full_gpu)(struct amdgpu_device *adev, bool init); int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); + int (*req_init_data)(struct amdgpu_device *adev); int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); @@ -263,6 +264,7 @@ struct amdgpu_virt { struct amdgpu_virt_fw_reserve fw_reserve; uint32_t gim_feature; uint32_t reg_access_mode; + int req_init_data_ver; }; #define amdgpu_sriov_enabled(adev) \ @@ -303,6 +305,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); +void amdgpu_virt_request_init_data(struct amdgpu_device *adev); int amdgpu_virt_wait_reset(struct amdgpu_device *adev); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index d9ce12c558be..6b9e390d0493 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -109,7 +109,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) timeout -= 10; } while (timeout > 1); - pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); return -ETIME; } @@ -163,18 +162,45 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { int r; + enum idh_event event = -1; xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); - /* start to check msg if request is idh_req_gpu_init_access */ - if (req == IDH_REQ_GPU_INIT_ACCESS || - req == IDH_REQ_GPU_FINI_ACCESS || - req == IDH_REQ_GPU_RESET_ACCESS) { - r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + switch (req) { + case IDH_REQ_GPU_INIT_ACCESS: + case IDH_REQ_GPU_FINI_ACCESS: + case IDH_REQ_GPU_RESET_ACCESS: + event = IDH_READY_TO_ACCESS_GPU; + break; + case IDH_REQ_GPU_INIT_DATA: + event = IDH_REQ_GPU_INIT_DATA_READY; + break; + default: + break; + } + + if (event != -1) { + r = xgpu_nv_poll_msg(adev, event); if (r) { - pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); - return r; + if (req != IDH_REQ_GPU_INIT_DATA) { + pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + return r; + } + else /* host doesn't support REQ_GPU_INIT_DATA handshake */ + adev->virt.req_init_data_ver = 0; + } else { + if (req == IDH_REQ_GPU_INIT_DATA) + { + adev->virt.req_init_data_ver = + RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW1)); + + /* assume V1 in case host doesn't set version number */ + if (adev->virt.req_init_data_ver < 1) + adev->virt.req_init_data_ver = 1; + } } + /* Retrieve checksum from mailbox2 */ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { 
adev->virt.fw_reserve.checksum_key = @@ -212,6 +238,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, return r; } +static int xgpu_nv_request_init_data(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); +} + static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -377,6 +408,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .req_init_data = xgpu_nv_request_init_data, .reset_gpu = xgpu_nv_request_reset, .wait_reset = NULL, .trans_msg = xgpu_nv_mailbox_trans_msg, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 598ed2c57155..b9eed0f83b62 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -25,7 +25,7 @@ #define __MXGPU_NV_H__ #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 -#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 #define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 enum idh_request { -- cgit From 95a2f917387a23c8b09f4ab95e1560a69db5b1f1 Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Tue, 7 Apr 2020 18:08:39 +0800 Subject: drm/amdgpu: restrict debugfs register access under SR-IOV Under bare metal, there is no more else to take care of the GPU register access through MMIO. Under Virtualization, to access GPU register is implemented through KIQ during run-time due to world-switch. Therefore, under SR-IOV user can only access debugfs to r/w GPU registers when meets all three conditions below. - amdgpu_gpu_recovery=0 - TDR happened - in_gpu_reset=0 v2: merge amdgpu_virt_can_access_debugfs() into amdgpu_virt_enable_access_debugfs() v3: drop ret variable in amdgpu_virt_enable_access_debugfs() and directly return result Signed-off-by: Yintian Tao Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 73 +++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 8 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 24 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 7 +++ 4 files changed, 106 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index c0f9a651dc06..1a4894fa3693 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -152,11 +152,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; } mutex_lock(&adev->grbm_idx_mutex); @@ -207,6 +212,7 @@ end: pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -255,6 +261,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while 
(size) { uint32_t value; @@ -263,6 +273,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -275,6 +286,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -304,6 +316,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -311,6 +327,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -325,6 +342,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -354,6 +372,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -362,6 +384,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -374,6 +397,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -403,6 +427,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -410,6 +438,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -424,6 +453,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -453,6 +483,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -461,6 +495,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -473,6 +508,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -502,6 +538,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (r < 0) 
return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + while (size) { uint32_t value; @@ -509,6 +549,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (r) { pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -523,6 +564,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -651,16 +693,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); - if (r) + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } - if (size > valuesize) + if (size > valuesize) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } outsize = 0; x = 0; @@ -673,6 +723,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, } } + amdgpu_virt_disable_access_debugfs(adev); return !r ? outsize : r; } @@ -720,6 +771,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -734,16 +789,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_put_autosuspend(adev->ddev->dev); - if (!x) + if (!x) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } while (size && (offset < x * 4)) { uint32_t value; value = data[offset >> 2]; r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -751,6 +810,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, size -= 4; } + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -805,6 +865,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (r < 0) return r; + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -840,6 +904,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, err: kfree(data); + amdgpu_virt_disable_access_debugfs(adev); return result; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 2b99f5952375..35c381ec0423 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); struct amdgpu_task_info ti; + struct amdgpu_device *adev = ring->adev; memset(&ti, 0, sizeof(struct amdgpu_task_info)); @@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", ti.process_name, ti.tgid, ti.task_name, ti.pid); - if 
(amdgpu_device_should_recover_gpu(ring->adev)) + if (amdgpu_device_should_recover_gpu(ring->adev)) { amdgpu_device_gpu_recover(ring->adev, job); - else + } else { drm_sched_suspend_timeout(&ring->sched); + if (amdgpu_sriov_vf(adev)) + adev->virt.tdr_debug = true; + } } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 4d06c79065bf..6a81c2ee7c7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -334,3 +334,27 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } } + +bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev) +{ + return amdgpu_sriov_is_debug(adev) ? true : false; +} + +int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev) +{ + if (!amdgpu_sriov_vf(adev)) + return 0; + + if (amdgpu_virt_can_access_debugfs(adev)) + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + else + return -EPERM; + + return 0; +} + +void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f6ae3c656304..8f20e6dbd7a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -265,6 +265,7 @@ struct amdgpu_virt { uint32_t gim_feature; uint32_t reg_access_mode; int req_init_data_ver; + bool tdr_debug; }; #define amdgpu_sriov_enabled(adev) \ @@ -296,6 +297,8 @@ static inline bool is_virtual_machine(void) #define amdgpu_sriov_is_pp_one_vf(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) +#define amdgpu_sriov_is_debug(adev) \ + ((!adev->in_gpu_reset) && adev->virt.tdr_debug) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); @@ -314,4 +317,8 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, unsigned int chksum); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev); + +bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); +int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); +void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); #endif -- cgit From d32709dac627e5602fb3e66bcf4316906356120d Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Mon, 13 Apr 2020 14:31:27 +0800 Subject: drm/amdgpu: resume kiq access debugfs If there is no GPU hang, user still can access debugfs through kiq. Signed-off-by: Yintian Tao Reviewed-by: Monk Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 12 +++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2 ++ 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 6a81c2ee7c7b..8c10084f44ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -335,17 +335,23 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) } } -bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev) +bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev) { return amdgpu_sriov_is_debug(adev) ? 
true : false; } +bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev) +{ + return amdgpu_sriov_is_normal(adev) ? true : false; +} + int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev) { - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) || + amdgpu_virt_access_debugfs_is_kiq(adev)) return 0; - if (amdgpu_virt_can_access_debugfs(adev)) + if (amdgpu_virt_access_debugfs_is_mmio(adev)) adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; else return -EPERM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8f20e6dbd7a9..de27308802c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -299,6 +299,8 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) #define amdgpu_sriov_is_debug(adev) \ ((!adev->in_gpu_reset) && adev->virt.tdr_debug) +#define amdgpu_sriov_is_normal(adev) \ + ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug)) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -- cgit
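
For reference, below is a minimal stand-alone C sketch (not driver code) of the SR-IOV capability decoding that the amdgpu_detect_virtualization() routine introduced above performs. The register value would come from an MMIO read of mmRCC_IOV_FUNC_IDENTIFIER (0xDE5, Vega10 and later) or mmBIF_IOV_FUNC_IDENTIFIER (0x1503, Tonga/Fiji); here it is passed in as a plain integer so the example compiles on its own. The capability macro names and values are placeholders, not the kernel's AMDGPU_SRIOV_CAPS_* definitions; only the bit semantics (bit 0 = VF, bit 31 = IOV enabled, zero register inside a VM = passthrough) are taken from the patches.

	/* Stand-alone sketch of the IOV function-identifier decoding. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder capability flags (illustrative values only). */
	#define SRIOV_CAPS_IS_VF       (1u << 0)   /* we run as a virtual function */
	#define SRIOV_CAPS_ENABLE_IOV  (1u << 1)   /* host has IOV enabled         */
	#define PASSTHROUGH_MODE       (1u << 2)   /* whole GPU passed through     */

	/* Decode the IOV function identifier register into capability flags. */
	static uint32_t decode_iov_func_identifier(uint32_t reg, bool in_vm)
	{
		uint32_t caps = 0;

		if (reg & 0x1)          /* bit 0: 0 means PF, 1 means VF */
			caps |= SRIOV_CAPS_IS_VF;
		if (reg & 0x80000000)   /* bit 31: IOV enabled on the host */
			caps |= SRIOV_CAPS_ENABLE_IOV;

		/* reg == 0 while running inside a VM: passthrough, no SR-IOV. */
		if (!reg && in_vm)
			caps |= PASSTHROUGH_MODE;

		return caps;
	}

	int main(void)
	{
		/* Example: bits 0 and 31 set, i.e. an SR-IOV virtual function. */
		uint32_t caps = decode_iov_func_identifier(0x80000001u, true);

		printf("is_vf=%d iov_enabled=%d passthrough=%d\n",
		       !!(caps & SRIOV_CAPS_IS_VF),
		       !!(caps & SRIOV_CAPS_ENABLE_IOV),
		       !!(caps & PASSTHROUGH_MODE));
		return 0;
	}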