Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 633
1 files changed, 401 insertions, 232 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4a8fc15467cf..3573ecdb06ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -31,6 +31,7 @@ #include <linux/debugfs.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic_helper.h> #include <drm/amdgpu_drm.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> @@ -53,6 +54,10 @@ #include "bif/bif_4_1_d.h" #include <linux/pci.h> #include <linux/firmware.h> +#include "amdgpu_vf_error.h" + +#include "amdgpu_amdkfd.h" +#include "amdgpu_pm.h" MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -62,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { "TAHITI", @@ -104,10 +110,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { - BUG_ON(in_interrupt()); + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) return amdgpu_virt_kiq_rreg(adev, reg); - } if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -128,11 +132,13 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { - BUG_ON(in_interrupt()); - return amdgpu_virt_kiq_wreg(adev, reg, v); + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; } + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) + return amdgpu_virt_kiq_wreg(adev, reg, v); + if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -143,6 +149,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) @@ -157,6 +167,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } if ((reg * 4) < adev->rio_mem_size) iowrite32(v, adev->rio_mem + (reg * 4)); @@ -164,6 +177,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } /** @@ -318,51 +335,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - 
NULL, NULL, &adev->vram_scratch.robj); - if (r) { - return r; - } - } - - r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->vram_scratch.robj, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vram_scratch.robj); - return r; - } - r = amdgpu_bo_kmap(adev->vram_scratch.robj, - (void **)&adev->vram_scratch.ptr); - if (r) - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - - return r; + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->vram_scratch.robj, + &adev->vram_scratch.gpu_addr, + (void **)&adev->vram_scratch.ptr); } static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->vram_scratch.robj, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->vram_scratch.robj); - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - } - amdgpu_bo_unref(&adev->vram_scratch.robj); + amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); } /** @@ -419,6 +401,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev) */ static int amdgpu_doorbell_init(struct amdgpu_device *adev) { + /* No doorbell on SI hardware generation */ + if (adev->asic_type < CHIP_BONAIRE) { + adev->doorbell.base = 0; + adev->doorbell.size = 0; + adev->doorbell.num_doorbells = 0; + adev->doorbell.ptr = NULL; + return 0; + } + /* doorbell bar mapping */ adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); @@ -521,7 +512,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), + /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -552,32 +544,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) { unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); - if (offset < adev->wb.num_wb) { - __set_bit(offset, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} -/** - * amdgpu_wb_get_64bit - Allocate a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Allocate a wb slot for use by the driver (all asics). - * Returns 0 on success or -EINVAL on failure. 
- */ -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) -{ - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 2, 7, 0); - if ((offset + 1) < adev->wb.num_wb) { + if (offset < adev->wb.num_wb) { __set_bit(offset, adev->wb.used); - __set_bit(offset + 1, adev->wb.used); - *wb = offset; + *wb = offset << 3; /* convert to dw offset */ return 0; } else { return -EINVAL; @@ -595,23 +565,7 @@ int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) { if (wb < adev->wb.num_wb) - __clear_bit(wb, adev->wb.used); -} - -/** - * amdgpu_wb_free_64bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) -{ - if ((wb + 1) < adev->wb.num_wb) { - __clear_bit(wb, adev->wb.used); - __clear_bit(wb + 1, adev->wb.used); - } + __clear_bit(wb >> 3, adev->wb.used); } /** @@ -665,7 +619,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 } /** - * amdgpu_gtt_location - try to find GTT location + * amdgpu_gart_location - try to find GTT location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -676,30 +630,105 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 * * FIXME: when reducing GTT size align new size on power of 2. */ -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { u64 size_af, size_bf; - size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; - size_bf = mc->vram_start & ~mc->gtt_base_align; + size_af = adev->mc.mc_mask - mc->vram_end; + size_bf = mc->vram_start; if (size_bf > size_af) { - if (mc->gtt_size > size_bf) { + if (mc->gart_size > size_bf) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_bf; + mc->gart_size = size_bf; } - mc->gtt_start = 0; + mc->gart_start = 0; } else { - if (mc->gtt_size > size_af) { + if (mc->gart_size > size_af) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_af; + mc->gart_size = size_af; } - mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; + mc->gart_start = mc->vram_end + 1; } - mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + mc->gart_size >> 20, mc->gart_start, mc->gart_end); +} + +/* + * Firmware Reservation functions + */ +/** + * amdgpu_fw_reserve_vram_fini - free fw reserved vram + * + * @adev: amdgpu_device pointer + * + * free fw reserved vram if it has been reserved. + */ +void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, + NULL, &adev->fw_vram_usage.va); +} + +/** + * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from fw. 
+ */ +int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) +{ + int r = 0; + u64 gpu_addr; + u64 vram_size = adev->mc.visible_vram_size; + + adev->fw_vram_usage.va = NULL; + adev->fw_vram_usage.reserved_bo = NULL; + + if (adev->fw_vram_usage.size > 0 && + adev->fw_vram_usage.size <= vram_size) { + + r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, + PAGE_SIZE, true, 0, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, + &adev->fw_vram_usage.reserved_bo); + if (r) + goto error_create; + + r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); + if (r) + goto error_reserve; + r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, + AMDGPU_GEM_DOMAIN_VRAM, + adev->fw_vram_usage.start_offset, + (adev->fw_vram_usage.start_offset + + adev->fw_vram_usage.size), &gpu_addr); + if (r) + goto error_pin; + r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); + if (r) + goto error_kmap; + + amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); + } + return r; + +error_kmap: + amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); +error_pin: + amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); +error_reserve: + amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); +error_create: + adev->fw_vram_usage.va = NULL; + adev->fw_vram_usage.reserved_bo = NULL; + return r; } + /* * GPU helpers function. */ @@ -716,22 +745,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev) { uint32_t reg; - if (adev->has_hw_reset) { - adev->has_hw_reset = false; - return true; - } - /* then check MEM_SIZE, in case the crtcs are off */ - reg = amdgpu_asic_get_config_memsize(adev); - - if ((reg != 0) && (reg != 0xffffffff)) - return false; - - return true; - -} - -static bool amdgpu_vpost_needed(struct amdgpu_device *adev) -{ if (amdgpu_sriov_vf(adev)) return false; @@ -754,7 +767,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) return true; } } - return amdgpu_need_post(adev); + + if (adev->has_hw_reset) { + adev->has_hw_reset = false; + return true; + } + + /* bios scratch used on CIK+ */ + if (adev->asic_type >= CHIP_BONAIRE) + return amdgpu_atombios_scratch_need_asic_init(adev); + + /* check MEM_SIZE for older asics */ + reg = amdgpu_asic_get_config_memsize(adev); + + if ((reg != 0) && (reg != 0xffffffff)) + return false; + + return true; } /** @@ -936,6 +965,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) return r; } +static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct atom_context *ctx = adev->mode_info.atom_context; + + return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); +} + +static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, + NULL); + /** * amdgpu_atombios_fini - free the driver info and callbacks for atombios * @@ -955,6 +998,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev) adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); adev->mode_info.atom_card_info = NULL; + device_remove_file(adev->dev, &dev_attr_vbios_version); } /** @@ -971,6 +1015,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) { struct card_info *atom_card_info = kzalloc(sizeof(struct card_info), GFP_KERNEL); + int ret; if (!atom_card_info) return -ENOMEM; @@ -1007,6 +1052,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) 
amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); } + + ret = device_create_file(adev->dev, &dev_attr_vbios_version); + if (ret) { + DRM_ERROR("Failed to create device file for VBIOS version\n"); + return ret; + } + return 0; } @@ -1031,19 +1083,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -/** - * amdgpu_check_pot_argument - check that argument is a power of two - * - * @arg: value to check - * - * Validates that a certain argument is a power of two (all asics). - * Returns true if argument is valid. - */ -static bool amdgpu_check_pot_argument(int arg) -{ - return (arg & (arg - 1)) == 0; -} - static void amdgpu_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, @@ -1077,7 +1116,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size == -1) return; - if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + if (!is_power_of_2(amdgpu_vm_size)) { dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", amdgpu_vm_size); goto def_value; @@ -1118,19 +1157,31 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); amdgpu_sched_jobs = 4; - } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){ + } else if (!is_power_of_2(amdgpu_sched_jobs)){ dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size != -1) { + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { + /* gart size must be greater or equal to 32M */ + dev_warn(adev->dev, "gart size (%d) too small\n", + amdgpu_gart_size); + amdgpu_gart_size = -1; + } + + if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { /* gtt size must be greater or equal to 32M */ - if (amdgpu_gart_size < 32) { - dev_warn(adev->dev, "gart size (%d) too small\n", - amdgpu_gart_size); - amdgpu_gart_size = -1; - } + dev_warn(adev->dev, "gtt size (%d) too small\n", + amdgpu_gtt_size); + amdgpu_gtt_size = -1; + } + + /* valid range is between 4 and 9 inclusive */ + if (amdgpu_vm_fragment_size != -1 && + (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { + dev_warn(adev->dev, "valid range is between 4 and 9\n"); + amdgpu_vm_fragment_size = -1; } amdgpu_check_vm_size(adev); @@ -1138,7 +1189,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || - !amdgpu_check_pot_argument(amdgpu_vram_page_split))) { + !is_power_of_2(amdgpu_vram_page_split))) { dev_warn(adev->dev, "invalid VRAM page split (%d)\n", amdgpu_vram_page_split); amdgpu_vram_page_split = 1024; @@ -1807,10 +1858,8 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = false; } - if (amdgpu_sriov_vf(adev)) { - amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); + if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, false); - } return 0; } @@ -1898,10 +1947,12 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_SMC, + AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, - AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_UVD, + AMD_IP_BLOCK_TYPE_VCE }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -1982,16 
+2033,67 @@ static int amdgpu_resume(struct amdgpu_device *adev) static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { - if (adev->is_atom_fw) { - if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; - } else { - if (amdgpu_atombios_has_gpu_virtualization_table(adev)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; + if (amdgpu_sriov_vf(adev)) { + if (adev->is_atom_fw) { + if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; + } else { + if (amdgpu_atombios_has_gpu_virtualization_table(adev)) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; + } + + if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); + } +} + +bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) +{ + switch (asic_type) { +#if defined(CONFIG_DRM_AMD_DC) + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + case CHIP_POLARIS12: + case CHIP_TONGA: + case CHIP_FIJI: +#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA) + return amdgpu_dc != 0; +#endif + case CHIP_KABINI: + case CHIP_MULLINS: + return amdgpu_dc > 0; + case CHIP_VEGA10: +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) + case CHIP_RAVEN: +#endif + return amdgpu_dc != 0; +#endif + default: + return false; } } /** + * amdgpu_device_has_dc_support - check if dc is supported + * + * @adev: amdgpu_device_pointer + * + * Returns true for supported, false for not supported + */ +bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return false; + + return amdgpu_device_asic_has_dc_support(adev->asic_type); +} + +/** * amdgpu_device_init - initialize the driver * * @adev: amdgpu_device pointer @@ -2019,7 +2121,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->flags = flags; adev->asic_type = flags & AMD_ASIC_MASK; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; - adev->mc.gtt_size = 512 * 1024 * 1024; + adev->mc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; @@ -2028,6 +2130,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->vm_manager.vm_pte_num_rings = 0; adev->gart.gart_funcs = NULL; adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); + bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); adev->smc_rreg = &amdgpu_invalid_rreg; adev->smc_wreg = &amdgpu_invalid_wreg; @@ -2044,7 +2147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); @@ -2056,8 +2158,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->srbm_mutex); + mutex_init(&adev->gfx.pipe_reserve_mutex); mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); + mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); amdgpu_check_arguments(adev); @@ -2068,6 +2172,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); + 
spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); @@ -2099,9 +2204,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - if (adev->asic_type >= CHIP_BONAIRE) - /* doorbell bar mapping */ - amdgpu_doorbell_init(adev); + /* doorbell bar mapping */ + amdgpu_doorbell_init(adev); /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -2143,6 +2247,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); goto failed; } @@ -2150,7 +2255,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (amdgpu_vpost_needed(adev)) { + if (amdgpu_need_post(adev)) { if (!adev->bios) { dev_err(adev->dev, "no vBIOS found\n"); r = -EINVAL; @@ -2166,21 +2271,32 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("GPU post is not needed\n"); } - if (!adev->is_atom_fw) { + if (adev->is_atom_fw) { + /* Initialize clocks */ + r = amdgpu_atomfirmware_get_clock_info(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + } else { /* Initialize clocks */ r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; } /* init i2c buses */ - amdgpu_atombios_i2c_init(adev); + if (!amdgpu_device_has_dc_support(adev)) + amdgpu_atombios_i2c_init(adev); } /* Fence driver */ r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); goto failed; } @@ -2190,6 +2306,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_init(adev); if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_fini(adev); goto failed; } @@ -2209,6 +2326,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); goto failed; } @@ -2216,8 +2334,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("ib ring test failed (%d).\n", r); + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + amdgpu_fbdev_init(adev); + r = amdgpu_pm_sysfs_init(adev); + if (r) + DRM_ERROR("registering pm debugfs failed (%d).\n", r); + r = amdgpu_gem_debugfs_init(adev); if (r) DRM_ERROR("registering gem debugfs failed (%d).\n", r); @@ -2234,6 +2359,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("registering firmware debugfs failed (%d).\n", r); + r = amdgpu_debugfs_vbios_dump_init(adev); + if (r) + DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); + if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -2253,12 +2382,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); + amdgpu_vf_error_put(adev, 
AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); goto failed; } return 0; failed: + amdgpu_vf_error_trans_all(adev); if (runtime) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -2283,6 +2414,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) /* evict vram memory */ amdgpu_bo_evict_vram(adev); amdgpu_ib_pool_fini(adev); + amdgpu_fw_reserve_vram_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); @@ -2293,7 +2425,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->accel_working = false; cancel_delayed_work_sync(&adev->late_init_work); /* free i2c buses */ - amdgpu_i2c_fini(adev); + if (!amdgpu_device_has_dc_support(adev)) + amdgpu_i2c_fini(adev); amdgpu_atombios_fini(adev); kfree(adev->bios); adev->bios = NULL; @@ -2307,8 +2440,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rio_mem = NULL; iounmap(adev->rmmio); adev->rmmio = NULL; - if (adev->asic_type >= CHIP_BONAIRE) - amdgpu_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); + amdgpu_pm_sysfs_fini(adev); amdgpu_debugfs_regs_cleanup(adev); } @@ -2344,12 +2477,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) drm_kms_helper_poll_disable(dev); - /* turn off display hw */ - drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + if (!amdgpu_device_has_dc_support(adev)) { + /* turn off display hw */ + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + drm_modeset_unlock_all(dev); } - drm_modeset_unlock_all(dev); + + amdgpu_amdkfd_suspend(adev); /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -2392,10 +2529,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2444,10 +2578,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) goto unlock; } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (amdgpu_need_post(adev)) { @@ -2490,16 +2621,31 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } } } + r = amdgpu_amdkfd_resume(adev); + if (r) + return r; /* blat the mode back in */ if (fbcon) { - drm_helper_resume_force_mode(dev); - /* turn on display hw */ - drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + if (!amdgpu_device_has_dc_support(adev)) { + /* pre DCE11 */ + drm_helper_resume_force_mode(dev); + + /* turn on display hw */ + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + drm_modeset_unlock_all(dev); + } else { + /* + * There is no equivalent atomic helper to turn on + * display, so we defined our own function for this, + * once suspend resume is supported by the atomic + * framework this will be reworked + */ + 
amdgpu_dm_display_resume(adev); } - drm_modeset_unlock_all(dev); } drm_kms_helper_poll_enable(dev); @@ -2516,7 +2662,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) #ifdef CONFIG_PM dev->dev->power.disable_depth++; #endif - drm_helper_hpd_irq_event(dev); + if (!amdgpu_device_has_dc_support(adev)) + drm_helper_hpd_irq_event(dev); + else + drm_kms_helper_hotplug_event(dev); #ifdef CONFIG_PM dev->dev->power.disable_depth--; #endif @@ -2536,6 +2685,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) int i; bool asic_hang = false; + if (amdgpu_sriov_vf(adev)) + return true; + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2578,7 +2730,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || - (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { if (adev->ip_blocks[i].status.hang) { DRM_INFO("Some block need full reset!\n"); return true; @@ -2654,12 +2807,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, goto err; } - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); - if (r) { - DRM_ERROR("%p bind failed\n", bo->shadow); - goto err; - } - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true); if (r) { @@ -2692,7 +2839,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) mutex_lock(&adev->virt.lock_reset); atomic_inc(&adev->gpu_reset_counter); - adev->gfx.in_reset = true; + adev->in_sriov_reset = true; /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); @@ -2803,7 +2950,7 @@ give_up_reset: dev_info(adev->dev, "GPU reset successed!\n"); } - adev->gfx.in_reset = false; + adev->in_sriov_reset = false; mutex_unlock(&adev->virt.lock_reset); return r; } @@ -2818,6 +2965,7 @@ give_up_reset: */ int amdgpu_gpu_reset(struct amdgpu_device *adev) { + struct drm_atomic_state *state = NULL; int i, r; int resched; bool need_full_reset, vram_lost = false; @@ -2831,6 +2979,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + /* store modesetting */ + if (amdgpu_device_has_dc_support(adev)) + state = drm_atomic_helper_suspend(adev->ddev); /* block scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -2860,21 +3011,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) r = amdgpu_suspend(adev); retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); - } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -2959,15 +3098,22 @@ out: } } - drm_helper_resume_force_mode(adev->ddev); + if (amdgpu_device_has_dc_support(adev)) { + r = drm_atomic_helper_resume(adev->ddev, state); + 
amdgpu_dm_display_resume(adev); + } else + drm_helper_resume_force_mode(adev->ddev); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) + if (r) { /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); - else + } + else { dev_info(adev->dev, "GPU reset successed!\n"); + } + amdgpu_vf_error_trans_all(adev); return r; } @@ -3115,9 +3261,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, pm_pg_lock = (*pos >> 23) & 1; if (*pos & (1ULL << 62)) { - se_bank = (*pos >> 24) & 0x3FF; - sh_bank = (*pos >> 34) & 0x3FF; - instance_bank = (*pos >> 44) & 0x3FF; + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; if (se_bank == 0x3FF) se_bank = 0xFFFFFFFF; @@ -3191,9 +3337,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, pm_pg_lock = (*pos >> 23) & 1; if (*pos & (1ULL << 62)) { - se_bank = (*pos >> 24) & 0x3FF; - sh_bank = (*pos >> 34) & 0x3FF; - instance_bank = (*pos >> 44) & 0x3FF; + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; if (se_bank == 0x3FF) se_bank = 0xFFFFFFFF; @@ -3508,10 +3654,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, valuesize = sizeof(values); if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) - r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize); - else if (adev->pm.funcs && adev->pm.funcs->read_sensor) - r = adev->pm.funcs->read_sensor(adev, idx, &values[0], - &valuesize); + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); else return -EINVAL; @@ -3544,12 +3687,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, return -EINVAL; /* decode offset */ - offset = (*pos & 0x7F); - se = ((*pos >> 7) & 0xFF); - sh = ((*pos >> 15) & 0xFF); - cu = ((*pos >> 23) & 0xFF); - wave = ((*pos >> 31) & 0xFF); - simd = ((*pos >> 37) & 0xFF); + offset = (*pos & GENMASK_ULL(6, 0)); + se = (*pos & GENMASK_ULL(14, 7)) >> 7; + sh = (*pos & GENMASK_ULL(22, 15)) >> 15; + cu = (*pos & GENMASK_ULL(30, 23)) >> 23; + wave = (*pos & GENMASK_ULL(36, 31)) >> 31; + simd = (*pos & GENMASK_ULL(44, 37)) >> 37; /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); @@ -3594,14 +3737,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, return -EINVAL; /* decode offset */ - offset = (*pos & 0xFFF); /* in dwords */ - se = ((*pos >> 12) & 0xFF); - sh = ((*pos >> 20) & 0xFF); - cu = ((*pos >> 28) & 0xFF); - wave = ((*pos >> 36) & 0xFF); - simd = ((*pos >> 44) & 0xFF); - thread = ((*pos >> 52) & 0xFF); - bank = ((*pos >> 60) & 1); + offset = *pos & GENMASK_ULL(11, 0); + se = (*pos & GENMASK_ULL(19, 12)) >> 12; + sh = (*pos & GENMASK_ULL(27, 20)) >> 20; + cu = (*pos & GENMASK_ULL(35, 28)) >> 28; + wave = (*pos & GENMASK_ULL(43, 36)) >> 36; + simd = (*pos & GENMASK_ULL(51, 44)) >> 44; + thread = (*pos & GENMASK_ULL(59, 52)) >> 52; + bank = (*pos & GENMASK_ULL(61, 60)) >> 60; data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); if (!data) @@ -3799,6 +3942,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor) { return 0; } + +static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct 
amdgpu_device *adev = dev->dev_private; + + seq_write(m, adev->bios, adev->bios_size); + return 0; +} + +static const struct drm_info_list amdgpu_vbios_dump_list[] = { + {"amdgpu_vbios", + amdgpu_debugfs_get_vbios_dump, + 0, NULL}, +}; + +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) +{ + return amdgpu_debugfs_add_files(adev, + amdgpu_vbios_dump_list, 1); +} #else static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) { return 0; } @@ -3808,5 +3973,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) +{ + return 0; +} static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif