Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
32 files changed, 472 insertions, 202 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 44f62fda4022..0283e2b3c851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1801,8 +1801,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
 #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
 #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
 
 #define mmACP_PGFSM_RETAIN_REG 0x51c9
 #define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
 #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
 #define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
+#define ACP_DEVS 4
 #define ACP_SRC_ID 162
 
 enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;
 
-	adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}
 
-	i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
 
+	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+	switch (adev->asic_type) {
+	case CHIP_STONEY:
+		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+		break;
+	default:
+		break;
+	}
+
+	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
 
-	adev->acp.acp_res[3].name = "acp2x_dma_irq";
-	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
-	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
-	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+	adev->acp.acp_res[4].name = "acp2x_dma_irq";
+	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
 
	adev->acp.acp_cell[0].name = "acp_audio_dma";
-	adev->acp.acp_cell[0].num_resources = 4;
+	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
 
+	adev->acp.acp_cell[3].name = "designware-i2s";
+	adev->acp.acp_cell[3].num_resources = 1;
+	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
	return 0;
 }
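The ACP hunk above follows the standard MFD pattern: each sub-device is a cell paired with a resource slice of the parent's register space. A minimal illustrative sketch of that pattern (not code from this commit; the standalone variable names are hypothetical, and in the real driver the MMIO addresses are acp_base-relative offsets filled in at hw_init time):

	#include <linux/ioport.h>
	#include <linux/mfd/core.h>

	/* One MMIO window for the new BT I2S instance, mirroring acp_res[3]. */
	static struct resource bt_i2s_res = {
		.name  = "acp2x_dw_bt_i2s_play_cap",
		.flags = IORESOURCE_MEM,
		.start = 0x14970,	/* ACP_BT_PLAY_REGS_START (+ acp_base in reality) */
		.end   = 0x14a24,	/* ACP_BT_PLAY_REGS_END */
	};

	/* One MFD cell bound to that window, mirroring acp_cell[3]. */
	static const struct mfd_cell bt_i2s_cell = {
		.name          = "designware-i2s",
		.num_resources = 1,
		.resources     = &bt_i2s_res,
		/* .platform_data/.pdata_size would carry the i2s_platform_data */
	};

	/* Registered in one shot: mfd_add_hotplug_devices(parent, &bt_i2s_cell, 1); */

Because ACP_DEVS, the kcalloc() counts, and acp_cell[0].num_resources all size the same arrays, the commit has to bump each of them in lockstep when a cell or resource is added.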
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0d8c3fc6eace..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
			       struct acpi_bus_event *event)
 {
	struct amdgpu_atif *atif = adev->atif;
-	struct atif_sbios_requests req;
	int count;
 
	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
		/* Not our event */
		return NOTIFY_DONE;
 
-	/* Check pending SBIOS requests */
-	count = amdgpu_atif_get_sbios_requests(atif, &req);
+	if (atif->functions.sbios_requests) {
+		struct atif_sbios_requests req;
 
-	if (count <= 0)
-		return NOTIFY_DONE;
+		/* Check pending SBIOS requests */
+		count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+		if (count <= 0)
+			return NOTIFY_DONE;
 
-	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+		DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
-		struct amdgpu_encoder *enc = atif->encoder_for_bl;
+		/* todo: add DC handling */
+		if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+		    !amdgpu_device_has_dc_support(adev)) {
+			struct amdgpu_encoder *enc = atif->encoder_for_bl;
 
-		if (enc) {
-			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+			if (enc) {
+				struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
 
-			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
-					 req.backlight_level);
+				DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+						 req.backlight_level);
 
-			amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+				amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-			backlight_force_update(dig->bl_dev,
-					       BACKLIGHT_UPDATE_HOTKEY);
+				backlight_force_update(dig->bl_dev,
						       BACKLIGHT_UPDATE_HOTKEY);
 #endif
+			}
		}
-	}
 
-	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-		if ((adev->flags & AMD_IS_PX) &&
-		    amdgpu_atpx_dgpu_req_power_for_displays()) {
-			pm_runtime_get_sync(adev->ddev->dev);
-			/* Just fire off a uevent and let userspace tell us what to do */
-			drm_helper_hpd_irq_event(adev->ddev);
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+		if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+			if ((adev->flags & AMD_IS_PX) &&
+			    amdgpu_atpx_dgpu_req_power_for_displays()) {
+				pm_runtime_get_sync(adev->ddev->dev);
+				/* Just fire off a uevent and let userspace tell us what to do */
+				drm_helper_hpd_irq_event(adev->ddev);
+				pm_runtime_mark_last_busy(adev->ddev->dev);
+				pm_runtime_put_autosuspend(adev->ddev->dev);
+			}
		}
+		/* TODO: check other events */
	}
-	/* TODO: check other events */
 
	/* We've handled the event, stop the notifier chain. The ACPI interface
	 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index e3ed08dca7b7..f8bbbb3a9504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
	return r;
 }
 
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd)
+		r = kgd2kfd->pre_reset(adev->kfd);
+
+	return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd)
+		r = kgd2kfd->post_reset(adev->kfd);
+
+	return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		  void **mem_obj, uint64_t *gpu_addr,
		  void **cpu_ptr)
@@ -461,6 +488,14 @@ err:
	return ret;
 }
 
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	amdgpu_dpm_switch_power_profile(adev,
+					PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
	if (adev->kfd) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8418a3f4e9d..2f379c183ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
 /* Shared API */
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		  void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
 
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+					  struct kfd_vm_fault_info *info);
+
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea79908dac4c..ea3f698aef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t page_table_base);
 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
 
 /* Because of REG_GET_FIELD() being used, we put this function in the
  * asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
+	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+	.gpu_recover = amdgpu_amdkfd_gpu_reset,
+	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
	unsigned long flags, end_jiffies;
	int retry;
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
	int vmid;
	unsigned int tmp;
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
 }
+
+ /**
+  * read_vmid_from_vmfault_reg - read vmid from register
+  *
+  * adev: amdgpu_device pointer
+  * @vmid: vmid pointer
+  * read vmid from register (CIK).
+  */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19dd665e7307..f6e53e9352bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
+	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+	.gpu_recover = amdgpu_amdkfd_gpu_reset,
+	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
	int retry;
	struct vi_mqd *m = get_mqd(mqd);
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	acquire_queue(kgd, pipe_id, queue_id);
 
	if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
	int vmid;
	unsigned int tmp;
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1db60aa5b7f0..8efedfcb9dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
+	.gpu_recover = amdgpu_amdkfd_gpu_reset,
+	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	acquire_queue(kgd, pipe_id, queue_id);
 
	if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
	int vmid;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 
+	if (adev->in_gpu_reset)
+		return -EIO;
+
	if (ring->ready)
		return invalidate_tlbs_with_kiq(adev, pasid);
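Taken together, the amdkfd hunks above give KFD two hooks around a GPU reset and make the register-poking paths bail out while a reset is in flight. A condensed sketch of the resulting call flow (illustrative only, the function name gpu_recover_sketch is hypothetical and error handling is omitted; the real wiring appears in the amdgpu_device.c hunks later in this diff):

	/* How the new kgd2kfd hooks bracket a recovery, per this series. */
	static void gpu_recover_sketch(struct amdgpu_device *adev)
	{
		adev->in_gpu_reset = 1;		/* HQD/TLB helpers now return -EIO */
		amdgpu_amdkfd_pre_reset(adev);	/* kgd2kfd->pre_reset(): quiesce KFD */

		/* ... hardware reset and IP re-initialization happen here ... */

		amdgpu_amdkfd_post_reset(adev);	/* kgd2kfd->post_reset(): resume KFD */
		adev->in_gpu_reset = 0;
	}

KFD itself can trigger this path through the new .gpu_recover callback, which simply forwards to amdgpu_device_gpu_recover().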
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 079af8ac2636..8a707d8bbb1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
		 "Called with userptr BO"))
		return -EINVAL;
 
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
 
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
-	amdgpu_ttm_placement_from_domain(bo, mem->domain);
+	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
@@ -1621,6 +1621,20 @@ bo_reserve_failed:
	return ret;
 }
 
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+					  struct kfd_vm_fault_info *mem)
+{
+	struct amdgpu_device *adev;
+
+	adev = (struct amdgpu_device *)kgd;
+	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+		*mem = *adev->gmc.vm_fault_info;
+		mb();
+		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+	}
+	return 0;
+}
+
 /* Evict a userptr BO by stopping the queues if necessary
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1680,7 +1694,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
@@ -1824,7 +1838,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
		if (mem->user_pages[0]) {
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     mem->user_pages);
-			amdgpu_ttm_placement_from_domain(bo, mem->domain);
+			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index b33f1680c9a3..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..178d9ce4eba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
	}
 
 retry:
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
	p->bytes_moved += ctx.bytes_moved;
@@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
-		amdgpu_ttm_placement_from_domain(bo, other);
+		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
@@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
-			amdgpu_ttm_placement_from_domain(bo,
-							 AMDGPU_GEM_DOMAIN_CPU);
+			amdgpu_bo_placement_from_domain(bo,
+							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	job = p->job;
	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);
 
	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
@@ -1655,7 +1655,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}
 
	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-					  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
				continue;
 
			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-						      &ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
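The amdgpu_cs.c and amdgpu_ctx.c hunks above (and the job, ttm, uvd, vce and vm hunks later in this diff) all track the same DRM scheduler API rework: the scheduler argument is dropped from entity calls, and the ring is recovered through the entity's run queue. A minimal sketch of the new calling convention (illustrative; submit_sketch is a hypothetical helper, assuming an already-initialized entity):

	/* New convention: the entity alone identifies the scheduler. */
	static int submit_sketch(struct amdgpu_job *job,
				 struct drm_sched_entity *entity, void *owner)
	{
		int r;

		r = drm_sched_job_init(&job->base, entity, owner);
		if (r)
			return r;

		drm_sched_entity_push_job(&job->base, entity);

		/* ring lookup now goes through entity->rq->sched,
		 * not the removed entity->sched pointer */
		amdgpu_ring_priority_get(to_amdgpu_ring(entity->rq->sched),
					 job->base.s_priority);
		return 0;
	}

	/* Teardown likewise loses its scheduler argument:
	 *	drm_sched_entity_destroy(entity);
	 *	drm_sched_entity_flush(entity, max_wait);
	 */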
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 386a7b34d2f4..e839470880d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1926,7 +1926,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 }
 
 /**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
  *
  * @adev: amdgpu_device pointer
  *
@@ -1936,7 +1936,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
  * in each IP into a state suitable for suspend.
  * Returns 0 on success, negative error code on failure.
  */
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		/* displays are handled separately */
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+			/* ungate blocks so that suspend can properly shut them down */
+			if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+				r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+											     AMD_CG_STATE_UNGATE);
+				if (r) {
+					DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+						  adev->ip_blocks[i].version->funcs->name, r);
+				}
+			}
+			/* XXX handle errors */
+			r = adev->ip_blocks[i].version->funcs->suspend(adev);
+			/* XXX handle errors */
+			if (r) {
+				DRM_ERROR("suspend of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+			}
+		}
+	}
+
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, false);
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
	int i, r;
 
@@ -1957,6 +2005,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
+		/* displays are handled in phase1 */
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1982,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
	return 0;
 }
 
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_device_ip_suspend_phase1(adev);
+	if (r)
+		return r;
+	r = amdgpu_device_ip_suspend_phase2(adev);
+
+	return r;
+}
+
 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 {
	int i, r;
@@ -2004,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
			continue;
 
		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
		if (r)
			return r;
	}
@@ -2039,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
			continue;
 
		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
		if (r)
			return r;
	}
@@ -2628,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
	drm_kms_helper_poll_disable(dev);
 
+	if (fbcon)
+		amdgpu_fbdev_set_suspend(adev, 1);
+
	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
@@ -2635,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
-	}
-
-	amdgpu_amdkfd_suspend(adev);
-
-	/* unpin the front buffers and cursors */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct drm_framebuffer *fb = crtc->primary->fb;
-		struct amdgpu_bo *robj;
-
-		if (amdgpu_crtc->cursor_bo) {
-			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-			r = amdgpu_bo_reserve(aobj, true);
-			if (r == 0) {
-				amdgpu_bo_unpin(aobj);
-				amdgpu_bo_unreserve(aobj);
+		/* unpin the front buffers and cursors */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+			struct drm_framebuffer *fb = crtc->primary->fb;
+			struct amdgpu_bo *robj;
+
+			if (amdgpu_crtc->cursor_bo) {
+				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+				r = amdgpu_bo_reserve(aobj, true);
+				if (r == 0) {
+					amdgpu_bo_unpin(aobj);
+					amdgpu_bo_unreserve(aobj);
+				}
			}
-		}
 
-		if (fb == NULL || fb->obj[0] == NULL) {
-			continue;
-		}
-		robj = gem_to_amdgpu_bo(fb->obj[0]);
-		/* don't unpin kernel fb objects */
-		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-			r = amdgpu_bo_reserve(robj, true);
-			if (r == 0) {
-				amdgpu_bo_unpin(robj);
-				amdgpu_bo_unreserve(robj);
+			if (fb == NULL || fb->obj[0] == NULL) {
+				continue;
+			}
+			robj = gem_to_amdgpu_bo(fb->obj[0]);
+			/* don't unpin kernel fb objects */
+			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+				r = amdgpu_bo_reserve(robj, true);
+				if (r == 0) {
+					amdgpu_bo_unpin(robj);
+					amdgpu_bo_unreserve(robj);
+				}
			}
		}
	}
+
+	amdgpu_amdkfd_suspend(adev);
+
+	r = amdgpu_device_ip_suspend_phase1(adev);
+
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
 
	amdgpu_fence_driver_suspend(adev);
 
-	r = amdgpu_device_ip_suspend(adev);
+	r = amdgpu_device_ip_suspend_phase2(adev);
 
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
@@ -2691,11 +2770,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
			DRM_ERROR("amdgpu asic reset failed\n");
	}
 
-	if (fbcon) {
-		console_lock();
-		amdgpu_fbdev_set_suspend(adev, 1);
-		console_unlock();
-	}
	return 0;
 }
 
@@ -2720,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
 
-	if (fbcon)
-		console_lock();
-
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
-			goto unlock;
+			return r;
	}
 
	/* post card */
@@ -2741,28 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
-		goto unlock;
+		return r;
	}
	amdgpu_fence_driver_resume(adev);
 
	r = amdgpu_device_ip_late_init(adev);
	if (r)
-		goto unlock;
-
-	/* pin cursors */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-		if (amdgpu_crtc->cursor_bo) {
-			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-			r = amdgpu_bo_reserve(aobj, true);
-			if (r == 0) {
-				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-				if (r != 0)
-					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
-				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-				amdgpu_bo_unreserve(aobj);
+		return r;
+
+	if (!amdgpu_device_has_dc_support(adev)) {
+		/* pin cursors */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+			if (amdgpu_crtc->cursor_bo) {
+				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+				r = amdgpu_bo_reserve(aobj, true);
+				if (r == 0) {
+					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+					if (r != 0)
+						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+					amdgpu_bo_unreserve(aobj);
+				}
			}
		}
	}
@@ -2770,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
	if (r)
		return r;
 
+	/* Make sure IB tests flushed */
+	flush_delayed_work(&adev->late_init_work);
+
	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
@@ -2783,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
			}
			drm_modeset_unlock_all(dev);
		}
+		amdgpu_fbdev_set_suspend(adev, 0);
	}
 
	drm_kms_helper_poll_enable(dev);
@@ -2806,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
 #endif
-
-	if (fbcon)
-		amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
-	if (fbcon)
-		console_unlock();
-
-	return r;
+	return 0;
 }
 
 /**
@@ -3091,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
 * @adev: amdgpu device pointer
 *
 * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
 */
 static int amdgpu_device_reset(struct amdgpu_device *adev)
 {
@@ -3169,7 +3238,7 @@ out:
 * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
 */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     bool from_hypervisor)
@@ -3241,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;
 
+	/* Block kfd */
+	amdgpu_amdkfd_pre_reset(adev);
+
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
@@ -3256,7 +3328,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
		if (job && job->base.sched == &ring->sched)
			continue;
 
-		drm_sched_hw_job_reset(&ring->sched, &job->base);
+		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
 
		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
@@ -3294,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
-		dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+		dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
	}
 
+	/*unlock kfd */
+	amdgpu_amdkfd_post_reset(adev);
	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
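The net effect of the amdgpu_device.c rework above is that suspend becomes a two-phase walk with VRAM eviction interleaved between the phases, so the display hardware stops scanning out before buffers move, and the rest of the IPs go down only after eviction (which still needs a live GART) has finished. A condensed ordering sketch (not verbatim kernel code; suspend_order_sketch is a hypothetical name):

	/* Ordering established by amdgpu_device_suspend() in this series. */
	static void suspend_order_sketch(struct amdgpu_device *adev)
	{
		amdgpu_device_ip_suspend_phase1(adev);	/* DCE/display blocks only */
		amdgpu_bo_evict_vram(adev);		/* evict while GART still works */
		amdgpu_fence_driver_suspend(adev);
		amdgpu_device_ip_suspend_phase2(adev);	/* all remaining IP blocks */
		amdgpu_bo_evict_vram(adev);		/* second pass catches the GART
							 * page table itself */
	}

Dropping the explicit console_lock()/console_unlock() pair is what motivates the switch to drm_fb_helper_set_suspend_unlocked() in the amdgpu_fb.c hunk that follows.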
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index d44b76455e89..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -373,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
	if (adev->mode_info.rfbdev)
-		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
-			state);
+		drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+						   state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bcbdcf997d20..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
		if (r)
			goto free_pages;
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6cb4948233cb..bb5a47a45790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,6 +105,8 @@ struct amdgpu_gmc {
	/* protects concurrent invalidation */
	spinlock_t invalidate_lock;
	bool translate_further;
+	struct kfd_vm_fault_info *vm_fault_info;
+	atomic_t vm_fault_info_updated;
 
	const struct amdgpu_gmc_funcs *gmc_funcs;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
	if (!f)
		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;
 
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);
 
	return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 207f238649b4..c7dce14fd47d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
-	int i, j, found;
+	int i, found;
	int ui32_size = sizeof(ui32);
 
	if (!info->return_size || !info->return_pointer)
@@ -328,64 +328,61 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
-				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->gfx.compute_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
-				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
+				ring_mask |= adev->sdma.instance[i].ring.ready << i;
			ib_start_alignment = 256;
			ib_size_alignment = 4;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+			ring_mask |= adev->uvd.inst[0].ring.ready;
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
-				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->vce.ring[i].ready << i;
			ib_start_alignment = 4;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-				for (j = 0; j < adev->uvd.num_enc_rings; j++)
-					ring_mask |=
-					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
-					(j + i * adev->uvd.num_enc_rings));
+			for (i = 0; i < adev->uvd.num_enc_rings; i++)
+				ring_mask |=
+					adev->uvd.inst[0].ring_enc[i].ready << i;
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+			ring_mask = adev->vcn.ring_dec.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
-				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->vcn.ring_enc[i].ready << i;
			ib_start_alignment = 64;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+			ring_mask = adev->vcn.ring_jpeg.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b12526ce1a9d..21bfa2d8039e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -51,7 +51,7 @@
 *
 */
 
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
 {
	if (adev->flags & AMD_IS_APU)
		return false;
@@ -84,12 +84,12 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
	}
 }
 
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
-	if (WARN_ON_ONCE(bo->pin_count > 0))
+	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);
 
	if (bo->kfd_bo)
@@ -111,7 +111,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 }
 
 /**
- * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
@@ -120,22 +120,22 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
-	if (bo->destroy == &amdgpu_ttm_bo_destroy)
+	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
 
	return false;
 }
 
 /**
- * amdgpu_ttm_placement_from_domain - set buffer's placement
+ * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
@@ -216,6 +216,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
		c++;
	}
 
+	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
	placement->num_placement = c;
	placement->placement = places;
 
@@ -488,13 +490,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, bp->domain);
+	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;
 
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
-				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;
 
@@ -594,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
	if (r)
		return r;
 
-	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));
@@ -682,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
	domain = bo->preferred_domains;
 
 retry:
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
@@ -915,7 +917,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;
 
@@ -1246,7 +1248,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
 
-	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;
 
	abo = ttm_to_amdgpu_bo(bo);
@@ -1263,7 +1265,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
		return;
 
	/* move_notify is called before move happens */
-	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 /**
@@ -1285,7 +1287,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
	unsigned long offset, size;
	int r;
 
-	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;
 
	abo = ttm_to_amdgpu_bo(bo);
@@ -1307,8 +1309,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
-	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
-					 AMDGPU_GEM_DOMAIN_GTT);
+	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+					AMDGPU_GEM_DOMAIN_GTT);
 
	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9c3e29a04eb1..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
 #include "amdgpu.h"
 
 #define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS	3
 
 struct amdgpu_bo_param {
	unsigned long			size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
-	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3ed02f472003..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -323,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
		return ret;
 
	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index ea9850c9224d..d8357290ad09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
			       u32 ring,
			       struct amdgpu_ring **out_ring)
 {
-	u32 instance;
-
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
		*out_ring = &adev->sdma.instance[ring].ring;
		break;
	case AMDGPU_HW_IP_UVD:
-		instance = ring;
-		*out_ring = &adev->uvd.inst[instance].ring;
+		*out_ring = &adev->uvd.inst[0].ring;
		break;
	case AMDGPU_HW_IP_VCE:
		*out_ring = &adev->vce.ring[ring];
		break;
	case AMDGPU_HW_IP_UVD_ENC:
-		instance = ring / adev->uvd.num_enc_rings;
-		*out_ring =
-			&adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		*out_ring = &adev->vcn.ring_dec;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76920035eb22..11f262f15200 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -436,7 +436,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
			__entry->total_bo, __entry->total_size)
 );
 
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
	TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
	TP_ARGS(bo, new_placement, old_placement),
	TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..8c4358e36c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -248,7 +248,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	}
 
	/* Object isn't an AMDGPU object so ignore */
-	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
@@ -261,7 +261,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
-			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -271,7 +271,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
-			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
@@ -279,12 +279,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
-			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
-		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
 }
@@ -1925,8 +1925,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
			return;
		}
	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
+		dma_fence_put(man->move);
+		man->move = NULL;
	}
 
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..fca86d71fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);
@@ -473,7 +472,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
-			amdgpu_ttm_placement_from_domain(bo, domain);
+			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);
 
@@ -1014,7 +1013,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
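The dominant theme in the hunks above is the mechanical rename of amdgpu_ttm_placement_from_domain() to amdgpu_bo_placement_from_domain() (and its is_amdgpu_bo/destroy/trace companions); the usage pattern at every call site is unchanged. For reference, the recurring two-step idiom, exactly as it appears in the prime.c and gem.c hunks (the wrapper function name migrate_to_gtt_sketch is illustrative):

	/* Set the desired placement, then ask TTM to (re)validate the BO there. */
	static int migrate_to_gtt_sketch(struct amdgpu_bo *bo)
	{
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

The rename also lets the placements array shrink from AMDGPU_GEM_DOMAIN_MAX + 1 entries to AMDGPU_BO_MAX_PLACEMENTS (3), with the new BUG_ON in amdgpu_bo_placement_from_domain() guarding the smaller bound.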
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)
		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..9eedc9810004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
		ats_entries = 0;
	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
@@ -495,11 +495,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
		eaddr = eaddr & ((1 << shift) - 1);
 
	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	if (vm->root.base.bo->shadow)
+		flags |= AMDGPU_GEM_CREATE_SHADOW;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
-		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				AMDGPU_GEM_CREATE_SHADOW);
+		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 
	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -1113,7 +1114,7 @@ restart:
		struct amdgpu_ring *ring;
		struct dma_fence *fence;
 
-		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
				    sched);
 
		amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1404,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
					   addr, flags);
	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
	nptes = last - start + 1;
 
@@ -2587,7 +2588,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-	else
+	else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
		flags |= AMDGPU_GEM_CREATE_SHADOW;
 
	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2642,7 +2643,7 @@ error_free_root:
	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
	return r;
 }
@@ -2662,8 +2663,7 @@ error_free_sched_entity:
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
@@ -2713,6 +2713,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
		vm->pasid = 0;
	}
 
+	/* Free the shadow bo for compute VM */
+	amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
 error:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
@@ -2779,7 +2782,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 702e257a483f..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
			tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
			WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
 
-			mdelay(100);
+			msleep(100);
 
			/* linkctl */
			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9ab39117cc4e..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3490,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 
		/* wait for RLC_SAFE_MODE */
		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
				break;
			udelay(1);
		}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 10920f0bd85f..36dc367c4b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -28,6 +28,7 @@
 #include "cik.h"
 #include "gmc_v7_0.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
 
 #include "bif/bif_4_1_d.h"
 #include "bif/bif_4_1_sh_mask.h"
@@ -1078,6 +1079,12 @@ static int gmc_v7_0_sw_init(void *handle)
		adev->vm_manager.vram_base_offset = 0;
	}
 
+	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+					GFP_KERNEL);
+	if (!adev->gmc.vm_fault_info)
+		return -ENOMEM;
+	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
	return 0;
 }
 
@@ -1087,6 +1094,7 @@ static int gmc_v7_0_sw_fini(void *handle)
 
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
+	kfree(adev->gmc.vm_fault_info);
	gmc_v7_0_gart_fini(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
@@ -1276,7 +1284,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
 {
-	u32 addr, status, mc_client;
+	u32 addr, status, mc_client, vmid;
 
	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1301,6 +1309,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
					 entry->pasid);
	}
 
+	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			     VMID);
+	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+		u32 protections = REG_GET_FIELD(status,
+					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					PROTECTIONS);
+
+		info->vmid = vmid;
+		info->mc_id = REG_GET_FIELD(status,
+					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					    MEMORY_CLIENT_ID);
+		info->status = status;
+		info->page_addr = addr;
+		info->prot_valid = protections & 0x7 ? true : false;
+		info->prot_read = protections & 0x8 ? true : false;
+		info->prot_write = protections & 0x10 ? true : false;
+		info->prot_exec = protections & 0x20 ? true : false;
+		mb();
+		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+	}
+
	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 75f3ffb2891e..70fc97b59b4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "gmc_v8_0.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
 
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
@@ -1182,6 +1183,12 @@ static int gmc_v8_0_sw_init(void *handle)
		adev->vm_manager.vram_base_offset = 0;
	}
 
+	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+					GFP_KERNEL);
+	if (!adev->gmc.vm_fault_info)
+		return -ENOMEM;
+	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
	return 0;
 }
 
@@ -1191,6 +1198,7 @@ static int gmc_v8_0_sw_fini(void *handle)
 
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
+	kfree(adev->gmc.vm_fault_info);
	gmc_v8_0_gart_fini(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
@@ -1426,7 +1434,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
 {
-	u32 addr, status, mc_client;
+	u32 addr, status, mc_client, vmid;
 
	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1463,6 +1471,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
					 entry->pasid);
	}
 
+	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			     VMID);
+	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+		u32 protections = REG_GET_FIELD(status,
+					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					PROTECTIONS);
+
+		info->vmid = vmid;
+		info->mc_id = REG_GET_FIELD(status,
+					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					    MEMORY_CLIENT_ID);
+		info->status = status;
+		info->page_addr = addr;
+		info->prot_valid = protections & 0x7 ? true : false;
+		info->prot_read = protections & 0x8 ? true : false;
+		info->prot_write = protections & 0x10 ? true : false;
+		info->prot_exec = protections & 0x20 ? true : false;
+		mb();
+		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+	}
+
	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 9df94b45d17d..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
-		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
+		dev_err(adev->dev, "  at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
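The gmc_v7/gmc_v8 interrupt handlers above publish a fault snapshot for KFD through a single-slot, lock-free handshake, paired with the consumer added in amdgpu_amdkfd_gpuvm.c earlier in this diff. A condensed sketch of both sides (illustrative helper names; the mb() calls order the payload accesses against the atomic flag that hands the slot over):

	/* Producer side: runs in the VM fault interrupt handler. */
	static void publish_fault_sketch(struct amdgpu_device *adev,
					 const struct kfd_vm_fault_info *snap)
	{
		if (atomic_read(&adev->gmc.vm_fault_info_updated))
			return;				/* previous fault not consumed yet */
		*adev->gmc.vm_fault_info = *snap;	/* fill the payload */
		mb();					/* payload visible before the flag */
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	/* Consumer side: the kfd2kgd .get_vm_fault_info callback. */
	static void consume_fault_sketch(struct amdgpu_device *adev,
					 struct kfd_vm_fault_info *out)
	{
		if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
			*out = *adev->gmc.vm_fault_info;	/* copy out the payload */
			mb();					/* copy done before release */
			atomic_set(&adev->gmc.vm_fault_info_updated, 0);
		}
	}

Because the slot is released only after the copy completes, at most one fault is in flight at a time and a second fault arriving before KFD reads the first is simply dropped by the producer's early return.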