Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c	390
1 file changed, 235 insertions, 155 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0593ef8fe0a6..ea14f1c8f430 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -26,30 +26,30 @@ #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
-#include <drm/drm_vblank.h>
 #include <drm/drm_managed.h>
-#include "amdgpu_drv.h"
-
 #include <drm/drm_pciids.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/vga_switcheroo.h>
 #include <drm/drm_probe_helper.h>
-#include <linux/mmu_notifier.h>
-#include <linux/suspend.h>
+#include <drm/drm_vblank.h>
+
 #include <linux/cc_platform.h>
 #include <linux/dynamic_debug.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/vga_switcheroo.h>
 
 #include "amdgpu.h"
-#include "amdgpu_irq.h"
+#include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
-#include "amdgpu_sched.h"
+#include "amdgpu_drv.h"
 #include "amdgpu_fdinfo.h"
-#include "amdgpu_amdkfd.h"
-
+#include "amdgpu_irq.h"
+#include "amdgpu_psp.h"
 #include "amdgpu_ras.h"
-#include "amdgpu_xgmi.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_sched.h"
+#include "amdgpu_xgmi.h"
 
 #include "../amdxcp/amdgpu_xcp_drv.h"
 
 /*
@@ -113,11 +113,24 @@ * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
  * 3.53.0 - Support for GFX11 CP GFX shadowing
  * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
+ * 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
+ * 3.56.0 - Update IB start address and size alignment for decode and encode
+ * 3.57.0 - Compute tunneling on GFX10+
  */
 
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	54
+#define KMS_DRIVER_MINOR	57
 #define KMS_DRIVER_PATCHLEVEL	0
 
+/*
+ * amdgpu.debug module options. Are all disabled by default
+ */
+enum AMDGPU_DEBUG_MASK {
+	AMDGPU_DEBUG_VM = BIT(0),
+	AMDGPU_DEBUG_LARGEBAR = BIT(1),
+	AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+	AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
+};
+
 unsigned int amdgpu_vram_limit = UINT_MAX;
 int amdgpu_vis_vram_limit;
 int amdgpu_gart_size = -1; /* auto */
@@ -140,7 +153,6 @@ int amdgpu_vm_size = -1;
 int amdgpu_vm_fragment_size = -1;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop;
-int amdgpu_vm_debug;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support;
 int amdgpu_dc = -1;
@@ -183,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = -1;
 int amdgpu_discovery = -1;
 int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
 int amdgpu_mes_kiq;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
@@ -195,6 +208,12 @@ int amdgpu_use_xgmi_p2p = 1;
 int amdgpu_vcnfw_log;
 int amdgpu_sg_display = -1; /* auto */
 int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
+int amdgpu_umsch_mm;
+int amdgpu_seamless = -1; /* auto */
+uint amdgpu_debug_mask;
+int amdgpu_agp = -1; /* auto */
+int amdgpu_wbrf = -1;
+int amdgpu_damage_clips = -1; /* auto */
 
 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 
@@ -313,9 +332,7 @@ module_param_named(msi, amdgpu_msi, int, 0444);
  * jobs is 10000. The timeout for compute is 60000.
  */
 MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
-		"for passthrough or sriov, 10000 for all jobs."
-		" 0: keep default value. negative: infinity timeout), "
-		"format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+		"for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
 		"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
 module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
 
@@ -350,8 +367,9 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
  * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
  * the dGPUs when they are idle if supported. The default is -1 (auto enable).
  * Setting the value to 0 disables this functionality.
+ * Setting the value to -2 is auto enabled with power down when displays are attached.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
@@ -408,13 +426,6 @@ MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = prin
 module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 
 /**
- * DOC: vm_debug (int)
- * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
- */
-MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
-module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
-
-/**
  * DOC: vm_update_mode (int)
  * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
  * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
@@ -584,8 +595,8 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
  */
 #ifdef CONFIG_DRM_AMDGPU_SI
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support = 0;
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
+int amdgpu_si_support;
 MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
 #else
 int amdgpu_si_support = 1;
@@ -603,8 +614,8 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
  */
 #ifdef CONFIG_DRM_AMDGPU_CIK
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support = 0;
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
+int amdgpu_cik_support;
 MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
 #else
 int amdgpu_cik_support = 1;
@@ -620,8 +631,7 @@ module_param_named(cik_support, amdgpu_cik_support, int, 0444);
  * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
  */
 MODULE_PARM_DESC(smu_memory_pool_size,
-	"reserve gtt for smu debug usage, 0 = disable,"
-		"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+	"reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
 module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
 
 /**
@@ -659,6 +669,15 @@ MODULE_PARM_DESC(mes,
 module_param_named(mes, amdgpu_mes, int, 0444);
 
 /**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+	"Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
+/**
  * DOC: mes_kiq (int)
  * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
  * (0 = disabled (default), 1 = enabled)
@@ -747,32 +766,6 @@ MODULE_PARM_DESC(send_sigterm,
 	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
 
 /**
- * DOC: debug_largebar (int)
- * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar
- * system. This limits the VRAM size reported to ROCm applications to the visible
- * size, usually 256MB.
- * Default value is 0, diabled.
- */
-int debug_largebar;
-module_param(debug_largebar, int, 0444);
-MODULE_PARM_DESC(debug_largebar,
-	"Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
-
-/**
- * DOC: ignore_crat (int)
- * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
- * table to get information about AMD APUs. This option can serve as a workaround on
- * systems with a broken CRAT table.
- *
- * Default is auto (according to asic type, iommu_v2, and crat table, to decide
- * whether use CRAT)
- */
-int ignore_crat;
-module_param(ignore_crat, int, 0444);
-MODULE_PARM_DESC(ignore_crat,
-	"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
-
-/**
  * DOC: halt_if_hws_hang (int)
 * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
 * Setting 1 enables halt on hang.
@@ -791,9 +784,9 @@ module_param(hws_gws_support, bool, 0444);
 MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
 
 /**
-  * DOC: queue_preemption_timeout_ms (int)
-  * queue preemption timeout in ms (1 = Minimum, 9000 = default)
-  */
+ * DOC: queue_preemption_timeout_ms (int)
+ * queue preemption timeout in ms (1 = Minimum, 9000 = default)
+ */
 int queue_preemption_timeout_ms = 9000;
 module_param(queue_preemption_timeout_ms, int, 0644);
 MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
@@ -867,18 +860,31 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
  * the ABM algorithm, with 1 being the least reduction and 4 being the most
  * reduction.
  *
- * Defaults to 0, or disabled. Userspace can still override this level later
- * after boot.
+ * Defaults to -1, or disabled. Userspace can only override this level after
+ * boot if it's set to auto.
  */
-uint amdgpu_dm_abm_level;
-MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
-module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+int amdgpu_dm_abm_level = -1;
+MODULE_PARM_DESC(abmlevel,
+	"ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
+module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
 
 int amdgpu_backlight = -1;
 MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
 module_param_named(backlight, amdgpu_backlight, bint, 0444);
 
 /**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+	"Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
  * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
 * to or read from memory.
@@ -919,7 +925,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
 * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
 */
 MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+module_param_named(reset_method, amdgpu_reset_method, int, 0644);
 
 /**
 * DOC: bad_page_threshold (int) Bad page threshold is specifies the
@@ -951,6 +957,15 @@ MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
 module_param_named(sg_display, amdgpu_sg_display, int, 0444);
 
 /**
+ * DOC: umsch_mm (int)
+ * Enable Multi Media User Mode Scheduler. This is a HW scheduling engine for VCN and VPE.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(umsch_mm,
+	"Enable Multi Media User Mode Scheduler (0 = disabled (default), 1 = enabled)");
+module_param_named(umsch_mm, amdgpu_umsch_mm, int, 0444);
+
+/**
 * DOC: smu_pptable_id (int)
 * Used to override pptable id. id = 0 use VBIOS pptable.
 * id > 0 use the soft pptable with specicfied id.
@@ -981,6 +996,51 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
 module_param(enforce_isolation, bool, 0444);
 MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
 
+/**
+ * DOC: seamless (int)
+ * Seamless boot will keep the image on the screen during the boot process.
+ */
+MODULE_PARM_DESC(seamless, "Seamless boot (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(seamless, amdgpu_seamless, int, 0444);
+
+/**
+ * DOC: debug_mask (uint)
+ * Debug options for amdgpu, work as a binary mask with the following options:
+ *
+ * - 0x1: Debug VM handling
+ * - 0x2: Enable simulating large-bar capability on non-large bar system. This
+ *   limits the VRAM size reported to ROCm applications to the visible
+ *   size, usually 256MB.
+ * - 0x4: Disable GPU soft recovery, always do a full reset
+ */
+MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
+module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+
+/**
+ * DOC: agp (int)
+ * Enable the AGP aperture. This provides an aperture in the GPU's internal
+ * address space for direct access to system memory. Note that these accesses
+ * are non-snooped, so they are only used for access to uncached memory.
+ */
+MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(agp, amdgpu_agp, int, 0444);
+
+/**
+ * DOC: wbrf (int)
+ * Enable Wifi RFI interference mitigation feature.
+ * Due to electrical and mechanical constraints there may be likely interference of
+ * relatively high-powered harmonics of the (G-)DDR memory clocks with local radio
+ * module frequency bands used by Wifi 6/6e/7. To mitigate the possible RFI interference,
+ * with this feature enabled, PMFW will use either “shadowed P-State” or “P-State” based
+ * on active list of frequencies in-use (to be avoided) as part of initial setting or
+ * P-state transition. However, there may be potential performance impact with this
+ * feature enabled.
+ * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
+ */
+MODULE_PARM_DESC(wbrf,
+	"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
+module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+
 /* These devices are not supported by amdgpu.
 * They are supported by the mach64, r128, radeon drivers
 */
@@ -2061,6 +2121,14 @@ static const struct pci_device_id pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
+	/* differentiate between P10 and P11 asics with the same DID */
+	{0x67FF, 0xE3, CHIP_POLARIS10},
+	{0x67FF, 0xE7, CHIP_POLARIS10},
+	{0x67FF, 0xF3, CHIP_POLARIS10},
+	{0x67FF, 0xF7, CHIP_POLARIS10},
+};
+
 static const struct drm_driver amdgpu_kms_driver;
 
 static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
@@ -2085,6 +2153,45 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
 	}
 }
 
+static void amdgpu_init_debug_options(struct amdgpu_device *adev)
+{
+	if (amdgpu_debug_mask & AMDGPU_DEBUG_VM) {
+		pr_info("debug: VM handling debug enabled\n");
+		adev->debug_vm = true;
+	}
+
+	if (amdgpu_debug_mask & AMDGPU_DEBUG_LARGEBAR) {
+		pr_info("debug: enabled simulating large-bar capability on non-large bar system\n");
+		adev->debug_largebar = true;
+	}
+
+	if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY) {
+		pr_info("debug: soft reset for GPU recovery disabled\n");
+		adev->debug_disable_soft_recovery = true;
+	}
+
+	if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+		pr_info("debug: place fw in vram for frontdoor loading\n");
+		adev->debug_use_vram_fw_buf = true;
+	}
+}
+
+static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
+		if (pdev->device == asic_type_quirks[i].device &&
+		    pdev->revision == asic_type_quirks[i].revision) {
+			flags &= ~AMD_ASIC_MASK;
+			flags |= asic_type_quirks[i].type;
+			break;
+		}
+	}
+
+	return flags;
+}
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
@@ -2112,15 +2219,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
 	}
-	/* differentiate between P10 and P11 asics with the same DID */
-	if (pdev->device == 0x67FF &&
-	    (pdev->revision == 0xE3 ||
-	     pdev->revision == 0xE7 ||
-	     pdev->revision == 0xF3 ||
-	     pdev->revision == 0xF7)) {
-		flags &= ~AMD_ASIC_MASK;
-		flags |= CHIP_POLARIS10;
-	}
+
+	flags = amdgpu_fix_asic_type(pdev, flags);
 
 	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
@@ -2185,6 +2285,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, ddev);
 
+	amdgpu_init_debug_options(adev);
+
 	ret = amdgpu_driver_load_kms(adev, flags);
 	if (ret)
 		goto err_pci;
@@ -2204,6 +2306,10 @@ retry_init:
 	if (ret)
 		goto err_pci;
 
+	ret = amdgpu_amdkfd_drm_client_create(adev);
+	if (ret)
+		goto err_pci;
+
 	/*
 	 * 1. don't init fbdev on hw without DCE
 	 * 2. don't init fbdev if there are no connectors
@@ -2238,6 +2344,8 @@ retry_init:
 		pm_runtime_mark_last_busy(ddev->dev);
 		pm_runtime_put_autosuspend(ddev->dev);
 
+		pci_wake_from_d3(pdev, TRUE);
+
 		/*
 		 * For runpm implemented via BACO, PMFW will handle the
 		 * timing for BACO in and out:
@@ -2284,38 +2392,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 		pm_runtime_forbid(dev->dev);
 	}
 
-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
-	    !amdgpu_sriov_vf(adev)) {
-		bool need_to_reset_gpu = false;
-
-		if (adev->gmc.xgmi.num_physical_nodes > 1) {
-			struct amdgpu_hive_info *hive;
-
-			hive = amdgpu_get_xgmi_hive(adev);
-			if (hive->device_remove_count == 0)
-				need_to_reset_gpu = true;
-			hive->device_remove_count++;
-			amdgpu_put_xgmi_hive(hive);
-		} else {
-			need_to_reset_gpu = true;
-		}
-
-		/* Workaround for ASICs need to reset SMU.
-		 * Called only when the first device is removed.
-		 */
-		if (need_to_reset_gpu) {
-			struct amdgpu_reset_context reset_context;
-
-			adev->shutdown = true;
-			memset(&reset_context, 0, sizeof(reset_context));
-			reset_context.method = AMD_RESET_METHOD_NONE;
-			reset_context.reset_req_dev = adev;
-			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-			set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
-			amdgpu_device_gpu_recover(adev, NULL, &reset_context);
-		}
-	}
-
 	amdgpu_driver_unload_kms(dev);
 
 	/*
@@ -2405,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 
 	/* Use a common context, just need to make sure full reset is done */
 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
 
 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
 	if (r) {
@@ -2413,11 +2490,13 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 	}
 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
 		adev = mgpu_info.gpu_ins[i].adev;
-		if (!adev->kfd.init_complete)
+		if (!adev->kfd.init_complete) {
+			kgd2kfd_init_zone_device(adev);
 			amdgpu_amdkfd_device_init(adev);
+			amdgpu_amdkfd_drm_client_create(adev);
+		}
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
-	return;
 }
 
 static int amdgpu_pmops_prepare(struct device *dev)
@@ -2428,8 +2507,9 @@ static int amdgpu_pmops_prepare(struct device *dev)
 	/* Return a positive number here so
 	 * DPM_FLAG_SMART_SUSPEND works properly
 	 */
-	if (amdgpu_device_supports_boco(drm_dev))
-		return pm_runtime_suspended(dev);
+	if (amdgpu_device_supports_boco(drm_dev) &&
+	    pm_runtime_suspended(dev))
+		return 1;
 
 	/* if we will not support s3 or s2i for the device
	 * then skip suspend
@@ -2438,7 +2518,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
 	    !amdgpu_acpi_is_s3_active(adev))
 		return 1;
 
-	return 0;
+	return amdgpu_device_prepare(drm_dev);
 }
 
 static void amdgpu_pmops_complete(struct device *dev)
@@ -2451,6 +2531,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+	adev->suspend_complete = false;
 	if (amdgpu_acpi_is_s0ix_active(adev))
 		adev->in_s0ix = true;
 	else if (amdgpu_acpi_is_s3_active(adev))
@@ -2465,6 +2546,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+	adev->suspend_complete = true;
 	if (amdgpu_acpi_should_gpu_reset(adev))
 		return amdgpu_asic_reset(adev);
 
@@ -2541,24 +2623,26 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
 	struct drm_connector_list_iter iter;
 	int ret = 0;
 
-	/* XXX: Return busy if any displays are connected to avoid
-	 * possible display wakeups after runtime resume due to
-	 * hotplug events in case any displays were connected while
-	 * the GPU was in suspend. Remove this once that is fixed.
-	 */
-	mutex_lock(&drm_dev->mode_config.mutex);
-	drm_connector_list_iter_begin(drm_dev, &iter);
-	drm_for_each_connector_iter(list_connector, &iter) {
-		if (list_connector->status == connector_status_connected) {
-			ret = -EBUSY;
-			break;
+	if (amdgpu_runtime_pm != -2) {
+		/* XXX: Return busy if any displays are connected to avoid
+		 * possible display wakeups after runtime resume due to
		 * hotplug events in case any displays were connected while
+		 * the GPU was in suspend. Remove this once that is fixed.
+		 */
+		mutex_lock(&drm_dev->mode_config.mutex);
+		drm_connector_list_iter_begin(drm_dev, &iter);
+		drm_for_each_connector_iter(list_connector, &iter) {
+			if (list_connector->status == connector_status_connected) {
+				ret = -EBUSY;
+				break;
+			}
 		}
-	}
-	drm_connector_list_iter_end(&iter);
-	mutex_unlock(&drm_dev->mode_config.mutex);
+		drm_connector_list_iter_end(&iter);
+		mutex_unlock(&drm_dev->mode_config.mutex);
 
-	if (ret)
-		return ret;
+		if (ret)
+			return ret;
+	}
 
 	if (adev->dc_enabled) {
 		struct drm_crtc *crtc;
@@ -2614,6 +2698,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	/* wait for all rings to drain before suspending */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (ring && ring->sched.ready) {
 			ret = amdgpu_fence_wait_empty(ring);
 			if (ret)
@@ -2622,7 +2707,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	}
 
 	adev->in_runpm = true;
-	if (amdgpu_device_supports_px(drm_dev))
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
 	/*
@@ -2632,21 +2717,24 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	 * platforms.
 	 * TODO: this may be also needed for PX capable platform.
 	 */
-	if (amdgpu_device_supports_boco(drm_dev))
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
 
+	ret = amdgpu_device_prepare(drm_dev);
+	if (ret)
+		return ret;
 	ret = amdgpu_device_suspend(drm_dev, false);
 	if (ret) {
 		adev->in_runpm = false;
-		if (amdgpu_device_supports_boco(drm_dev))
+		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
 			adev->mp1_state = PP_MP1_STATE_NONE;
 		return ret;
 	}
 
-	if (amdgpu_device_supports_boco(drm_dev))
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
 		adev->mp1_state = PP_MP1_STATE_NONE;
 
-	if (amdgpu_device_supports_px(drm_dev)) {
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
 		/* Only need to handle PCI state in the driver for ATPX
 		 * PCI core handles it for _PR3.
 		 */
@@ -2655,9 +2743,10 @@
 		pci_ignore_hotplug(pdev);
 		pci_set_power_state(pdev, PCI_D3cold);
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
-	} else if (amdgpu_device_supports_boco(drm_dev)) {
+	} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
 		/* nothing to do */
-	} else if (amdgpu_device_supports_baco(drm_dev)) {
+	} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+		   (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
 		amdgpu_device_baco_enter(drm_dev);
 	}
 
@@ -2680,7 +2769,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 	if (!pci_device_is_present(adev->pdev))
 		adev->no_hw_access = true;
 
-	if (amdgpu_device_supports_px(drm_dev)) {
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
 		/* Only need to handle PCI state in the driver for ATPX
@@ -2692,22 +2781,23 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		if (ret)
 			return ret;
 		pci_set_master(pdev);
-	} else if (amdgpu_device_supports_boco(drm_dev)) {
+	} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
 		/* Only need to handle PCI state in the driver for ATPX
 		 * PCI core handles it for _PR3.
 		 */
 		pci_set_master(pdev);
-	} else if (amdgpu_device_supports_baco(drm_dev)) {
+	} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+		   (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
 	if (ret) {
-		if (amdgpu_device_supports_px(drm_dev))
+		if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
			pci_disable_device(pdev);
 		return ret;
 	}
 
-	if (amdgpu_device_supports_px(drm_dev))
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
 	return 0;
@@ -2717,8 +2807,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
-	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
-	int ret = 1;
+	int ret;
 
 	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
 		pm_runtime_forbid(dev);
@@ -2738,6 +2827,7 @@ long amdgpu_drm_ioctl(struct file *filp,
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev;
 	long ret;
+
 	dev = file_priv->minor->dev;
 	ret = pm_runtime_get_sync(dev->dev);
 	if (ret < 0)
@@ -2802,9 +2892,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
 	if (!filp)
 		return -EINVAL;
 
-	if (filp->f_op != &amdgpu_driver_kms_fops) {
+	if (filp->f_op != &amdgpu_driver_kms_fops)
 		return -EINVAL;
-	}
 
 	file = filp->private_data;
 	*fpriv = file->driver_priv;
@@ -2850,10 +2939,7 @@ static const struct drm_driver amdgpu_kms_driver = {
 	.show_fdinfo = amdgpu_show_fdinfo,
 #endif
 
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import = amdgpu_gem_prime_import,
-	.gem_prime_mmap = drm_gem_prime_mmap,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -2877,10 +2963,7 @@ const struct drm_driver amdgpu_partition_driver = {
 	.fops = &amdgpu_driver_kms_fops,
 	.release = &amdgpu_driver_release_kms,
 
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import = amdgpu_gem_prime_import,
-	.gem_prime_mmap = drm_gem_prime_mmap,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -2897,16 +2980,13 @@ static struct pci_error_handlers amdgpu_pci_err_handler = {
 	.resume = amdgpu_pci_resume,
 };
 
-extern const struct attribute_group amdgpu_vram_mgr_attr_group;
-extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
-
 static const struct attribute_group *amdgpu_sysfs_groups[] = {
 	&amdgpu_vram_mgr_attr_group,
 	&amdgpu_gtt_mgr_attr_group,
+	&amdgpu_flash_attr_group,
 	NULL,
 };
-
 static struct pci_driver amdgpu_kms_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,