Diffstat (limited to 'drivers/gpu/drm/xe/xe_pm.c')
 drivers/gpu/drm/xe/xe_pm.c | 99 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index fcfb49af8c89..e518557e0eec 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -70,12 +70,42 @@
  */
 
 #ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_pm_runtime_lockdep_map = {
-	.name = "xe_pm_runtime_lockdep_map"
+static struct lockdep_map xe_pm_runtime_d3cold_map = {
+	.name = "xe_rpm_d3cold_map"
+};
+
+static struct lockdep_map xe_pm_runtime_nod3cold_map = {
+	.name = "xe_rpm_nod3cold_map"
 };
 #endif
 
 /**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
+{
+	return !xe->d3cold.capable && !xe->info.has_sriov;
+}
+
+static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
+{
+	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
+			 &xe_pm_runtime_nod3cold_map :
+			 &xe_pm_runtime_d3cold_map);
+}
+
+static void xe_rpm_lockmap_release(const struct xe_device *xe)
+{
+	lock_map_release(xe_rpm_reclaim_safe(xe) ?
+			 &xe_pm_runtime_nod3cold_map :
+			 &xe_pm_runtime_d3cold_map);
+}
+
+/**
  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
  * @xe: xe device instance
  *
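Why two maps: on a d3cold-capable (or SR-IOV) device, runtime resume may take
dma_resv locks to restore evicted bos, so it must never be entered from memory
reclaim, while other devices are reclaim-safe and record only the nod3cold map.
A minimal sketch of a hypothetical reclaim-context caller follows; the shrinker
wiring (example_shrinker_scan, shrink->private_data) is illustrative only and
not part of this patch:

	/* Illustrative only: waking the device from reclaim context. */
	static unsigned long example_shrinker_scan(struct shrinker *shrink,
						   struct shrink_control *sc)
	{
		struct xe_device *xe = shrink->private_data;	/* assumed wiring */

		/* Only reclaim-safe devices may runtime-resume under fs_reclaim. */
		if (!xe_rpm_reclaim_safe(xe))
			return SHRINK_STOP;

		xe_pm_runtime_get(xe);	/* records the nod3cold map only */
		/* ... release device-held memory ... */
		xe_pm_runtime_put(xe);
		return 0;
	}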
@@ -354,7 +384,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 	 * annotation here and in xe_pm_runtime_get() lockdep will see
 	 * the potential lock inversion and give us a nice splat.
 	 */
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_acquire(xe);
 
 	/*
 	 * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -366,9 +396,9 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 		xe_bo_runtime_pm_release_mmap_offset(bo);
 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
 
-	if (xe->d3cold.allowed) {
-		xe_display_pm_suspend(xe, true);
+	xe_display_pm_runtime_suspend(xe);
 
+	if (xe->d3cold.allowed) {
 		err = xe_bo_evict_all(xe);
 		if (err)
 			goto out;
@@ -387,7 +417,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 out:
 	if (err)
 		xe_display_pm_resume(xe, true);
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_release(xe);
 	xe_pm_write_callback_task(xe, NULL);
 	return err;
 }
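The acquire/release pair wrapping the suspend body is what makes the annotation
bidirectional: lockdep learns rpm-map -> (locks taken inside the callbacks),
while the call-site helper added later in this diff records (caller's locks) ->
rpm-map. A hedged illustration of a pattern that would now splat on a
d3cold-capable device (the bo and xe variables here are assumed):

	/*
	 * Illustrative inversion: priming records d3cold_map -> dma_resv,
	 * so holding a bo reservation while waking the device records
	 * dma_resv -> d3cold_map and lockdep reports the cycle.
	 */
	dma_resv_lock(bo->ttm.base.resv, NULL);
	xe_pm_runtime_get(xe);		/* xe_rpm_might_enter_cb() -> splat */
	dma_resv_unlock(bo->ttm.base.resv);
	xe_pm_runtime_put(xe);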
@@ -408,7 +438,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
 	/* Disable access_ongoing asserts and prevent recursive pm calls */
 	xe_pm_write_callback_task(xe, current);
 
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_acquire(xe);
 
 	if (xe->d3cold.allowed) {
 		err = xe_pcode_ready(xe, true);
@@ -431,14 +461,16 @@ int xe_pm_runtime_resume(struct xe_device *xe)
 	for_each_gt(gt, xe, id)
 		xe_gt_resume(gt);
 
+	xe_display_pm_runtime_resume(xe);
+
 	if (xe->d3cold.allowed) {
-		xe_display_pm_resume(xe, true);
 		err = xe_bo_restore_user(xe);
 		if (err)
 			goto out;
 	}
+
 out:
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_release(xe);
 	xe_pm_write_callback_task(xe, NULL);
 	return err;
 }
@@ -452,15 +484,37 @@ out:
  * stuff that can happen inside the runtime_resume callback by acquiring
  * a dummy lock (it doesn't protect anything and gets compiled out on
  * non-debug builds). Lockdep then only needs to see the
- * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
  * For example if the (callers_locks) are ever grabbed in the
  * runtime_resume callback, lockdep should give us a nice splat.
  */
-static void pm_runtime_lockdep_prime(void)
+static void xe_rpm_might_enter_cb(const struct xe_device *xe)
+{
+	xe_rpm_lockmap_acquire(xe);
+	xe_rpm_lockmap_release(xe);
+}
+
+/*
+ * Prime the lockdep maps for known locking orders that need to
+ * be supported but that may not always occur on all systems.
+ */
+static void xe_pm_runtime_lockdep_prime(void)
 {
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	struct dma_resv lockdep_resv;
+
+	dma_resv_init(&lockdep_resv);
+	lock_map_acquire(&xe_pm_runtime_d3cold_map);
+	/* D3Cold takes the dma_resv locks to evict bos */
+	dma_resv_lock(&lockdep_resv, NULL);
+	dma_resv_unlock(&lockdep_resv);
+	lock_map_release(&xe_pm_runtime_d3cold_map);
+
+	/* Shrinkers might like to wake up the device under reclaim. */
+	fs_reclaim_acquire(GFP_KERNEL);
+	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
+	lock_map_release(&xe_pm_runtime_nod3cold_map);
+	fs_reclaim_release(GFP_KERNEL);
 }
 
 /**
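Read as lock orders, the priming above teaches lockdep two chains once, at
init, without the scenarios having to occur at runtime; roughly (a
comment-style summary of the code just shown):

	/*
	 * Primed orders:
	 *
	 *   xe_pm_runtime_d3cold_map -> dma_resv
	 *       (D3Cold entry/exit may evict and restore bos)
	 *
	 *   fs_reclaim -> xe_pm_runtime_nod3cold_map
	 *       (reclaim may wake a reclaim-safe device)
	 *
	 * Any later chain that closes a cycle against one of these,
	 * e.g. dma_resv -> xe_pm_runtime_d3cold_map, splats immediately.
	 */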
@@ -475,7 +529,7 @@ void xe_pm_runtime_get(struct xe_device *xe)
 	if (xe_pm_read_callback_task(xe) == current)
 		return;
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	pm_runtime_resume(xe->drm.dev);
 }
 
@@ -507,7 +561,7 @@ int xe_pm_runtime_get_ioctl(struct xe_device *xe)
 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
 		return -ELOOP;
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	return pm_runtime_get_sync(xe->drm.dev);
 }
 
@@ -575,7 +629,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
 		return true;
 	}
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
 }
@@ -667,3 +721,14 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
 	drm_dbg(&xe->drm,
 		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
 }
+
+/**
+ * xe_pm_module_init() - Perform xe_pm specific module initialization.
+ *
+ * Return: 0 on success. Currently doesn't fail.
+ */
+int __init xe_pm_module_init(void)
+{
+	xe_pm_runtime_lockdep_prime();
+	return 0;
+}
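xe_pm_module_init() still needs a caller in the driver's module_init path so
the maps are primed before any device can enter runtime PM; this diff does not
show that hookup. A minimal sketch under the assumption of a conventional init
function (the xe_init() shape here is hypothetical):

	/* Hypothetical wiring; the real xe module init sequence differs. */
	static int __init xe_init(void)
	{
		int err;

		err = xe_pm_module_init();	/* prime the lockdep maps once */
		if (err)
			return err;

		/* ... register the PCI driver, etc. ... */
		return 0;
	}
	module_init(xe_init);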