author    Dave Airlie <airlied@redhat.com>    2021-10-11 18:09:39 +1000
committer Dave Airlie <airlied@redhat.com>    2021-10-11 18:09:39 +1000
commit    1176d15f0f6e556d54ced510ac4a91694960332b (patch)
tree      eba3bba08865e9711647fff161afc75aaa667c67 /kernel
parent    c7c774fe09389fc806bbe4b487c18e45f576c1ae (diff)
parent    1a839e016e4964b5c8384e5d82e5e5ac02a23f52 (diff)
Merge tag 'drm-intel-gt-next-2021-10-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
UAPI Changes:

- Add uAPI for using PXP protected objects
  Mesa changes: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8064
- Add PCI IDs and LMEM discovery/placement uAPI for DG1
  Mesa changes: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11584
- Disable engine bonding on Gen12+ except TGL, RKL and ADL-S

Cross-subsystem Changes:

- Merges 'tip/locking/wwmutex' branch (core kernel tip)
- "mei: pxp: export pavp client to me client bus"

Core Changes:

- Update ttm_move_memcpy for async use (Thomas)

Driver Changes:

- Enable GuC submission by default on DG1 (Matt B)
- Add PXP (Protected Xe Path) support for Gen12 integrated (Daniele, Sean, Anshuman)
  See "drm/i915/pxp: add PXP documentation" for details!
- Remove force_probe protection for ADL-S (Raviteja)
- Add base support for XeHP/XeHP SDV (Matt R, Stuart, Lucas)
- Handle DRI_PRIME=1 on Intel igfx + Intel dgfx hybrid graphics setup (Tvrtko)
- Use Transparent Hugepages when IOMMU is enabled (Tvrtko, Chris)
- Implement LMEM backup and restore for suspend / resume (Thomas)
- Report INSTDONE_GEOM values in error state for DG2 (Matt R)
- Add DG2-specific shadow register table (Matt R)
- Update Gen11/Gen12/XeHP shadow register tables (Matt R)
- Maintain backward-compatible nested batch behavior on TGL+ (Matt R)
- Add new LRI reg offsets for DG2 (Akeem)
- Initialize unused MOCS entries to device specific values (Ayaz)
- Track and use the correct UC MOCS index on Gen12 (Ayaz)
- Add separate MOCS table for Gen12 devices other than TGL/RKL (Ayaz)
- Simplify the locking and eliminate some RCU usage (Daniel)
- Add some flushing for the 64K GTT path (Matt A)
- Mark GPU wedging on driver unregister unrecoverable (Janusz)
- Major rework in the GuC codebase, simplify locking and add docs (Matt B)
- Add DG1 GuC/HuC firmwares (Daniele, Matt B)
- Remember to call i915_sw_fence_fini on guc_state.blocked (Matt A)
- Use "gt" forcewake domain name for error messages instead of "blitter" (Matt R)
- Drop now duplicate LMEM uAPI RFC kerneldoc section (Daniel)
- Fix early tracepoints for requests (Matt A)
- Use locked access to ctx->engines in set_priority (Daniel)
- Convert gen6/gen7/gen8 read operations to fwtable (Matt R)
- Drop gen11/gen12 specific mmio write handlers (Matt R)
- Drop gen11 specific mmio read handlers (Matt R)
- Use designated initializers for init/exit table (Kees)
- Fix syncmap memory leak (Matt B)
- Add pretty printing for buddy allocator state debug (Matt A)
- Fix potential error pointer dereference in pinned_context() (Dan)
- Remove IS_ACTIVE macro (Lucas)
- Static code checker fixes (Nathan)
- Clean up disabled warnings (Nathan)
- Increase timeout in i915_gem_contexts selftests 5x for GuC submission (Matt B)
- Ensure wa_init_finish() is called for ctx workaround list (Matt R)
- Initialize L3CC table in mocs init (Sreedhar, Ayaz, Ram)
- Get PM ref before accessing HW register (Vinay)
- Move __i915_gem_free_object to ttm_bo_destroy (Maarten)
- Deduplicate frequency dump on debugfs (Lucas)
- Make wa list per-gt (Venkata)
- Do not define dummy vma in stack (Venkata)
- Take pinning into account in __i915_gem_object_is_lmem (Matt B, Thomas)
- Do not report currently active engine when describing objects (Tvrtko)
- Fix pdfdocs build error by removing nested grid from GuC docs (Akira)
- Remove false warning from the rps worker (Tejas)
- Flush buffer pools on driver remove (Janusz)
- Fix runtime pm handling in i915_gem_shrink (Maarten)
- Rework TTM object initialization slightly (Thomas)
- Use fixed offset for PTEs location (Michal Wa)
- Verify result from CTB (de)register action and improve error messages (Michal Wa)
- Fix bug in user proto-context creation that leaked contexts (Matt B)
- Re-use Gen11 forcewake read functions on Gen12 (Matt R)
- Make shadow tables range-based (Matt R)
- Ditch the i915_gem_ww_ctx loop member (Thomas, Maarten)
- Use NULL instead of 0 where appropriate (Ville)
- Rename pci/debugfs functions to respect file prefix (Jani, Lucas)
- Drop guc_communication_enabled (Daniele)
- Selftest fixes (Thomas, Daniel, Matt A, Maarten)
- Clean up inconsistent indenting (Colin)
- Use direction definition DMA_BIDIRECTIONAL instead of PCI_DMA_BIDIRECTIONAL (Cai)
- Add "intel_" as prefix in set_mocs_index() (Ayaz)

From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YWAO80MB2eyToYoy@jlahtine-mobl.ger.corp.intel.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
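The headline change for kernel/locking is the new context-aware ww_mutex_trylock() pulled in via the 'tip/locking/wwmutex' branch. As a rough illustration of why a driver wants this, here is a minimal sketch of an eviction-style path; the demo_* names are hypothetical and not part of this merge, only the ww_mutex API calls are real:

	#include <linux/ww_mutex.h>

	static DEFINE_WD_CLASS(demo_class);	/* hypothetical wound/die class */

	/* Lock one object, then opportunistically grab a second one. */
	static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &demo_class);

		ret = ww_mutex_lock(a, &ctx);
		if (ret)	/* e.g. -EALREADY; just bail in this sketch */
			goto out;

		/*
		 * A trylock can never deadlock, so failure simply means
		 * "skip b". Passing the context keeps lockdep and the
		 * wound/die machinery aware of the lock being held.
		 */
		if (ww_mutex_trylock(b, &ctx)) {
			/* ... work with both objects ... */
			ww_mutex_unlock(b);
		}

		ww_mutex_unlock(a);
	out:
		ww_acquire_fini(&ctx);
		return ret;
	}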
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/mutex.c         |  41
-rw-r--r--  kernel/locking/test-ww_mutex.c |  86
-rw-r--r--  kernel/locking/ww_rt_mutex.c   |  25
3 files changed, 131 insertions, 21 deletions
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index d456579d0952..2fede72b6af5 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -94,6 +94,9 @@ static inline unsigned long __owner_flags(unsigned long owner)
return owner & MUTEX_FLAGS;
}
+/*
+ * Returns: __mutex_owner(lock) on failure or NULL on success.
+ */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
unsigned long owner, curr = (unsigned long)current;
@@ -736,6 +739,44 @@ __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
+ * @ww: mutex to lock
+ * @ww_ctx: optional w/w acquire context
+ *
+ * Trylocks a mutex with the optional acquire context; no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ *
+ * Unlike ww_mutex_lock, no deadlock handling is performed. However, if
+ * @ww_ctx is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+{
+ if (!ww_ctx)
+ return mutex_trylock(&ww->base);
+
+ MUTEX_WARN_ON(ww->base.magic != &ww->base);
+
+ /*
+ * Reset the wounded flag after a kill. No other process can
+ * race and wound us here, since they can't have a valid owner
+ * pointer if we don't have any locks held.
+ */
+ if (ww_ctx->acquired == 0)
+ ww_ctx->wounded = 0;
+
+ if (__mutex_trylock(&ww->base)) {
+ ww_mutex_set_context_fastpath(ww, ww_ctx);
+ mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
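The kerneldoc above distinguishes two calling modes: without a context the call degenerates to a plain mutex_trylock(), while with a context the acquired lock participates in the wound/die protocol. A minimal sketch of both modes, using a hypothetical demo2_* setup:

	static DEFINE_WD_CLASS(demo2_class);	/* hypothetical */
	static struct ww_mutex demo2_lock;

	static void demo2(void)
	{
		struct ww_acquire_ctx ctx;

		ww_mutex_init(&demo2_lock, &demo2_class);

		/* Contextless: plain trylock semantics, returns 1 on success. */
		if (ww_mutex_trylock(&demo2_lock, NULL))
			ww_mutex_unlock(&demo2_lock);

		/* With a context: the lock is tracked in @ctx until unlock. */
		ww_acquire_init(&ctx, &demo2_class);
		if (ww_mutex_trylock(&demo2_lock, &ctx)) {
			/*
			 * A recursive lock attempt would now report -EALREADY;
			 * this is exactly what test_aa() below verifies.
			 */
			ww_mutex_unlock(&demo2_lock);
		}
		ww_acquire_fini(&ctx);
	}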
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 3e82f449b4ff..d63ac411f367 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -16,6 +16,15 @@
static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+#define ww_acquire_init_noinject(a, b) do { \
+ ww_acquire_init((a), (b)); \
+ (a)->deadlock_inject_countdown = ~0U; \
+ } while (0)
+#else
+#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
+#endif
+
struct test_mutex {
struct work_struct work;
struct ww_mutex mutex;
@@ -36,7 +45,7 @@ static void test_mutex_work(struct work_struct *work)
wait_for_completion(&mtx->go);
if (mtx->flags & TEST_MTX_TRY) {
- while (!ww_mutex_trylock(&mtx->mutex))
+ while (!ww_mutex_trylock(&mtx->mutex, NULL))
cond_resched();
} else {
ww_mutex_lock(&mtx->mutex, NULL);
@@ -109,19 +118,38 @@ static int test_mutex(void)
return 0;
}
-static int test_aa(void)
+static int test_aa(bool trylock)
{
struct ww_mutex mutex;
struct ww_acquire_ctx ctx;
int ret;
+ const char *from = trylock ? "trylock" : "lock";
ww_mutex_init(&mutex, &ww_class);
ww_acquire_init(&ctx, &ww_class);
- ww_mutex_lock(&mutex, &ctx);
+ if (!trylock) {
+ ret = ww_mutex_lock(&mutex, &ctx);
+ if (ret) {
+ pr_err("%s: initial lock failed!\n", __func__);
+ goto out;
+ }
+ } else {
+ if (!ww_mutex_trylock(&mutex, &ctx)) {
+ pr_err("%s: initial trylock failed!\n", __func__);
+ goto out;
+ }
+ }
- if (ww_mutex_trylock(&mutex)) {
- pr_err("%s: trylocked itself!\n", __func__);
+ if (ww_mutex_trylock(&mutex, NULL)) {
+ pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
+ ww_mutex_unlock(&mutex);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ww_mutex_trylock(&mutex, &ctx)) {
+ pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
ww_mutex_unlock(&mutex);
ret = -EINVAL;
goto out;
@@ -129,17 +157,17 @@ static int test_aa(void)
ret = ww_mutex_lock(&mutex, &ctx);
if (ret != -EALREADY) {
- pr_err("%s: missed deadlock for recursing, ret=%d\n",
- __func__, ret);
+ pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
+ __func__, ret, from);
if (!ret)
ww_mutex_unlock(&mutex);
ret = -EINVAL;
goto out;
}
+ ww_mutex_unlock(&mutex);
ret = 0;
out:
- ww_mutex_unlock(&mutex);
ww_acquire_fini(&ctx);
return ret;
}
@@ -150,7 +178,7 @@ struct test_abba {
struct ww_mutex b_mutex;
struct completion a_ready;
struct completion b_ready;
- bool resolve;
+ bool resolve, trylock;
int result;
};
@@ -160,8 +188,13 @@ static void test_abba_work(struct work_struct *work)
struct ww_acquire_ctx ctx;
int err;
- ww_acquire_init(&ctx, &ww_class);
- ww_mutex_lock(&abba->b_mutex, &ctx);
+ ww_acquire_init_noinject(&ctx, &ww_class);
+ if (!abba->trylock)
+ ww_mutex_lock(&abba->b_mutex, &ctx);
+ else
+ WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));
+
+ WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);
complete(&abba->b_ready);
wait_for_completion(&abba->a_ready);
@@ -181,7 +214,7 @@ static void test_abba_work(struct work_struct *work)
abba->result = err;
}
-static int test_abba(bool resolve)
+static int test_abba(bool trylock, bool resolve)
{
struct test_abba abba;
struct ww_acquire_ctx ctx;
@@ -192,12 +225,18 @@ static int test_abba(bool resolve)
INIT_WORK_ONSTACK(&abba.work, test_abba_work);
init_completion(&abba.a_ready);
init_completion(&abba.b_ready);
+ abba.trylock = trylock;
abba.resolve = resolve;
schedule_work(&abba.work);
- ww_acquire_init(&ctx, &ww_class);
- ww_mutex_lock(&abba.a_mutex, &ctx);
+ ww_acquire_init_noinject(&ctx, &ww_class);
+ if (!trylock)
+ ww_mutex_lock(&abba.a_mutex, &ctx);
+ else
+ WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));
+
+ WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);
complete(&abba.a_ready);
wait_for_completion(&abba.b_ready);
@@ -249,7 +288,7 @@ static void test_cycle_work(struct work_struct *work)
struct ww_acquire_ctx ctx;
int err, erra = 0;
- ww_acquire_init(&ctx, &ww_class);
+ ww_acquire_init_noinject(&ctx, &ww_class);
ww_mutex_lock(&cycle->a_mutex, &ctx);
complete(cycle->a_signal);
@@ -581,7 +620,9 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
static int __init test_ww_mutex_init(void)
{
int ncpus = num_online_cpus();
- int ret;
+ int ret, i;
+
+ printk(KERN_INFO "Beginning ww mutex selftests\n");
wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
if (!wq)
@@ -591,17 +632,19 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
- ret = test_aa();
+ ret = test_aa(false);
if (ret)
return ret;
- ret = test_abba(false);
+ ret = test_aa(true);
if (ret)
return ret;
- ret = test_abba(true);
- if (ret)
- return ret;
+ for (i = 0; i < 4; i++) {
+ ret = test_abba(i & 1, i & 2);
+ if (ret)
+ return ret;
+ }
ret = test_cycle(ncpus);
if (ret)
@@ -619,6 +662,7 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
+ printk(KERN_INFO "All ww mutex selftests passed\n");
return 0;
}
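With these changes the selftests exercise both the lock and trylock flavours. Assuming the kernel is configured with CONFIG_WW_MUTEX_SELFTEST=m, loading the module (modprobe test-ww_mutex) should bracket the run in dmesg with the "Beginning ww mutex selftests" and "All ww mutex selftests passed" messages added above.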
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
index 3f1fff7d2780..0e00205cf467 100644
--- a/kernel/locking/ww_rt_mutex.c
+++ b/kernel/locking/ww_rt_mutex.c
@@ -9,6 +9,31 @@
#define WW_RT
#include "rtmutex.c"
+int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+ struct rt_mutex *rtm = &lock->base;
+
+ if (!ww_ctx)
+ return rt_mutex_trylock(rtm);
+
+ /*
+ * Reset the wounded flag after a kill. No other process can
+ * race and wound us here, since they can't have a valid owner
+ * pointer if we don't have any locks held.
+ */
+ if (ww_ctx->acquired == 0)
+ ww_ctx->wounded = 0;
+
+ if (__rt_mutex_trylock(&rtm->rtmutex)) {
+ ww_mutex_set_context_fastpath(lock, ww_ctx);
+ mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
unsigned int state, unsigned long ip)
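Note that this PREEMPT_RT variant exports ww_mutex_trylock() with the same signature and semantics as the mutex.c version, just built on __rt_mutex_trylock(), so callers such as the sketches earlier on this page should need no RT-specific changes.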