author	Nicolai Hähnle <Nicolai.Haehnle@amd.com>	2016-11-28 12:52:27 +0100
committer	Nicolai Hähnle <Nicolai.Haehnle@amd.com>	2016-12-01 12:18:58 +0100
commit	5c7396e2676bc8efb7cb4603d25d3577d377e714 (patch)
tree	6a6c4bf3ffacbae036cda49bbaa9b46242d1a145
parent	9363d84289a65e47b4ce7def1d27b0971a8e202f (diff)
[rfc] locking/ww_mutex: Always spin optimistically for the first waiter
Check the current owner's context once against our stamp. If our stamp is
lower, we continue to spin optimistically instead of backing off.

This is correct with respect to deadlock detection because while the
(owner, ww_ctx) pair may re-appear if the owner task manages to unlock
and re-acquire the lock while we're spinning, the context could only have
been re-initialized with an even higher stamp. We also still detect when
we have to back off for other waiters that join the list while we're
spinning.

But taking the wait_lock in mutex_spin_on_owner feels iffy, even if it is
done only once.

Median timings taken of a contention-heavy GPU workload:

Before:
real    0m53.086s
user    0m7.360s
sys     1m46.204s

After:
real    0m52.577s
user    0m7.544s
sys     1m49.200s

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
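To make the decision concrete: before the spin loop is entered, the patch
compares our stamp against the owner's acquire context exactly once; a
younger (higher-stamp) context backs off immediately, an older one keeps
spinning. Below is a minimal stand-alone model of that decision, not the
kernel code itself: struct acquire_ctx, stamp_after() and should_spin()
are simplified stand-ins for struct ww_acquire_ctx,
__ww_mutex_stamp_after() and the new check in mutex_spin_on_owner(), and
the kernel's wrap-around and tie-breaking handling is omitted.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for struct ww_acquire_ctx; the stamp is the ticket
 * handed out at acquire-init time, lower == older == higher priority.
 */
struct acquire_ctx {
	unsigned long stamp;
};

/* Model of the stamp comparison: true if a is younger than b. */
static bool stamp_after(const struct acquire_ctx *a, const struct acquire_ctx *b)
{
	return a->stamp > b->stamp;
}

/*
 * Decision made once before spinning: if we are younger than the current
 * owner we will have to back off anyway, so spinning is wasted work; if
 * we are older (or the owner has no context), spin optimistically.
 */
static bool should_spin(const struct acquire_ctx *me,
			const struct acquire_ctx *owner_ctx)
{
	if (owner_ctx && stamp_after(me, owner_ctx))
		return false;	/* we are younger: back off */
	return true;		/* we are older: keep spinning */
}

int main(void)
{
	struct acquire_ctx older = { .stamp = 1 };
	struct acquire_ctx younger = { .stamp = 2 };

	printf("older ctx vs younger owner: %s\n",
	       should_spin(&older, &younger) ? "spin" : "back off");
	printf("younger ctx vs older owner: %s\n",
	       should_spin(&younger, &older) ? "spin" : "back off");
	return 0;
}

Running the model prints "spin" for the older context and "back off" for
the younger one, mirroring the early-out added before rcu_read_lock().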
-rw-r--r--	kernel/locking/mutex.c	35
1 file changed, 32 insertions(+), 3 deletions(-)
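For context on where the back-off predicted here eventually surfaces: the
losing (younger) context sees -EDEADLK from ww_mutex_lock() and has to
release everything it holds before retrying. The sketch below shows that
usual acquire/back-off pattern with the in-tree ww_mutex API;
demo_ww_class, struct demo_obj and demo_lock_pair() are made-up names
used only for illustration.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);		/* hypothetical lock class */

struct demo_obj {				/* hypothetical locked object */
	struct ww_mutex lock;
};

static void demo_lock_pair(struct demo_obj *a, struct demo_obj *b)
{
	struct demo_obj *first = a, *second = b;
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &demo_ww_class);	/* our stamp is assigned here */

	err = ww_mutex_lock(&first->lock, &ctx); /* cannot deadlock: nothing acquired yet */
	if (!err)
		err = ww_mutex_lock(&second->lock, &ctx);
	while (err == -EDEADLK) {
		/*
		 * We are the younger context: drop everything we hold, then
		 * sleep-wait on the contended lock and retry in the new order.
		 */
		ww_mutex_unlock(&first->lock);
		swap(first, second);
		ww_mutex_lock_slow(&first->lock, &ctx);
		err = ww_mutex_lock(&second->lock, &ctx);
	}

	ww_acquire_done(&ctx);

	/* ... both locks held, do the work ... */

	ww_mutex_unlock(&b->lock);
	ww_mutex_unlock(&a->lock);
	ww_acquire_fini(&ctx);
}

The spin-vs-back-off heuristic in this patch only affects how long the
losing context spins before reaching that -EDEADLK path; the locking
protocol itself is unchanged.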
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 38d173ce5762..92162391d477 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -378,6 +378,28 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 			 struct mutex_waiter *waiter)
 {
 	bool ret = true;
+	struct ww_acquire_ctx *owner_ww_ctx = NULL;
+
+	if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
+		struct ww_mutex *ww;
+		unsigned long flags;
+
+		ww = container_of(lock, struct ww_mutex, base);
+
+		/*
+		 * Check the stamp of the current owner once. This allows us
+		 * to spin optimistically in the case where the current owner
+		 * has a higher stamp than us.
+		 */
+		spin_lock_mutex(&lock->wait_lock, flags);
+		owner_ww_ctx = ww->ctx;
+		if (owner_ww_ctx &&
+		    __ww_mutex_stamp_after(ww_ctx, owner_ww_ctx)) {
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			return false;
+		}
+		spin_unlock_mutex(&lock->wait_lock, flags);
+	}
 
 	rcu_read_lock();
 	while (__mutex_owner(lock) == owner) {
@@ -414,9 +436,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 			 * Check this in every inner iteration because we may
 			 * be racing against another thread's ww_mutex_lock.
 			 */
-			if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) {
-				ret = false;
-				break;
+			if (ww_ctx->acquired > 0) {
+				struct ww_acquire_ctx *current_ctx;
+
+				current_ctx = READ_ONCE(ww->ctx);
+
+				if (current_ctx &&
+				    current_ctx != owner_ww_ctx) {
+					ret = false;
+					break;
+				}
 			}
 
 			/*