From 9eb0e01be831d0f37ea6278a92c32424141f55fb Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 11 Jan 2017 21:09:50 +0100
Subject: perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race

commit 321027c1fe77f892f4ea07846aeae08cefbbb290 upstream.

Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.

The problem is exactly that described in commit:

  f63a8daa5812 ("perf: Fix event->ctx locking")

... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.

That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.

So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
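
For illustration only (this sketch is not part of the original report):
the shape of a userspace trigger. Two threads race to attach a hardware
event to the same pre-existing software group leader via group_fd, so
both opens take the 'move_group' path for the same group. The window is
narrow, so a real reproducer would loop; everything here is a minimal
sketch, not an exploit.

  #include <linux/perf_event.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int leader_fd;	/* leader of a software-only group */

  static long perf_open(struct perf_event_attr *attr, int group_fd)
  {
  	/* pid = 0 (current task), cpu = -1 (any CPU) */
  	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
  }

  static void *racer(void *arg)
  {
  	struct perf_event_attr hw;

  	memset(&hw, 0, sizeof(hw));
  	hw.size = sizeof(hw);
  	hw.type = PERF_TYPE_HARDWARE;	/* hardware event: forces move_group */
  	hw.config = PERF_COUNT_HW_CPU_CYCLES;

  	perf_open(&hw, leader_fd);	/* both threads race here */
  	return NULL;
  }

  int main(void)
  {
  	struct perf_event_attr sw;
  	pthread_t t1, t2;

  	memset(&sw, 0, sizeof(sw));
  	sw.size = sizeof(sw);
  	sw.type = PERF_TYPE_SOFTWARE;
  	sw.config = PERF_COUNT_SW_TASK_CLOCK;

  	leader_fd = perf_open(&sw, -1);
  	if (leader_fd < 0) {
  		perror("perf_event_open");
  		return 1;
  	}

  	pthread_create(&t1, NULL, racer, NULL);
  	pthread_create(&t2, NULL, racer, NULL);
  	pthread_join(t1, NULL);
  	pthread_join(t2, NULL);
  	return 0;
  }
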
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
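
Distilled, the rule being applied is "pin, lock, revalidate, retry". A
simplified userspace sketch of that pattern follows; 'struct obj',
'struct ctx' and lock_obj_ctx() are hypothetical stand-ins, and unlike
the kernel code below this elides the rcu_read_lock() +
atomic_inc_not_zero() dance that makes the initial pin safe against
concurrent teardown.

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdlib.h>

  struct ctx {
  	pthread_mutex_t lock;
  	atomic_int refcount;
  };

  struct obj {
  	_Atomic(struct ctx *) ctx;	/* may be re-pointed concurrently */
  };

  static void put_ctx(struct ctx *c)
  {
  	if (atomic_fetch_sub(&c->refcount, 1) == 1)
  		free(c);
  }

  /*
   * Lock the context 'o' currently belongs to. Another thread may
   * migrate 'o' to a different context while we sleep on the mutex,
   * so re-check o->ctx after acquiring the lock and retry if it moved.
   */
  static struct ctx *lock_obj_ctx(struct obj *o)
  {
  	struct ctx *c;

  	for (;;) {
  		c = atomic_load(&o->ctx);
  		atomic_fetch_add(&c->refcount, 1);	/* pin the candidate */
  		pthread_mutex_lock(&c->lock);
  		if (atomic_load(&o->ctx) == c)
  			return c;	/* still current: locked and pinned */
  		pthread_mutex_unlock(&c->lock);	/* lost the race; retry */
  		put_ctx(c);
  	}
  }
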
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <joaodias@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Min Chong <mchong@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/20170106131444.GZ3174@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[bwh: Backported to 3.2:
 - Use ACCESS_ONCE() instead of READ_ONCE()
 - Test perf_event::group_flags instead of group_caps
 - Add the err_locked cleanup block, which we didn't need before
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 kernel/events/core.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 57 insertions(+), 4 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index a301c68..49a1db4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6474,6 +6474,37 @@ static void mutex_lock_double(struct mutex *a, struct mutex *b)
 	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
 }
 
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+			     struct perf_event_context *ctx)
+{
+	struct perf_event_context *gctx;
+
+again:
+	rcu_read_lock();
+	gctx = ACCESS_ONCE(group_leader->ctx);
+	if (!atomic_inc_not_zero(&gctx->refcount)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+	if (group_leader->ctx != gctx) {
+		mutex_unlock(&ctx->mutex);
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+		goto again;
+	}
+
+	return gctx;
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -6661,14 +6692,31 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	if (move_group) {
-		gctx = group_leader->ctx;
+		gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
+		/*
+		 * Check if we raced against another sys_perf_event_open() call
+		 * moving the software group underneath us.
+		 */
+		if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+			/*
+			 * If someone moved the group out from under us, check
+			 * if this new event wound up on the same ctx, if so
+			 * its the regular !move_group case, otherwise fail.
+			 */
+			if (gctx != ctx) {
+				err = -EINVAL;
+				goto err_locked;
+			} else {
+				perf_event_ctx_unlock(group_leader, gctx);
+				move_group = 0;
+			}
+		}
 
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
 		 * of swizzling perf_event::ctx.
 		 */
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
 		perf_remove_from_context(group_leader, false);
 
 		/*
@@ -6710,7 +6758,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	perf_unpin_context(ctx);
 
 	if (move_group) {
-		mutex_unlock(&gctx->mutex);
+		perf_event_ctx_unlock(group_leader, gctx);
 		put_ctx(gctx);
 	}
 	mutex_unlock(&ctx->mutex);
@@ -6737,6 +6785,11 @@ SYSCALL_DEFINE5(perf_event_open,
 	fd_install(event_fd, event_file);
 	return event_fd;
 
+err_locked:
+	if (move_group)
+		perf_event_ctx_unlock(group_leader, gctx);
+	mutex_unlock(&ctx->mutex);
+	fput(event_file);
 err_context:
 	perf_unpin_context(ctx);
 	put_ctx(ctx);
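
A side note on the double-lock helper this patch keeps using: the tail of
mutex_lock_double() is visible as context at the top of the first hunk.
It avoids ABBA deadlock between the two context mutexes by always taking
the lower-addressed one first. A minimal userspace analogue of that
ordering trick with pthread mutexes (the kernel version also uses
mutex_lock_nested() to tell lockdep the nested acquisition is intended):

  #include <pthread.h>

  /*
   * Take two mutexes in a globally consistent order (by address) so
   * that two threads locking the same pair in opposite argument order
   * cannot deadlock.
   */
  static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
  {
  	if (b < a) {	/* order by address */
  		pthread_mutex_t *tmp = a;
  		a = b;
  		b = tmp;
  	}
  	pthread_mutex_lock(a);
  	pthread_mutex_lock(b);
  }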
--