From 8fff105e13041e49b82f92eef034f363a6b1c071 Mon Sep 17 00:00:00 2001
From: "Suzuki K. Poulose" <suzuki.poulose@arm.com>
Date: Tue, 17 Mar 2015 18:14:59 +0000
Subject: arm64: perf: reject groups spanning multiple HW PMUs

The perf core implicitly rejects events spanning multiple HW PMUs, as in
these cases the event->ctx will differ. However this validation is
performed after pmu::event_init() is called in perf_init_event(), and
thus pmu::event_init() may be called with a group leader from a
different HW PMU.

The ARM64 PMU driver does not take this fact into account, and when
validating groups assumes that it can call to_arm_pmu(event->pmu) for
any HW event. When the event in question is from another HW PMU this is
wrong, and results in dereferencing garbage.

This patch updates the ARM64 PMU driver to first test for and reject
events from other PMUs, moving the to_arm_pmu and related logic after
this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with
a CCI PMU present:

Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL)
CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249
Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT)
task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000
PC is at 0x0
LR is at validate_event+0x90/0xa8
pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145
sp : ffffffc07b0a3ba0

[<          (null)>]           (null)
[<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc
[<ffffffc00015d870>] perf_try_init_event+0x34/0x70
[<ffffffc000164094>] perf_init_event+0xe0/0x10c
[<ffffffc000164348>] perf_event_alloc+0x288/0x358
[<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c
Code: bad PC value

Also cleans up the code to use the arm_pmu only when we know
that we are dealing with an arm pmu event.

Cc: Will Deacon <will.deacon@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Peter Ziljstra (Intel) <peterz@infradead.org>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/kernel/perf_event.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 25a5308..68a7415 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -322,22 +322,31 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
@@ -355,15 +364,15 @@ validate_group(struct perf_event *event)
 	memset(fake_used_mask, 0, sizeof(fake_used_mask));
 	fake_pmu.used_mask = fake_used_mask;
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
-- 
cgit v1.1