author	Ravi Bangoria <ravi.bangoria@linux.ibm.com>	2020-05-14 16:47:39 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2020-05-19 00:14:45 +1000
commit	29da4f91c0c1fbda12b8a31be0d564930208c92e (patch)
tree	5ae2bc339c5d581722f099efd4110efa05dde5be /kernel/events
parent	powerpc/watchpoint: Prepare handler to handle more than one watchpoint (diff)
powerpc/watchpoint: Don't allow concurrent perf and ptrace events
With Book3s DAWR, ptrace and perf watchpoints on powerpc behave differently. A ptrace watchpoint works in one-shot mode and generates a signal before the instruction executes; it is the ptrace user's job to single-step the instruction and re-enable the watchpoint. A perf watchpoint, on the other hand, is handled by the kernel, which emulates/single-steps the instruction and then generates the event. If perf and ptrace create two events with the same or overlapping address ranges, it is ambiguous which of them should single-step the instruction. Because of this, don't allow a perf and a ptrace watchpoint at the same time if their address ranges overlap.

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Michael Neuling <mikey@neuling.org>
Link: https://lore.kernel.org/r/20200514111741.97993-15-ravi.bangoria@linux.ibm.com
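For context, the overlap rejection itself lives on the arch side; the generic change below only adds the hook points. The following is a minimal sketch of the kind of helpers the powerpc side of this series uses, not a copy of it: is_ptrace_bp() keys off ptrace_triggered, the overflow handler the arch ptrace path installs, and HW_BREAKPOINT_SIZE is an assumed arch-specific alignment granule.

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Assumed in scope: the arch's ptrace overflow handler. */
extern void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

/* A breakpoint is ptrace-owned iff ptrace installed its overflow handler. */
static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

/* Compare the hardware-aligned address ranges covered by two breakpoints. */
static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	/* Half-open ranges [saddr, eaddr) overlap iff each starts before the other ends. */
	return (bp1_saddr < bp2_eaddr && bp2_saddr < bp1_eaddr);
}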
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/hw_breakpoint.c	16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 3cc8416ec844..b48d7039a015 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -213,6 +213,15 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
list_del(&bp->hw.bp_list);
}
+__weak int arch_reserve_bp_slot(struct perf_event *bp)
+{
+ return 0;
+}
+
+__weak void arch_release_bp_slot(struct perf_event *bp)
+{
+}
+
/*
* Function to perform processor-specific cleanup during unregistration
*/
@@ -270,6 +279,7 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
struct bp_busy_slots slots = {0};
enum bp_type_idx type;
int weight;
+ int ret;
/* We couldn't initialize breakpoint constraints on boot */
if (!constraints_initialized)
@@ -294,6 +304,10 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
if (slots.pinned + (!!slots.flexible) > nr_slots[type])
return -ENOSPC;
+ ret = arch_reserve_bp_slot(bp);
+ if (ret)
+ return ret;
+
toggle_bp_slot(bp, true, type, weight);
return 0;
@@ -317,6 +331,8 @@ static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
enum bp_type_idx type;
int weight;
+ arch_release_bp_slot(bp);
+
type = find_slot_idx(bp_type);
weight = hw_breakpoint_weight(bp);
toggle_bp_slot(bp, false, type, weight);
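For illustration, here is a hypothetical arch override of the two weak hooks added above, reusing the is_ptrace_bp() and bp_addr_range_overlap() helpers sketched earlier. The bp_entry wrapper and single global list are simplifications of this sketch, not the real powerpc bookkeeping (which tracks per-task and kernel breakpoints separately); both hooks run under nr_bp_mutex via __reserve_bp_slot()/__release_bp_slot(), so no extra locking is taken here.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/perf_event.h>

struct bp_entry {			/* hypothetical tracking record */
	struct list_head list;
	struct perf_event *bp;
};
static LIST_HEAD(arch_bp_list);

int arch_reserve_bp_slot(struct perf_event *bp)
{
	struct bp_entry *e;

	/*
	 * Refuse a perf watchpoint that overlaps a ptrace one, and
	 * vice versa. Returning -ENOSPC makes __reserve_bp_slot()
	 * fail the registration before toggle_bp_slot() commits it.
	 */
	list_for_each_entry(e, &arch_bp_list, list) {
		if (is_ptrace_bp(e->bp) != is_ptrace_bp(bp) &&
		    bp_addr_range_overlap(e->bp, bp))
			return -ENOSPC;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->bp = bp;
	list_add(&e->list, &arch_bp_list);
	return 0;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	struct bp_entry *e, *tmp;

	/* Drop the tracking record for a breakpoint being released. */
	list_for_each_entry_safe(e, tmp, &arch_bp_list, list) {
		if (e->bp == bp) {
			list_del(&e->list);
			kfree(e);
			break;
		}
	}
}

With such an override in place, the second of two overlapping perf/ptrace registrations fails with -ENOSPC, the same error userspace already sees when no watchpoint slot is free.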