author     Ingo Molnar <mingo@kernel.org>  2015-03-27 09:46:19 +0100
committer  Ingo Molnar <mingo@kernel.org>  2015-03-27 09:46:19 +0100
commit     936c663aed930972f7e185485fd6c2da69e33819 (patch)
tree       41bb069f66e109fc63c4114a4390de44d3068356 /kernel/events
parent     Merge branch 'perf/urgent' into perf/core, to pick up fixes and to refresh the tree
parent     perf: Remove type specific target pointers
Merge branch 'perf/x86' into perf/core, because it's ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c           73
-rw-r--r--  kernel/events/hw_breakpoint.c   8
2 files changed, 29 insertions(+), 52 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9a5f339a0e2d..b01dfb602db1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -34,11 +34,11 @@
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
+#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
-#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
@@ -351,32 +351,6 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
#ifdef CONFIG_CGROUP_PERF
-/*
- * perf_cgroup_info keeps track of time_enabled for a cgroup.
- * This is a per-cpu dynamically allocated data structure.
- */
-struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
-};
-
-struct perf_cgroup {
- struct cgroup_subsys_state css;
- struct perf_cgroup_info __percpu *info;
-};
-
-/*
- * Must ensure cgroup is pinned (css_get) before calling
- * this function. In other words, we cannot call this function
- * if there is no cgroup event for the current CPU context.
- */
-static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
-{
- return container_of(task_css(task, perf_event_cgrp_id),
- struct perf_cgroup, css);
-}
-
static inline bool
perf_cgroup_match(struct perf_event *event)
{
@@ -3220,7 +3194,10 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
- return local64_read(&event->count) + atomic64_read(&event->child_count);
+ if (event->pmu->count)
+ return event->pmu->count(event);
+
+ return __perf_event_count(event);
}
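
Note: the new perf_event_count() defers to a PMU-supplied pmu->count() method when one exists; otherwise it falls back to the generic helper. A minimal sketch of that presumed __perf_event_count() fallback, reconstructed from the line removed above (the real helper is defined elsewhere in the tree and may differ):

	static __always_inline u64 __perf_event_count(struct perf_event *event)
	{
		/* local count plus counts folded in from exited children */
		return local64_read(&event->count) +
		       atomic64_read(&event->child_count);
	}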
static u64 perf_event_read(struct perf_event *event)
@@ -7149,7 +7126,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
- void *context)
+ void *context, int cgroup_fd)
{
struct pmu *pmu;
struct perf_event *event;
@@ -7204,16 +7181,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (task) {
event->attach_state = PERF_ATTACH_TASK;
-
- if (attr->type == PERF_TYPE_TRACEPOINT)
- event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
- * hw_breakpoint is a bit difficult here..
+ * XXX pmu::event_init needs to know what task to account to
+ * and we cannot use the ctx information because we need the
+ * pmu before we get a ctx.
*/
- else if (attr->type == PERF_TYPE_BREAKPOINT)
- event->hw.bp_target = task;
-#endif
+ event->hw.target = task;
}
if (!overflow_handler && parent_event) {
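
Note: the unified event->hw.target replaces the type-specific hw.tp_target and hw.bp_target pointers, so pmu::event_init implementations can find the task to account to without switching on the event type. A hypothetical init hook illustrating the intended use; only the event->hw.target field is taken from the patch, everything else is illustrative:

	static int example_pmu_event_init(struct perf_event *event)
	{
		/*
		 * Before this change a driver needed hw.tp_target for
		 * tracepoints and hw.bp_target for breakpoints; one
		 * generic field now identifies the target task
		 * (NULL for a per-cpu event).
		 */
		struct task_struct *target = event->hw.target;

		if (target) {
			/* per-task event: account against @target */
		} else {
			/* per-cpu event: no task context */
		}
		return 0;
	}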
@@ -7245,6 +7218,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!has_branch_stack(event))
event->attr.branch_sample_type = 0;
+ if (cgroup_fd != -1) {
+ err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+ if (err)
+ goto err_ns;
+ }
+
pmu = perf_init_event(event);
if (!pmu)
goto err_ns;
@@ -7268,6 +7247,8 @@ err_pmu:
event->destroy(event);
module_put(pmu->module);
err_ns:
+ if (is_cgroup_event(event))
+ perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
@@ -7486,6 +7467,7 @@ SYSCALL_DEFINE5(perf_event_open,
int move_group = 0;
int err;
int f_flags = O_RDWR;
+ int cgroup_fd = -1;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
@@ -7551,21 +7533,16 @@ SYSCALL_DEFINE5(perf_event_open,
get_online_cpus();
+ if (flags & PERF_FLAG_PID_CGROUP)
+ cgroup_fd = pid;
+
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
- NULL, NULL);
+ NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cpus;
}
- if (flags & PERF_FLAG_PID_CGROUP) {
- err = perf_cgroup_connect(pid, event, &attr, group_leader);
- if (err) {
- __free_event(event);
- goto err_cpus;
- }
- }
-
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -ENOTSUPP;
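
Note: with perf_cgroup_connect() folded into perf_event_alloc(), the syscall path above simply reinterprets its pid argument as a cgroup file descriptor when PERF_FLAG_PID_CGROUP is set. A userspace sketch of that calling convention; the cgroup path is an assumption (a typical cgroup-v1 perf_event mount), error handling trimmed:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* Count events for every task in a cgroup: pass an open fd of
	 * the cgroup directory as "pid", with PERF_FLAG_PID_CGROUP. */
	static int open_cgroup_counter(struct perf_event_attr *attr, int cpu)
	{
		int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup",
				   O_RDONLY);

		if (cgrp_fd < 0)
			return -1;

		return syscall(__NR_perf_event_open, attr, cgrp_fd, cpu,
			       -1 /* group_fd */, PERF_FLAG_PID_CGROUP);
	}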
@@ -7802,7 +7779,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
- overflow_handler, context);
+ overflow_handler, context, -1);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
@@ -8163,7 +8140,7 @@ inherit_event(struct perf_event *parent_event,
parent_event->cpu,
child,
group_leader, parent_event,
- NULL, NULL);
+ NULL, NULL, -1);
if (IS_ERR(child_event))
return child_event;
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9803a6600d49..92ce5f4ccc26 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
*/
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
- struct task_struct *tsk = bp->hw.bp_target;
+ struct task_struct *tsk = bp->hw.target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
- if (iter->hw.bp_target == tsk &&
+ if (iter->hw.target == tsk &&
find_slot_idx(iter) == type &&
(iter->cpu < 0 || cpu == iter->cpu))
count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
int nr;
nr = info->cpu_pinned;
- if (!bp->hw.bp_target)
+ if (!bp->hw.target)
nr += max_task_bp_pinned(cpu, type);
else
nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
weight = -weight;
/* Pinned counter cpu profiling */
- if (!bp->hw.bp_target) {
+ if (!bp->hw.target) {
get_bp_info(bp->cpu, type)->cpu_pinned += weight;
return;
}
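
Note: throughout the constraint-accounting code above, a NULL hw.target now distinguishes cpu-pinned from task-bound breakpoints, exactly as bp_target did before the rename. An illustrative helper (not part of the patch) capturing that invariant:

	/* Illustrative only: NULL hw.target means a cpu-pinned counter. */
	static inline bool bp_is_per_task(struct perf_event *bp)
	{
		return bp->hw.target != NULL;
	}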