author     Paul Mackerras <paulus@samba.org>  2009-05-14 13:29:14 +1000
committer  Ingo Molnar <mingo@elte.hu>  2009-05-15 16:38:55 +0200
commit     ef923214a4816c289e4af2d67a9ebb1a31e4ac61 (patch)
tree       bf850f4f53a4f8391b6b9c0335e58364668586d9  /arch/powerpc/kernel/power5+-pmu.c
parent     perf_counter: frequency based adaptive irq_period, 32-bit fix (diff)
download   linux-dev-ef923214a4816c289e4af2d67a9ebb1a31e4ac61.tar.xz
           linux-dev-ef923214a4816c289e4af2d67a9ebb1a31e4ac61.zip
perf_counter: powerpc: use u64 for event codes internally
Although the perf_counter API allows 63-bit raw event codes, internally in the
powerpc back-end we had been using 32-bit event codes.  This expands them to
64 bits so that we can add bits for specifying threshold start/stop events and
instruction sampling modes later.

This also corrects the return value of can_go_on_limited_pmc; we were
returning an event code rather than just a 0/1 value in some circumstances.
That didn't particularly matter while event codes were 32-bit, but now that
event codes are 64-bit it might, so this fixes it.

[ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
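The can_go_on_limited_pmc fix mentioned above lives in the powerpc perf_counter core, not in the file diffed below. The standalone sketch that follows only illustrates the truncation hazard the commit message describes; the helper names and the example event value are invented for the sketch and are not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/*
	 * Buggy pattern: the predicate hands back the raw event code as its
	 * "truth" value.  The implicit conversion to int keeps only the low
	 * 32 bits, so a nonzero 64-bit code whose low word is zero reads as 0.
	 */
	static int event_is_limited_buggy(u64 event)
	{
		return event;		/* truncated to int on return */
	}

	/* Fixed pattern: return an explicit 0/1 result. */
	static int event_is_limited_fixed(u64 event)
	{
		return event != 0;
	}

	int main(void)
	{
		u64 event = 1ull << 32;	/* nonzero code, low 32 bits all zero */

		printf("buggy: %d  fixed: %d\n",
		       event_is_limited_buggy(event),
		       event_is_limited_fixed(event));
		return 0;
	}

With 32-bit event codes the two versions could never disagree, which is why the bug was harmless before this patch.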
Diffstat (limited to 'arch/powerpc/kernel/power5+-pmu.c')
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c  14
1 file changed, 7 insertions, 7 deletions
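The diff below only widens function parameters from unsigned int to u64; why that matters is easiest to see in a small standalone program. The shift/mask constants and the "threshold" flag position here are illustrative values made up for the sketch, not the encodings defined in power5+-pmu.c.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/* Illustrative field layout, not the real power5+-pmu.c encoding. */
	#define EX_PMC_SH	20		/* PMC selector field */
	#define EX_PMC_MSK	0xf
	#define EX_THRESH_START	(1ull << 36)	/* hypothetical flag above bit 31 */

	int main(void)
	{
		u64 event = (3ull << EX_PMC_SH) | EX_THRESH_START;
		unsigned int narrow = event;	/* the old 32-bit internal type */

		/* Field extraction looks the same with a u64 event code ... */
		printf("pmc = %llu\n",
		       (unsigned long long)((event >> EX_PMC_SH) & EX_PMC_MSK));

		/* ... but flags above bit 31 are silently dropped by the 32-bit type. */
		printf("threshold flag: u64=%d, u32=%d\n",
		       (event & EX_THRESH_START) != 0,
		       ((u64)narrow & EX_THRESH_START) != 0);
		return 0;
	}

This is the headroom the commit message reserves for threshold start/stop events and instruction sampling modes: such bits only survive if every function in the back-end carries the event code as a u64.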
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 8154eaa2404f..3ac0654372ab 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -135,7 +135,7 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = {
[PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull },
};
-static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp)
+static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
@@ -188,7 +188,7 @@ static int power5p_get_constraint(unsigned int event, u64 *maskp, u64 *valp)
return 0;
}
-static int power5p_limited_pmc_event(unsigned int event)
+static int power5p_limited_pmc_event(u64 event)
{
int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -273,11 +273,11 @@ static int find_alternative_bdecode(unsigned int event)
return -1;
}
-static int power5p_get_alternatives(unsigned int event, unsigned int flags,
- unsigned int alt[])
+static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
- int i, j, ae, nalt = 1;
+ int i, j, nalt = 1;
int nlim;
+ u64 ae;
alt[0] = event;
nalt = 1;
@@ -402,7 +402,7 @@ static unsigned char direct_event_is_marked[0x28] = {
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
*/
-static int power5p_marked_instr_event(unsigned int event)
+static int power5p_marked_instr_event(u64 event)
{
int pmc, psel;
int bit, byte, unit;
@@ -451,7 +451,7 @@ static int power5p_marked_instr_event(unsigned int event)
return (mask >> (byte * 8 + bit)) & 1;
}
-static int power5p_compute_mmcr(unsigned int event[], int n_ev,
+static int power5p_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], u64 mmcr[])
{
u64 mmcr1 = 0;