Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/reg.h    |  1
-rw-r--r--  arch/powerpc/kernel/perf_event.c  | 24
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1bc6a12f3725..7e4abebe76c0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -892,6 +892,7 @@
 #define PV_970 0x0039
 #define PV_POWER5 0x003A
 #define PV_POWER5p 0x003B
+#define PV_POWER7 0x003F
 #define PV_970FX 0x003C
 #define PV_630 0x0040
 #define PV_630p 0x0041
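
The new PV_POWER7 value (0x003F) is the processor-version number that the perf code below matches with __is_processor() to detect a POWER7 core. As a rough illustration of how such a check resolves (not part of the patch; it assumes the conventional PVR layout with the version field in the upper 16 bits, and the macro names are reproduced from memory):

/*
 * Illustration only: a PVR-based POWER7 test.  Assumes the processor
 * version sits in the upper 16 bits of the Processor Version Register.
 */
#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* version field */
#define PV_POWER7	0x003F

static inline int cpu_is_power7(unsigned int pvr)
{
	return PVR_VER(pvr) == PV_POWER7;
}
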
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ab6f6beadb57..97e0ae414940 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
 	return ip;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 /*
  * Performance monitor interrupt stuff
  */
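
Numerically, pmc_overflow() treats the PMC as a 32-bit counter: a normal overflow leaves bit 31 set, so the value is negative when viewed as a signed int, while a rolled-back POWER7 counter sits just under 0x80000000, within 256 counts of wrapping. A standalone sketch of the same test, with a made-up userspace harness and sample values for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Same logic as the pmc_overflow() added above, minus the CPU check. */
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)			/* bit 31 set: counter overflowed */
		return true;
	if ((0x80000000 - val) <= 256)		/* rolled back, about to overflow */
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", pmc_overflow(0x80000001));	/* 1: overflowed */
	printf("%d\n", pmc_overflow(0x7fffff40));	/* 1: rolled back, 192 away */
	printf("%d\n", pmc_overflow(0x00001000));	/* 0: nowhere near overflow */
	return 0;
}
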
@@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);
-			if ((int)val < 0)
+			if (pmc_overflow(val))
 				write_pmc(i + 1, 0);
 		}
 	}
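
For context, this hunk sits in the fallback pass of perf_event_interrupt(): the handler first walks the PMCs owned by active events and records any real overflow, and only if that pass finds nothing does it sweep the remaining counters with pmc_overflow(). That ordering is why an event with a period under 256 cannot be reset by mistake here. A toy, self-contained model of the two-pass flow (names, array sizes and values below are invented for illustration; only the control flow mirrors the handler, and the real code also skips limited PMCs in the sweep):

#include <stdbool.h>
#include <stdio.h>

#define N_PMC 6

int main(void)
{
	/* Invented counter snapshot: PMC1 backs an active event, PMC2 has
	 * rolled back on POWER7 (48 counts short of overflowing). */
	unsigned long pmc[N_PMC] = { 0x00000100, 0x7fffffd0, 0, 0, 0, 0 };
	bool owned[N_PMC]        = { true, false, false, false, false, false };
	bool found = false;
	int i;

	/* Pass 1: only PMCs that back active events; a real overflow would
	 * be recorded here and the event's period reloaded. */
	for (i = 0; i < N_PMC; i++)
		if (owned[i] && (int)pmc[i] < 0)
			found = true;

	/* Pass 2 (the code changed above): runs only if pass 1 found
	 * nothing, clearing overflowed or rolled-back counters. */
	if (!found)
		for (i = 0; i < N_PMC; i++)
			if ((int)pmc[i] < 0 || (0x80000000 - pmc[i]) <= 256)
				pmc[i] = 0;	/* stands in for write_pmc(i + 1, 0) */

	printf("PMC2 after the sweep: 0x%08lx\n", pmc[1]);
	return 0;
}
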