Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/devtree.c           | 104
-rw-r--r--  arch/arm/kernel/entry-common.S      |  16
-rw-r--r--  arch/arm/kernel/head-nommu.S        |   2
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c     | 154
-rw-r--r--  arch/arm/kernel/perf_event.c        |  85
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c    |  74
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     | 126
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     | 246
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 157
-rw-r--r--  arch/arm/kernel/process.c           |   4
-rw-r--r--  arch/arm/kernel/ptrace.c            |  43
-rw-r--r--  arch/arm/kernel/setup.c             |  84
-rw-r--r--  arch/arm/kernel/smp.c               |  12
-rw-r--r--  arch/arm/kernel/smp_twd.c           |  54
-rw-r--r--  arch/arm/kernel/topology.c          |  42
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |  19
16 files changed, 704 insertions, 518 deletions
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index bee7f9d47f02..70f1bdeb241b 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -19,8 +19,10 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <asm/cputype.h> #include <asm/setup.h> #include <asm/page.h> +#include <asm/smp_plat.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> @@ -61,6 +63,108 @@ void __init arm_dt_memblock_reserve(void) } } +/* + * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree + * and builds the cpu logical map array containing MPIDR values related to + * logical cpus + * + * Updates the cpu possible mask with the number of parsed cpu nodes + */ +void __init arm_dt_init_cpu_maps(void) +{ + /* + * Temp logical map is initialized with UINT_MAX values that are + * considered invalid logical map entries since the logical map must + * contain a list of MPIDR[23:0] values where MPIDR[31:24] must + * read as 0. + */ + struct device_node *cpu, *cpus; + u32 i, j, cpuidx = 1; + u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; + + u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX }; + bool bootcpu_valid = false; + cpus = of_find_node_by_path("/cpus"); + + if (!cpus) + return; + + for_each_child_of_node(cpus, cpu) { + u32 hwid; + + pr_debug(" * %s...\n", cpu->full_name); + /* + * A device tree containing CPU nodes with missing "reg" + * properties is considered invalid to build the + * cpu_logical_map. + */ + if (of_property_read_u32(cpu, "reg", &hwid)) { + pr_debug(" * %s missing reg property\n", + cpu->full_name); + return; + } + + /* + * 8 MSBs must be set to 0 in the DT since the reg property + * defines the MPIDR[23:0]. + */ + if (hwid & ~MPIDR_HWID_BITMASK) + return; + + /* + * Duplicate MPIDRs are a recipe for disaster. + * Scan all initialized entries and check for + * duplicates. If any is found just bail out. + * temp values were initialized to UINT_MAX + * to avoid matching valid MPIDR[23:0] values. + */ + for (j = 0; j < cpuidx; j++) + if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg " + "properties in the DT\n")) + return; + + /* + * Build a stashed array of MPIDR values. Numbering scheme + * requires that if detected the boot CPU must be assigned + * logical id 0. Other CPUs get sequential indexes starting + * from 1. If a CPU node with a reg property matching the + * boot CPU MPIDR is detected, this is recorded so that the + * logical map built from DT is validated and can be used + * to override the map created in smp_setup_processor_id(). 
+ */ + if (hwid == mpidr) { + i = 0; + bootcpu_valid = true; + } else { + i = cpuidx++; + } + + if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " + "max cores %u, capping them\n", + cpuidx, nr_cpu_ids)) { + cpuidx = nr_cpu_ids; + break; + } + + tmp_map[i] = hwid; + } + + if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], " + "fall back to default cpu_logical_map\n")) + return; + + /* + * Since the boot CPU node contains proper data, and all nodes have + * a reg property, the DT CPU list can be considered valid and the + * logical map created in smp_setup_processor_id() can be overridden + */ + for (i = 0; i < cpuidx; i++) { + set_cpu_possible(i, true); + cpu_logical_map(i) = tmp_map[i]; + pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i)); + } +} + /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel * @dt_phys: physical address of dt blob diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 34711757ba59..804153c0a9cf 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -417,16 +417,6 @@ local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args -#ifdef CONFIG_SECCOMP - tst r10, #_TIF_SECCOMP - beq 1f - mov r0, scno - bl __secure_computing - add r0, sp, #S_R0 + S_OFF @ pointer to regs - ldmia r0, {r0 - r3} @ have to reload r0 - r3 -1: -#endif - tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? bne __sys_trace @@ -458,11 +448,13 @@ __sys_trace: ldmccia r1, {r0 - r6} @ have to reload r0 - r6 stmccia sp, {r4, r5} @ and update the stack args ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine - b 2b + cmp scno, #-1 @ skip the syscall? + bne 2b + add sp, sp, #S_OFF @ restore stack + b ret_slow_syscall __sys_trace_return: str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 - mov r1, scno mov r0, sp bl syscall_trace_exit b ret_slow_syscall diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 278cfc144f44..2c228a07e58c 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -68,7 +68,7 @@ __after_proc_init: * CP15 system control register value returned in r0 from * the CPU init function. */ -#ifdef CONFIG_ALIGNMENT_TRAP +#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 281bf3301241..5ff2e77782b1 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -52,14 +52,14 @@ static u8 debug_arch; /* Maximum supported watchpoint length. */ static u8 max_watchpoint_len; -#define READ_WB_REG_CASE(OP2, M, VAL) \ - case ((OP2 << 4) + M): \ - ARM_DBG_READ(c ## M, OP2, VAL); \ +#define READ_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_READ(c0, c ## M, OP2, VAL); \ break -#define WRITE_WB_REG_CASE(OP2, M, VAL) \ - case ((OP2 << 4) + M): \ - ARM_DBG_WRITE(c ## M, OP2, VAL);\ +#define WRITE_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_WRITE(c0, c ## M, OP2, VAL); \ break #define GEN_READ_WB_REG_CASES(OP2, VAL) \ @@ -136,12 +136,12 @@ static u8 get_debug_arch(void) /* Do we implement the extended CPUID interface? */ if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { - pr_warning("CPUID feature registers not supported. " - "Assuming v6 debug is present.\n"); + pr_warn_once("CPUID feature registers not supported. 
" + "Assuming v6 debug is present.\n"); return ARM_DEBUG_ARCH_V6; } - ARM_DBG_READ(c0, 0, didr); + ARM_DBG_READ(c0, c0, 0, didr); return (didr >> 16) & 0xf; } @@ -169,7 +169,7 @@ static int debug_exception_updates_fsr(void) static int get_num_wrp_resources(void) { u32 didr; - ARM_DBG_READ(c0, 0, didr); + ARM_DBG_READ(c0, c0, 0, didr); return ((didr >> 28) & 0xf) + 1; } @@ -177,7 +177,7 @@ static int get_num_wrp_resources(void) static int get_num_brp_resources(void) { u32 didr; - ARM_DBG_READ(c0, 0, didr); + ARM_DBG_READ(c0, c0, 0, didr); return ((didr >> 24) & 0xf) + 1; } @@ -228,19 +228,17 @@ static int get_num_brps(void) * be put into halting debug mode at any time by an external debugger * but there is nothing we can do to prevent that. */ -static int enable_monitor_mode(void) +static int monitor_mode_enabled(void) { u32 dscr; - int ret = 0; - - ARM_DBG_READ(c1, 0, dscr); + ARM_DBG_READ(c0, c1, 0, dscr); + return !!(dscr & ARM_DSCR_MDBGEN); +} - /* Ensure that halting mode is disabled. */ - if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, - "halting debug mode enabled. Unable to access hardware resources.\n")) { - ret = -EPERM; - goto out; - } +static int enable_monitor_mode(void) +{ + u32 dscr; + ARM_DBG_READ(c0, c1, 0, dscr); /* If monitor mode is already enabled, just return. */ if (dscr & ARM_DSCR_MDBGEN) @@ -250,24 +248,27 @@ static int enable_monitor_mode(void) switch (get_debug_arch()) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1: - ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN)); + ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN)); break; case ARM_DEBUG_ARCH_V7_ECP14: case ARM_DEBUG_ARCH_V7_1: - ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN)); + ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN)); + isb(); break; default: - ret = -ENODEV; - goto out; + return -ENODEV; } /* Check that the write made it through. */ - ARM_DBG_READ(c1, 0, dscr); - if (!(dscr & ARM_DSCR_MDBGEN)) - ret = -EPERM; + ARM_DBG_READ(c0, c1, 0, dscr); + if (!(dscr & ARM_DSCR_MDBGEN)) { + pr_warn_once("Failed to enable monitor mode on CPU %d.\n", + smp_processor_id()); + return -EPERM; + } out: - return ret; + return 0; } int hw_breakpoint_slots(int type) @@ -328,14 +329,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); struct perf_event **slot, **slots; - int i, max_slots, ctrl_base, val_base, ret = 0; + int i, max_slots, ctrl_base, val_base; u32 addr, ctrl; - /* Ensure that we are in monitor mode and halting mode is disabled. */ - ret = enable_monitor_mode(); - if (ret) - goto out; - addr = info->address; ctrl = encode_ctrl_reg(info->ctrl) | 0x1; @@ -362,9 +358,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) } } - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) { - ret = -EBUSY; - goto out; + if (i == max_slots) { + pr_warning("Can't find any breakpoint slot\n"); + return -EBUSY; } /* Override the breakpoint data with the step data. */ @@ -383,9 +379,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) /* Setup the control register. */ write_wb_reg(ctrl_base + i, ctrl); - -out: - return ret; + return 0; } void arch_uninstall_hw_breakpoint(struct perf_event *bp) @@ -416,8 +410,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } } - if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) + if (i == max_slots) { + pr_warning("Can't find any breakpoint slot\n"); return; + } /* Ensure that we disable the mismatch breakpoint. 
*/ if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && @@ -596,6 +592,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) int ret = 0; u32 offset, alignment_mask = 0x3; + /* Ensure that we are in monitor debug mode. */ + if (!monitor_mode_enabled()) + return -ENODEV; + /* Build the arch_hw_breakpoint. */ ret = arch_build_bp_info(bp); if (ret) @@ -858,7 +858,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, local_irq_enable(); /* We only handle watchpoints and hardware breakpoints. */ - ARM_DBG_READ(c1, 0, dscr); + ARM_DBG_READ(c0, c1, 0, dscr); /* Perform perf callbacks. */ switch (ARM_DSCR_MOE(dscr)) { @@ -906,7 +906,7 @@ static struct undef_hook debug_reg_hook = { static void reset_ctrl_regs(void *unused) { int i, raw_num_brps, err = 0, cpu = smp_processor_id(); - u32 dbg_power; + u32 val; /* * v7 debug contains save and restore registers so that debug state @@ -919,23 +919,30 @@ static void reset_ctrl_regs(void *unused) switch (debug_arch) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1: - /* ARMv6 cores just need to reset the registers. */ - goto reset_regs; + /* ARMv6 cores clear the registers out of reset. */ + goto out_mdbgen; case ARM_DEBUG_ARCH_V7_ECP14: /* * Ensure sticky power-down is clear (i.e. debug logic is * powered up). */ - asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); - if ((dbg_power & 0x1) == 0) + ARM_DBG_READ(c1, c5, 4, val); + if ((val & 0x1) == 0) err = -EPERM; + + /* + * Check whether we implement OS save and restore. + */ + ARM_DBG_READ(c1, c1, 4, val); + if ((val & 0x9) == 0) + goto clear_vcr; break; case ARM_DEBUG_ARCH_V7_1: /* * Ensure the OS double lock is clear. */ - asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power)); - if ((dbg_power & 0x1) == 1) + ARM_DBG_READ(c1, c3, 4, val); + if ((val & 0x1) == 1) err = -EPERM; break; } @@ -947,24 +954,29 @@ static void reset_ctrl_regs(void *unused) } /* - * Unconditionally clear the lock by writing a value + * Unconditionally clear the OS lock by writing a value * other than 0xC5ACCE55 to the access register. */ - asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); + ARM_DBG_WRITE(c1, c0, 4, 0); isb(); /* * Clear any configured vector-catch events before * enabling monitor mode. */ - asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); +clear_vcr: + ARM_DBG_WRITE(c0, c7, 0, 0); isb(); -reset_regs: - if (enable_monitor_mode()) + if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { + pr_warning("CPU %d failed to disable vector catch\n", cpu); return; + } - /* We must also reset any reserved registers. */ + /* + * The control/value register pairs are UNKNOWN out of reset so + * clear them to avoid spurious debug events. + */ raw_num_brps = get_num_brp_resources(); for (i = 0; i < raw_num_brps; ++i) { write_wb_reg(ARM_BASE_BCR + i, 0UL); @@ -975,6 +987,19 @@ reset_regs: write_wb_reg(ARM_BASE_WCR + i, 0UL); write_wb_reg(ARM_BASE_WVR + i, 0UL); } + + if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { + pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + return; + } + + /* + * Have a crack at enabling monitor mode. We don't actually need + * it yet, but reporting an error early is useful if it fails. 
+ */ +out_mdbgen: + if (enable_monitor_mode()) + cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); } static int __cpuinit dbg_reset_notify(struct notifier_block *self, @@ -992,8 +1017,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { static int __init arch_hw_breakpoint_init(void) { - u32 dscr; - debug_arch = get_debug_arch(); if (!debug_arch_supported()) { @@ -1028,17 +1051,10 @@ static int __init arch_hw_breakpoint_init(void) core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : "", core_num_wrps); - ARM_DBG_READ(c1, 0, dscr); - if (dscr & ARM_DSCR_HDBGEN) { - max_watchpoint_len = 4; - pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n", - max_watchpoint_len); - } else { - /* Work out the maximum supported watchpoint length. */ - max_watchpoint_len = get_max_wp_len(); - pr_info("maximum watchpoint size is %u bytes.\n", - max_watchpoint_len); - } + /* Work out the maximum supported watchpoint length. */ + max_watchpoint_len = get_max_wp_len(); + pr_info("maximum watchpoint size is %u bytes.\n", + max_watchpoint_len); /* Register debug fault handler. */ hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 53c0304b734a..f9e8657dd241 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event, return -ENOENT; } -int -armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) +int armpmu_event_set_period(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; @@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event, local64_set(&hwc->prev_count, (u64)-left); - armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); + armpmu->write_counter(event, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } -u64 -armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) +u64 armpmu_event_update(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; u64 delta, prev_raw_count, new_raw_count; again: prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(idx); + new_raw_count = armpmu->read_counter(event); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) @@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event) if (hwc->idx < 0) return; - armpmu_event_update(event, hwc, hwc->idx); + armpmu_event_update(event); } static void @@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags) * PERF_EF_UPDATE, see comments in armpmu_start(). */ if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(hwc, hwc->idx); - armpmu_event_update(event, hwc, hwc->idx); + armpmu->disable(event); + armpmu_event_update(event); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } -static void -armpmu_start(struct perf_event *event, int flags) +static void armpmu_start(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; @@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags) * get an interrupt too soon or *way* too late if the overflow has * happened since disabling. 
*/ - armpmu_event_set_period(event, hwc, hwc->idx); - armpmu->enable(hwc, hwc->idx); + armpmu_event_set_period(event); + armpmu->enable(event); } static void @@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags) perf_pmu_disable(event->pmu); /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, hwc); + idx = armpmu->get_event_idx(hw_events, event); if (idx < 0) { err = idx; goto out; @@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags) * sure it is disabled. */ event->hw.idx = idx; - armpmu->disable(hwc, idx); + armpmu->disable(event); hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; @@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; - return armpmu->get_event_idx(hw_events, &fake_event) >= 0; + return armpmu->get_event_idx(hw_events, event) >= 0; } static int @@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) static void armpmu_release_hardware(struct arm_pmu *armpmu) { - armpmu->free_irq(); + armpmu->free_irq(armpmu); pm_runtime_put_sync(&armpmu->plat_device->dev); } @@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) return -ENODEV; pm_runtime_get_sync(&pmu_device->dev); - err = armpmu->request_irq(armpmu_dispatch_irq); + err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); if (err) { armpmu_release_hardware(armpmu); return err; @@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu) int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); if (enabled) - armpmu->start(); + armpmu->start(armpmu); } static void armpmu_disable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); - armpmu->stop(); + armpmu->stop(armpmu); } #ifdef CONFIG_PM_RUNTIME @@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu) }; } -int armpmu_register(struct arm_pmu *armpmu, char *name, int type) +int armpmu_register(struct arm_pmu *armpmu, int type) { armpmu_init(armpmu); + pm_runtime_enable(&armpmu->plat_device->dev); pr_info("enabled with %s PMU driver, %d counters available\n", armpmu->name, armpmu->num_events); - return perf_pmu_register(&armpmu->pmu, name, type); + return perf_pmu_register(&armpmu->pmu, armpmu->name, type); } /* @@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct frame_tail __user *tail; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } tail = (struct frame_tail __user *)regs->ARM_fp - 1; @@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stackframe fr; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } + fr.fp = regs->ARM_fp; fr.sp = regs->ARM_sp; fr.lr = regs->ARM_lr; fr.pc = regs->ARM_pc; walk_stackframe(&fr, callchain_trace, entry); } + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + return perf_guest_cbs->get_guest_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + int misc = 0; + + if (perf_guest_cbs && 
perf_guest_cbs->is_in_guest()) { + if (perf_guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 8d7d8d4de9d6..9a4f6307a016 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <asm/cputype.h> @@ -45,7 +46,7 @@ const char *perf_pmu_name(void) if (!cpu_pmu) return NULL; - return cpu_pmu->pmu.name; + return cpu_pmu->name; } EXPORT_SYMBOL_GPL(perf_pmu_name); @@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) return &__get_cpu_var(cpu_hw_events); } -static void cpu_pmu_free_irq(void) +static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) { int i, irq, irqs; struct platform_device *pmu_device = cpu_pmu->plat_device; @@ -86,7 +87,7 @@ static void cpu_pmu_free_irq(void) } } -static int cpu_pmu_request_irq(irq_handler_t handler) +static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) { int i, err, irq, irqs; struct platform_device *pmu_device = cpu_pmu->plat_device; @@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) /* Ensure the PMU has sane values out of reset. */ if (cpu_pmu && cpu_pmu->reset) - on_each_cpu(cpu_pmu->reset, NULL, 1); + on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); } /* @@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, return NOTIFY_DONE; if (cpu_pmu && cpu_pmu->reset) - cpu_pmu->reset(NULL); + cpu_pmu->reset(cpu_pmu); + else + return NOTIFY_DONE; return NOTIFY_OK; } @@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { /* * CPU PMU identification and probing. */ -static struct arm_pmu *__devinit probe_current_pmu(void) +static int __devinit probe_current_pmu(struct arm_pmu *pmu) { - struct arm_pmu *pmu = NULL; int cpu = get_cpu(); unsigned long cpuid = read_cpuid_id(); unsigned long implementor = (cpuid & 0xFF000000) >> 24; unsigned long part_number = (cpuid & 0xFFF0); + int ret = -ENODEV; pr_info("probing PMU on CPU %d\n", cpu); @@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void) case 0xB360: /* ARM1136 */ case 0xB560: /* ARM1156 */ case 0xB760: /* ARM1176 */ - pmu = armv6pmu_init(); + ret = armv6pmu_init(pmu); break; case 0xB020: /* ARM11mpcore */ - pmu = armv6mpcore_pmu_init(); + ret = armv6mpcore_pmu_init(pmu); break; case 0xC080: /* Cortex-A8 */ - pmu = armv7_a8_pmu_init(); + ret = armv7_a8_pmu_init(pmu); break; case 0xC090: /* Cortex-A9 */ - pmu = armv7_a9_pmu_init(); + ret = armv7_a9_pmu_init(pmu); break; case 0xC050: /* Cortex-A5 */ - pmu = armv7_a5_pmu_init(); + ret = armv7_a5_pmu_init(pmu); break; case 0xC0F0: /* Cortex-A15 */ - pmu = armv7_a15_pmu_init(); + ret = armv7_a15_pmu_init(pmu); break; case 0xC070: /* Cortex-A7 */ - pmu = armv7_a7_pmu_init(); + ret = armv7_a7_pmu_init(pmu); break; } /* Intel CPUs [xscale]. 
*/ @@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void) part_number = (cpuid >> 13) & 0x7; switch (part_number) { case 1: - pmu = xscale1pmu_init(); + ret = xscale1pmu_init(pmu); break; case 2: - pmu = xscale2pmu_init(); + ret = xscale2pmu_init(pmu); break; } } put_cpu(); - return pmu; + return ret; } static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - struct arm_pmu *(*init_fn)(void); + int (*init_fn)(struct arm_pmu *); struct device_node *node = pdev->dev.of_node; + struct arm_pmu *pmu; + int ret = -ENODEV; if (cpu_pmu) { pr_info("attempt to register multiple PMU devices!"); return -ENOSPC; } + pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); + if (!pmu) { + pr_info("failed to allocate PMU device!"); + return -ENOMEM; + } + if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { init_fn = of_id->data; - cpu_pmu = init_fn(); + ret = init_fn(pmu); } else { - cpu_pmu = probe_current_pmu(); + ret = probe_current_pmu(pmu); } - if (!cpu_pmu) - return -ENODEV; + if (ret) { + pr_info("failed to register PMU devices!"); + kfree(pmu); + return ret; + } + cpu_pmu = pmu; cpu_pmu->plat_device = pdev; cpu_pmu_init(cpu_pmu); - register_cpu_notifier(&cpu_pmu_hotplug_notifier); - armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); + armpmu_register(cpu_pmu, PERF_TYPE_RAW); return 0; } @@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = { static int __init register_pmu_driver(void) { - return platform_driver_register(&cpu_pmu_driver); + int err; + + err = register_cpu_notifier(&cpu_pmu_hotplug_notifier); + if (err) + return err; + + err = platform_driver_register(&cpu_pmu_driver); + if (err) + unregister_cpu_notifier(&cpu_pmu_hotplug_notifier); + + return err; } device_initcall(register_pmu_driver); diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 6ccc07971745..f3e22ff8b6a2 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr, return ret; } -static inline u32 -armv6pmu_read_counter(int counter) +static inline u32 armv6pmu_read_counter(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; unsigned long value = 0; if (ARMV6_CYCLE_COUNTER == counter) @@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter) return value; } -static inline void -armv6pmu_write_counter(int counter, - u32 value) +static inline void armv6pmu_write_counter(struct perf_event *event, u32 value) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + if (ARMV6_CYCLE_COUNTER == counter) asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); else if (ARMV6_COUNTER0 == counter) @@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter, WARN_ONCE(1, "invalid counter number (%d)\n", counter); } -static void -armv6pmu_enable_event(struct hw_perf_event *hwc, - int idx) +static void armv6pmu_enable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = 0; @@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num, { unsigned long pmcr = armv6_pmcr_read(); struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = 
cpu_pmu->get_hw_events(); struct pt_regs *regs; int idx; @@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num, */ armv6_pmcr_write(pmcr); - cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num, continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } /* @@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num, return IRQ_HANDLED; } -static void -armv6pmu_start(void) +static void armv6pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -540,8 +542,7 @@ armv6pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -armv6pmu_stop(void) +static void armv6pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -555,10 +556,11 @@ armv6pmu_stop(void) static int armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; /* Always place a cycle counter into the cycle counter. */ - if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { + if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) { if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; @@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, } } -static void -armv6pmu_disable_event(struct hw_perf_event *hwc, - int idx) +static void armv6pmu_disable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = ARMV6_PMCR_CCOUNT_IEN; @@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, - int idx) +static void armv6mpcore_pmu_disable_event(struct perf_event *event) { unsigned long val, mask, flags, evt = 0; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = ARMV6_PMCR_CCOUNT_IEN; @@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event) &armv6_perf_cache_map, 0xFF); } -static struct arm_pmu armv6pmu = { - .name = "v6", - .handle_irq = armv6pmu_handle_irq, - .enable = armv6pmu_enable_event, - .disable = armv6pmu_disable_event, - .read_counter = armv6pmu_read_counter, - .write_counter = armv6pmu_write_counter, - .get_event_idx = armv6pmu_get_event_idx, - .start = armv6pmu_start, - .stop = armv6pmu_stop, - .map_event = armv6_map_event, - .num_events = 3, - .max_period = (1LLU << 32) - 1, -}; - -static struct arm_pmu *__devinit armv6pmu_init(void) +static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu) { - return &armv6pmu; + cpu_pmu->name = "v6"; + cpu_pmu->handle_irq = armv6pmu_handle_irq; + cpu_pmu->enable = armv6pmu_enable_event; + cpu_pmu->disable = 
armv6pmu_disable_event; + cpu_pmu->read_counter = armv6pmu_read_counter; + cpu_pmu->write_counter = armv6pmu_write_counter; + cpu_pmu->get_event_idx = armv6pmu_get_event_idx; + cpu_pmu->start = armv6pmu_start; + cpu_pmu->stop = armv6pmu_stop; + cpu_pmu->map_event = armv6_map_event; + cpu_pmu->num_events = 3; + cpu_pmu->max_period = (1LLU << 32) - 1; + + return 0; } /* @@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event) &armv6mpcore_perf_cache_map, 0xFF); } -static struct arm_pmu armv6mpcore_pmu = { - .name = "v6mpcore", - .handle_irq = armv6pmu_handle_irq, - .enable = armv6pmu_enable_event, - .disable = armv6mpcore_pmu_disable_event, - .read_counter = armv6pmu_read_counter, - .write_counter = armv6pmu_write_counter, - .get_event_idx = armv6pmu_get_event_idx, - .start = armv6pmu_start, - .stop = armv6pmu_stop, - .map_event = armv6mpcore_map_event, - .num_events = 3, - .max_period = (1LLU << 32) - 1, -}; - -static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) +static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) { - return &armv6mpcore_pmu; + cpu_pmu->name = "v6mpcore"; + cpu_pmu->handle_irq = armv6pmu_handle_irq; + cpu_pmu->enable = armv6pmu_enable_event; + cpu_pmu->disable = armv6mpcore_pmu_disable_event; + cpu_pmu->read_counter = armv6pmu_read_counter; + cpu_pmu->write_counter = armv6pmu_write_counter; + cpu_pmu->get_event_idx = armv6pmu_get_event_idx; + cpu_pmu->start = armv6pmu_start; + cpu_pmu->stop = armv6pmu_stop; + cpu_pmu->map_event = armv6mpcore_map_event; + cpu_pmu->num_events = 3; + cpu_pmu->max_period = (1LLU << 32) - 1; + + return 0; } #else -static struct arm_pmu *__devinit armv6pmu_init(void) +static int armv6pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) +static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index bd4b090ebcfd..7d0cce85d17e 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -18,8 +18,6 @@ #ifdef CONFIG_CPU_V7 -static struct arm_pmu armv7pmu; - /* * Common ARMv7 event types * @@ -738,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] */ #define ARMV7_IDX_CYCLE_COUNTER 0 #define ARMV7_IDX_COUNTER0 1 -#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) +#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ + (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) #define ARMV7_MAX_COUNTERS 32 #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) @@ -804,49 +803,34 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc) return pmnc & ARMV7_OVERFLOWED_MASK; } -static inline int armv7_pmnc_counter_valid(int idx) +static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) { - return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; + return idx >= ARMV7_IDX_CYCLE_COUNTER && + idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); } static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) { - int ret = 0; - u32 counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u checking wrong counter %d overflow status\n", - smp_processor_id(), idx); - } else { - counter = ARMV7_IDX_TO_COUNTER(idx); - ret = pmnc & BIT(counter); - } - - return ret; + return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); } static inline int armv7_pmnc_select_counter(int idx) { - u32 
counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u selecting wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV7_IDX_TO_COUNTER(idx); + u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); isb(); return idx; } -static inline u32 armv7pmu_read_counter(int idx) +static inline u32 armv7pmu_read_counter(struct perf_event *event) { + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; u32 value = 0; - if (!armv7_pmnc_counter_valid(idx)) + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV7_IDX_CYCLE_COUNTER) @@ -857,9 +841,13 @@ static inline u32 armv7pmu_read_counter(int idx) return value; } -static inline void armv7pmu_write_counter(int idx, u32 value) +static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) { - if (!armv7_pmnc_counter_valid(idx)) + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV7_IDX_CYCLE_COUNTER) @@ -878,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val) static inline int armv7_pmnc_enable_counter(int idx) { - u32 counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV7_IDX_TO_COUNTER(idx); + u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); return idx; } static inline int armv7_pmnc_disable_counter(int idx) { - u32 counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV7_IDX_TO_COUNTER(idx); + u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); return idx; } static inline int armv7_pmnc_enable_intens(int idx) { - u32 counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV7_IDX_TO_COUNTER(idx); + u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); return idx; } static inline int armv7_pmnc_disable_intens(int idx) { - u32 counter; - - if (!armv7_pmnc_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV7_IDX_TO_COUNTER(idx); + u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); isb(); /* Clear the overflow flag in case an interrupt is pending. 
*/ @@ -956,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void) } #ifdef DEBUG -static void armv7_pmnc_dump_regs(void) +static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) { u32 val; unsigned int cnt; @@ -981,7 +937,8 @@ static void armv7_pmnc_dump_regs(void) asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); printk(KERN_INFO "CCNT =0x%08x\n", val); - for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { + for (cnt = ARMV7_IDX_COUNTER0; + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); printk(KERN_INFO "CNT[%d] count =0x%08x\n", @@ -993,10 +950,19 @@ static void armv7_pmnc_dump_regs(void) } #endif -static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void armv7pmu_enable_event(struct perf_event *event) { unsigned long flags; + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return; + } /* * Enable counter and interrupt, and set the counter to count @@ -1014,7 +980,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) * We only need to set the event for the cycle counter if we * have the ability to perform event filtering. */ - if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) + if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) armv7_pmnc_write_evtsel(idx, hwc->config_base); /* @@ -1030,10 +996,19 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) +static void armv7pmu_disable_event(struct perf_event *event) { unsigned long flags; + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return; + } /* * Disable counter and interrupt @@ -1057,7 +1032,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) { u32 pmnc; struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); struct pt_regs *regs; int idx; @@ -1077,7 +1053,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) */ regs = get_irq_regs(); - cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1094,13 +1069,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } /* @@ -1115,7 +1090,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) return IRQ_HANDLED; } -static void armv7pmu_start(void) +static void armv7pmu_start(struct arm_pmu *cpu_pmu) { unsigned 
long flags; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -1126,7 +1101,7 @@ static void armv7pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv7pmu_stop(void) +static void armv7pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -1138,10 +1113,12 @@ static void armv7pmu_stop(void) } static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { int idx; - unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. */ if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { @@ -1192,11 +1169,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, static void armv7pmu_reset(void *info) { + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; u32 idx, nb_cnt = cpu_pmu->num_events; /* The counter and interrupt enable registers are unknown at reset. */ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) - armv7pmu_disable_event(NULL, idx); + for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv7_pmnc_disable_counter(idx); + armv7_pmnc_disable_intens(idx); + } /* Initialize & Reset PMNC: C and P bits */ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); @@ -1232,17 +1212,18 @@ static int armv7_a7_map_event(struct perf_event *event) &armv7_a7_perf_cache_map, 0xFF); } -static struct arm_pmu armv7pmu = { - .handle_irq = armv7pmu_handle_irq, - .enable = armv7pmu_enable_event, - .disable = armv7pmu_disable_event, - .read_counter = armv7pmu_read_counter, - .write_counter = armv7pmu_write_counter, - .get_event_idx = armv7pmu_get_event_idx, - .start = armv7pmu_start, - .stop = armv7pmu_stop, - .reset = armv7pmu_reset, - .max_period = (1LLU << 32) - 1, +static void armv7pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->handle_irq = armv7pmu_handle_irq; + cpu_pmu->enable = armv7pmu_enable_event; + cpu_pmu->disable = armv7pmu_disable_event; + cpu_pmu->read_counter = armv7pmu_read_counter; + cpu_pmu->write_counter = armv7pmu_write_counter; + cpu_pmu->get_event_idx = armv7pmu_get_event_idx; + cpu_pmu->start = armv7pmu_start; + cpu_pmu->stop = armv7pmu_stop; + cpu_pmu->reset = armv7pmu_reset; + cpu_pmu->max_period = (1LLU << 32) - 1; }; static u32 __devinit armv7_read_num_pmnc_events(void) @@ -1256,70 +1237,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void) return nb_cnt + 1; } -static struct arm_pmu *__devinit armv7_a8_pmu_init(void) +static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { - armv7pmu.name = "ARMv7 Cortex-A8"; - armv7pmu.map_event = armv7_a8_map_event; - armv7pmu.num_events = armv7_read_num_pmnc_events(); - return &armv7pmu; + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A8"; + cpu_pmu->map_event = armv7_a8_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + return 0; } -static struct arm_pmu *__devinit armv7_a9_pmu_init(void) +static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) { - armv7pmu.name = "ARMv7 Cortex-A9"; - armv7pmu.map_event = armv7_a9_map_event; - armv7pmu.num_events = armv7_read_num_pmnc_events(); - return &armv7pmu; + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A9"; + cpu_pmu->map_event = armv7_a9_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + return 0; } -static struct arm_pmu *__devinit 
armv7_a5_pmu_init(void) +static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) { - armv7pmu.name = "ARMv7 Cortex-A5"; - armv7pmu.map_event = armv7_a5_map_event; - armv7pmu.num_events = armv7_read_num_pmnc_events(); - return &armv7pmu; + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A5"; + cpu_pmu->map_event = armv7_a5_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + return 0; } -static struct arm_pmu *__devinit armv7_a15_pmu_init(void) +static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) { - armv7pmu.name = "ARMv7 Cortex-A15"; - armv7pmu.map_event = armv7_a15_map_event; - armv7pmu.num_events = armv7_read_num_pmnc_events(); - armv7pmu.set_event_filter = armv7pmu_set_event_filter; - return &armv7pmu; + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A15"; + cpu_pmu->map_event = armv7_a15_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + return 0; } -static struct arm_pmu *__devinit armv7_a7_pmu_init(void) +static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) { - armv7pmu.name = "ARMv7 Cortex-A7"; - armv7pmu.map_event = armv7_a7_map_event; - armv7pmu.num_events = armv7_read_num_pmnc_events(); - armv7pmu.set_event_filter = armv7pmu_set_event_filter; - return &armv7pmu; + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A7"; + cpu_pmu->map_event = armv7_a7_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + return 0; } #else -static struct arm_pmu *__devinit armv7_a8_pmu_init(void) +static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit armv7_a9_pmu_init(void) +static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit armv7_a5_pmu_init(void) +static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit armv7_a15_pmu_init(void) +static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit armv7_a7_pmu_init(void) +static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } #endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 426e19f380a2..0c8265e53d5f 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev) { unsigned long pmnc; struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); struct pt_regs *regs; int idx; @@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev) regs = get_irq_regs(); - cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } 
irq_work_run(); @@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) return IRQ_HANDLED; } -static void -xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void xscale1pmu_enable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; switch (idx) { case XSCALE_CYCLE_COUNTER: @@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) +static void xscale1pmu_disable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; switch (idx) { case XSCALE_CYCLE_COUNTER: @@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) static int xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { - if (XSCALE_PERFCTR_CCNT == event->config_base) { + struct hw_perf_event *hwc = &event->hw; + if (XSCALE_PERFCTR_CCNT == hwc->config_base) { if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; @@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, } } -static void -xscale1pmu_start(void) +static void xscale1pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -379,8 +383,7 @@ xscale1pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -xscale1pmu_stop(void) +static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -392,9 +395,10 @@ xscale1pmu_stop(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static inline u32 -xscale1pmu_read_counter(int counter) +static inline u32 xscale1pmu_read_counter(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; u32 val = 0; switch (counter) { @@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter) return val; } -static inline void -xscale1pmu_write_counter(int counter, u32 val) +static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); @@ -434,24 +440,22 @@ static int xscale_map_event(struct perf_event *event) &xscale_perf_cache_map, 0xFF); } -static struct arm_pmu xscale1pmu = { - .name = "xscale1", - .handle_irq = xscale1pmu_handle_irq, - .enable = xscale1pmu_enable_event, - .disable = xscale1pmu_disable_event, - .read_counter = xscale1pmu_read_counter, - .write_counter = xscale1pmu_write_counter, - .get_event_idx = xscale1pmu_get_event_idx, - .start = xscale1pmu_start, - .stop = xscale1pmu_stop, - .map_event = xscale_map_event, - .num_events = 3, - .max_period = (1LLU << 32) - 1, -}; - -static struct arm_pmu *__devinit xscale1pmu_init(void) +static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu) { - return &xscale1pmu; + cpu_pmu->name = "xscale1"; + cpu_pmu->handle_irq = xscale1pmu_handle_irq; + cpu_pmu->enable = xscale1pmu_enable_event; + cpu_pmu->disable = 
xscale1pmu_disable_event; + cpu_pmu->read_counter = xscale1pmu_read_counter; + cpu_pmu->write_counter = xscale1pmu_write_counter; + cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; + cpu_pmu->start = xscale1pmu_start; + cpu_pmu->stop = xscale1pmu_stop; + cpu_pmu->map_event = xscale_map_event; + cpu_pmu->num_events = 3; + cpu_pmu->max_period = (1LLU << 32) - 1; + + return 0; } #define XSCALE2_OVERFLOWED_MASK 0x01f @@ -567,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev) { unsigned long pmnc, of_flags; struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); struct pt_regs *regs; int idx; @@ -585,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev) regs = get_irq_regs(); - cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -597,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } irq_work_run(); @@ -617,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) return IRQ_HANDLED; } -static void -xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void xscale2pmu_enable_event(struct perf_event *event) { unsigned long flags, ien, evtsel; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); evtsel = xscale2pmu_read_event_select(); @@ -661,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) +static void xscale2pmu_disable_event(struct perf_event *event) { unsigned long flags, ien, evtsel, of_flags; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); evtsel = xscale2pmu_read_event_select(); @@ -713,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) static int xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { int idx = xscale1pmu_get_event_idx(cpuc, event); if (idx >= 0) @@ -727,8 +735,7 @@ out: return idx; } -static void -xscale2pmu_start(void) +static void xscale2pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -740,8 +747,7 @@ xscale2pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void -xscale2pmu_stop(void) +static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = cpu_pmu->get_hw_events(); @@ -753,9 +759,10 @@ xscale2pmu_stop(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static inline u32 -xscale2pmu_read_counter(int counter) +static inline u32 xscale2pmu_read_counter(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; u32 val = 
0; switch (counter) { @@ -779,9 +786,11 @@ xscale2pmu_read_counter(int counter) return val; } -static inline void -xscale2pmu_write_counter(int counter, u32 val) +static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) { + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); @@ -801,33 +810,31 @@ xscale2pmu_write_counter(int counter, u32 val) } } -static struct arm_pmu xscale2pmu = { - .name = "xscale2", - .handle_irq = xscale2pmu_handle_irq, - .enable = xscale2pmu_enable_event, - .disable = xscale2pmu_disable_event, - .read_counter = xscale2pmu_read_counter, - .write_counter = xscale2pmu_write_counter, - .get_event_idx = xscale2pmu_get_event_idx, - .start = xscale2pmu_start, - .stop = xscale2pmu_stop, - .map_event = xscale_map_event, - .num_events = 5, - .max_period = (1LLU << 32) - 1, -}; - -static struct arm_pmu *__devinit xscale2pmu_init(void) +static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu) { - return &xscale2pmu; + cpu_pmu->name = "xscale2"; + cpu_pmu->handle_irq = xscale2pmu_handle_irq; + cpu_pmu->enable = xscale2pmu_enable_event; + cpu_pmu->disable = xscale2pmu_disable_event; + cpu_pmu->read_counter = xscale2pmu_read_counter; + cpu_pmu->write_counter = xscale2pmu_write_counter; + cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; + cpu_pmu->start = xscale2pmu_start; + cpu_pmu->stop = xscale2pmu_stop; + cpu_pmu->map_event = xscale_map_event; + cpu_pmu->num_events = 5; + cpu_pmu->max_period = (1LLU << 32) - 1; + + return 0; } #else -static struct arm_pmu *__devinit xscale1pmu_init(void) +static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } -static struct arm_pmu *__devinit xscale2pmu_init(void) +static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu) { - return NULL; + return -ENODEV; } #endif /* CONFIG_CPU_XSCALE */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 90084a6de35a..44bc0b327e2b 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -34,6 +34,7 @@ #include <linux/leds.h> #include <asm/cacheflush.h> +#include <asm/idmap.h> #include <asm/processor.h> #include <asm/thread_notify.h> #include <asm/stacktrace.h> @@ -56,8 +57,6 @@ static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; -extern void setup_mm_for_reboot(void); - static volatile int hlt_counter; void disable_hlt(void) @@ -70,6 +69,7 @@ EXPORT_SYMBOL(disable_hlt); void enable_hlt(void) { hlt_counter--; + BUG_ON(hlt_counter < 0); } EXPORT_SYMBOL(enable_hlt); diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 739db3a1b2d2..03deeffd9f6d 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -916,16 +916,11 @@ enum ptrace_syscall_dir { PTRACE_SYSCALL_EXIT, }; -static int ptrace_syscall_trace(struct pt_regs *regs, int scno, - enum ptrace_syscall_dir dir) +static int tracehook_report_syscall(struct pt_regs *regs, + enum ptrace_syscall_dir dir) { unsigned long ip; - current_thread_info()->syscall = scno; - - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return scno; - /* * IP is used to denote syscall entry/exit: * IP = 0 -> entry, =1 -> exit @@ -944,19 +939,41 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno, asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) { - scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); + current_thread_info()->syscall = scno; + + /* Do the secure 
computing check first; failures should be fast. */
+ if (secure_computing(scno) == -1)
+ return -1;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 trace_sys_enter(regs, scno);
+
 audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
 regs->ARM_r2, regs->ARM_r3);
+
 return scno;
 }
-asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
- scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, scno);
+ /*
+ * Audit the syscall before anything else, as a debugger may
+ * come in and change the current registers.
+ */
 audit_syscall_exit(regs);
- return scno;
+
+ /*
+ * Note that we haven't updated the ->syscall field for the
+ * current thread. This isn't a problem because it will have
+ * been set on syscall entry and there hasn't been an opportunity
+ * for a PTRACE_SET_SYSCALL since then.
+ */
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index da1d1aa20ad9..9a89bf4aefe1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -383,6 +383,12 @@ void cpu_init(void)
 BUG();
 }
+ /*
+ * This only works on resume and secondary cores. For booting on the
+ * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
+ */
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
 cpu_proc_init();
 /*
@@ -426,13 +432,14 @@ int __cpu_logical_map[NR_CPUS];
 void __init smp_setup_processor_id(void)
 {
 int i;
- u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+ u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
+ u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 cpu_logical_map(0) = cpu;
- for (i = 1; i < NR_CPUS; ++i)
+ for (i = 1; i < nr_cpu_ids; ++i)
 cpu_logical_map(i) = i == cpu ? 0 : i;
- printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+ printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
 }
 static void __init setup_processor(void)
@@ -758,6 +765,7 @@ void __init setup_arch(char **cmdline_p)
 unflatten_device_tree();
+ arm_dt_init_cpu_maps();
 #ifdef CONFIG_SMP
 if (is_smp()) {
 smp_set_ops(mdesc->smp);
@@ -841,12 +849,9 @@ static const char *hwcap_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
- int i;
+ int i, j;
+ u32 cpuid;
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
- cpu_name, read_cpuid_id() & 15, elf_platform);
-
-#if defined(CONFIG_SMP)
 for_each_online_cpu(i) {
 /*
 * glibc reads /proc/cpuinfo to determine the number of
 * online processors, looking for lines beginning with
 * "processor". Give glibc what it expects.
 */
 seq_printf(m, "processor\t: %d\n", i);
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
+ cpuid = is_smp() ?
per_cpu(cpu_data, i).cpuid : read_cpuid_id(); + seq_printf(m, "model name\t: %s rev %d (%s)\n", + cpu_name, cpuid & 15, elf_platform); + +#if defined(CONFIG_SMP) + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); - } -#else /* CONFIG_SMP */ - seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", - loops_per_jiffy / (500000/HZ), - (loops_per_jiffy / (5000/HZ)) % 100); +#else + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); #endif + /* dump out the processor features */ + seq_puts(m, "Features\t: "); - /* dump out the processor features */ - seq_puts(m, "Features\t: "); - - for (i = 0; hwcap_str[i]; i++) - if (elf_hwcap & (1 << i)) - seq_printf(m, "%s ", hwcap_str[i]); + for (j = 0; hwcap_str[j]; j++) + if (elf_hwcap & (1 << j)) + seq_printf(m, "%s ", hwcap_str[j]); - seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); - seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); + seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24); + seq_printf(m, "CPU architecture: %s\n", + proc_arch[cpu_architecture()]); - if ((read_cpuid_id() & 0x0008f000) == 0x00000000) { - /* pre-ARM7 */ - seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4); - } else { - if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { - /* ARM7 */ - seq_printf(m, "CPU variant\t: 0x%02x\n", - (read_cpuid_id() >> 16) & 127); + if ((cpuid & 0x0008f000) == 0x00000000) { + /* pre-ARM7 */ + seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4); } else { - /* post-ARM7 */ - seq_printf(m, "CPU variant\t: 0x%x\n", - (read_cpuid_id() >> 20) & 15); + if ((cpuid & 0x0008f000) == 0x00007000) { + /* ARM7 */ + seq_printf(m, "CPU variant\t: 0x%02x\n", + (cpuid >> 16) & 127); + } else { + /* post-ARM7 */ + seq_printf(m, "CPU variant\t: 0x%x\n", + (cpuid >> 20) & 15); + } + seq_printf(m, "CPU part\t: 0x%03x\n", + (cpuid >> 4) & 0xfff); } - seq_printf(m, "CPU part\t: 0x%03x\n", - (read_cpuid_id() >> 4) & 0xfff); + seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15); } - seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); - - seq_puts(m, "\n"); seq_printf(m, "Hardware\t: %s\n", machine_name); seq_printf(m, "Revision\t: %04x\n", system_rev); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index fbc8b2623d82..84f4cbf652e5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -281,6 +281,7 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); cpu_info->loops_per_jiffy = loops_per_jiffy; + cpu_info->cpuid = read_cpuid_id(); store_cpu_topology(cpuid); } @@ -313,9 +314,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void) current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); + cpu_init(); + printk("CPU%u: Booted secondary processor\n", cpu); - cpu_init(); preempt_disable(); trace_hardirqs_off(); @@ -371,6 +373,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); } void __init smp_prepare_cpus(unsigned int max_cpus) @@ -421,6 +424,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) smp_cross_call(mask, IPI_CALL_FUNC); } +void arch_send_wakeup_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_WAKEUP); +} + void arch_send_call_function_single_ipi(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); @@ 
-443,7 +451,7 @@ void show_ipi_list(struct seq_file *p, int prec)
 for (i = 0; i < NR_IPI; i++) {
 seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
 seq_printf(p, "%10u ",
 __get_irq_stat(cpu, ipi_irqs[i]));
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b22d700fea27..ff07879ad95d 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,6 +31,8 @@ static void __iomem *twd_base;
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
+static bool common_setup_called;
+static DEFINE_PER_CPU(bool, percpu_setup_called);
 static struct clock_event_device __percpu **twd_evt;
 static int twd_ppi;
@@ -248,17 +250,9 @@ static struct clk *twd_get_clock(void)
 return clk;
 }
- err = clk_prepare(clk);
+ err = clk_prepare_enable(clk);
 if (err) {
- pr_err("smp_twd: clock failed to prepare: %d\n", err);
- clk_put(clk);
- return ERR_PTR(err);
- }
-
- err = clk_enable(clk);
- if (err) {
- pr_err("smp_twd: clock failed to enable: %d\n", err);
- clk_unprepare(clk);
+ pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
 clk_put(clk);
 return ERR_PTR(err);
 }
@@ -272,15 +266,45 @@ static struct clk *twd_get_clock(void)
 static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
 struct clock_event_device **this_cpu_clk;
+ int cpu = smp_processor_id();
+
+ /*
+ * If the basic setup for this CPU has been done before, don't
+ * bother with the below.
+ */
+ if (per_cpu(percpu_setup_called, cpu)) {
+ __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ clockevents_register_device(*__this_cpu_ptr(twd_evt));
+ enable_percpu_irq(clk->irq, 0);
+ return 0;
+ }
+ per_cpu(percpu_setup_called, cpu) = true;
- if (!twd_clk)
+ /*
+ * This stuff only needs to be done once for the entire TWD cluster
+ * during the runtime of the system.
+ */
+ if (!common_setup_called) {
 twd_clk = twd_get_clock();
- if (!IS_ERR_OR_NULL(twd_clk))
- twd_timer_rate = clk_get_rate(twd_clk);
- else
- twd_calibrate_rate();
+ /*
+ * We use IS_ERR_OR_NULL() here, because if the clock stubs
+ * are active we will get a valid clk reference which is
+ * however NULL and will return the rate 0. In that case we
+ * need to calibrate the rate instead.
+ */
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
+
+ common_setup_called = true;
+ }
+ /*
+ * The following is done once per CPU the first time .setup() is
+ * called.
+ */
 __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
 clk->name = "local_timer";
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 26c12c6440fc..79282ebcd939 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -196,32 +196,7 @@ static inline void parse_dt_topology(void) {}
 static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
 #endif
-
-/*
- * cpu topology management
- */
-
-#define MPIDR_SMP_BITMASK (0x3 << 30)
-#define MPIDR_SMP_VALUE (0x2 << 30)
-
-#define MPIDR_MT_BITMASK (0x1 << 24)
-
-/*
- * These masks reflect the current use of the affinity levels.
- * The affinity level can be up to 16 bits according to ARM ARM
- */
-#define MPIDR_HWID_BITMASK 0xFFFFFF
-
-#define MPIDR_LEVEL0_MASK 0x3
-#define MPIDR_LEVEL0_SHIFT 0
-
-#define MPIDR_LEVEL1_MASK 0xF
-#define MPIDR_LEVEL1_SHIFT 8
-
-#define MPIDR_LEVEL2_MASK 0xFF
-#define MPIDR_LEVEL2_SHIFT 16
-
-/*
+ /*
 * cpu topology table
 */
 struct cputopo_arm cpu_topology[NR_CPUS];
@@ -282,19 +257,14 @@ void store_cpu_topology(unsigned int cpuid)
 if (mpidr & MPIDR_MT_BITMASK) {
 /* core performance interdependency */
- cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
- & MPIDR_LEVEL0_MASK;
- cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
- & MPIDR_LEVEL1_MASK;
- cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
- & MPIDR_LEVEL2_MASK;
+ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 } else {
 /* largely independent cores */
 cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
- & MPIDR_LEVEL0_MASK;
- cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
- & MPIDR_LEVEL1_MASK;
+ cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 }
 } else {
 /*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 36ff15bbfdd4..b9f38e388b43 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -114,6 +114,15 @@ SECTIONS
 RO_DATA(PAGE_SIZE)
+ . = ALIGN(4);
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+#ifdef CONFIG_MMU
+ *(__ex_table)
+#endif
+ __stop___ex_table = .;
+ }
+
 #ifdef CONFIG_ARM_UNWIND
 /*
 * Stack unwinding tables
@@ -220,16 +229,6 @@ SECTIONS
 READ_MOSTLY_DATA(L1_CACHE_BYTES)
 /*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(4);
- __start___ex_table = .;
-#ifdef CONFIG_MMU
-*(__ex_table)
-#endif
- __stop___ex_table = .;
-
- /*
 * and the usual data section
 */
 DATA_DATA
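
A note on the perf_event_xscale.c hunks above: the event callbacks now take a struct perf_event * and recover their struct arm_pmu with to_arm_pmu(event->pmu), instead of relying on a file-scope PMU pointer. That helper is not part of this diff; it is the usual container_of() pattern, roughly:

/*
 * Sketch only: recover the wrapping arm_pmu from its embedded
 * struct pmu member. The real definition lives alongside
 * struct arm_pmu, not in this diff.
 */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

This is what lets one set of xscale2pmu_* functions serve any PMU instance, which in turn is why xscale2pmu_init() can simply fill in a caller-provided struct arm_pmu rather than return a static singleton.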
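
The reworked c_show() in setup.c prints one complete record per online CPU, using the per-CPU cpuid cached by smp_store_cpu_info() on SMP. Illustrative output only; the values below are invented, and the Features line depends on elf_hwcap:

processor	: 0
model name	: ARMv7 Processor rev 2 (v7l)
BogoMIPS	: 795.44
Features	: swp half thumb fastmult vfp edsp neon vfpv3 tls
CPU implementer	: 0x41
CPU architecture: 7
CPU variant	: 0x3
CPU part	: 0xc09
CPU revision	: 2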
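
Both cpu_init() and smp_prepare_boot_cpu() now seed the per-CPU offset explicitly via set_my_cpu_offset(). On ARM this is expected to stash the offset in the TPIDRPRW thread ID register so that per-CPU accesses become a single coprocessor read; a minimal sketch, assuming the TPIDRPRW-based implementation that accompanied this series:

static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW, the privileged-only thread ID register */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

The comment in cpu_init() explains the ordering constraint: on the boot CPU the per-CPU areas do not exist yet when cpu_init() first runs, so the boot CPU is seeded later from smp_prepare_boot_cpu().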
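
Finally, the setup.c and topology.c hunks replace open-coded MPIDR shift/mask pairs with MPIDR_AFFINITY_LEVEL(). The macro is defined in asm/cputype.h rather than in this diff; given the 8-bits-per-level layout implied by MPIDR_HWID_BITMASK (0xFFFFFF), it is presumably along these lines:

/* Sketch, assuming 8 bits per affinity level (levels 0..2). */
#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

With this, MPIDR_AFFINITY_LEVEL(mpidr, 0) extracts Aff0 (the CPU within a cluster) and level 1 the cluster ID, matching the new core_id/socket_id assignments in store_cpu_topology() and the Aff0-based logical ID in smp_setup_processor_id().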