Diffstat (limited to 'arch')
48 files changed, 460 insertions, 1358 deletions
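This series removes the long-deprecated jprobes infrastructure from every architecture: the per-arch setjmp_pre_handler()/longjmp_break_handler() pair, the break_handler re-entry path in each arch kprobe exception handler, and the jprobe_saved_regs/jprobes_stack scratch space in every kprobe_ctlblk. A pre_handler that returns non-zero is now taken to mean "the handler changed the execution path", so the arch handlers skip single-stepping and simply reset the current kprobe. For reference, a former jprobe user would migrate to a plain kprobe along these lines (a minimal sketch, not part of this patch; the probed symbol and message are arbitrary examples):

#include <linux/kprobes.h>
#include <linux/module.h>

/* Runs in the same exception context a jprobe handler used to run in;
 * function arguments are read from pt_regs instead of arriving as
 * typed parameters the way they did with a jprobe entry handler. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit at %p\n", p->symbol_name, p->addr);
	return 0;	/* 0: continue with normal single-stepping */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* arbitrary example target */
	.pre_handler	= handler_pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");
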
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 2e52d18e6bc7..2c1b479d5aea 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -45,8 +45,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned int kprobe_status; - struct pt_regs jprobe_saved_regs; - char jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 42b05046fad9..df35d4c0b0b8 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs) /* If we have no pre-handler or it returned 0, we continue with * normal processing. If we have a pre-handler and it returned - * non-zero - which is expected from setjmp_pre_handler for - * jprobe, we return without single stepping and leave that to - * the break-handler which is invoked by a kprobe from - * jprobe_return + * non-zero - which means user handler setup registers to exit + * to another instruction, we must skip the single stepping. */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; + } else { + reset_current_kprobe(); + preempt_enable_no_resched(); } return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - setup_singlestep(p, regs); - kcb->kprobe_status = KPROBE_HIT_SS; - return 1; - } } /* no_kprobe: */ @@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long sp_addr = regs->sp; - - kcb->jprobe_saved_regs = *regs; - memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); - regs->ret = (unsigned long)(jp->entry); - - return 1; -} - -void __kprobes jprobe_return(void) -{ - __asm__ __volatile__("unimp_s"); - return; -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long sp_addr; - - *regs = kcb->jprobe_saved_regs; - sp_addr = regs->sp; - memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr)); - preempt_enable_no_resched(); - - return 1; -} - static void __used kretprobe_trampoline_holder(void) { __asm__ __volatile__(".global kretprobe_trampoline\n" @@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->ret = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index e46e4e7bdba3..ac54c06764e6 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg, asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\ } while (0) +struct perf_event_attr; struct notifier_block; struct perf_event; struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int 
arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 59655459da59..82290f212d8e 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -44,8 +44,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned int kprobe_status; struct prev_kprobe prev_kprobe; - struct pt_regs jprobe_saved_regs; - char jprobes_stack[MAX_STACK_SIZE]; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h index 1e5b9bb92270..991c9127c650 100644 --- a/arch/arm/include/asm/probes.h +++ b/arch/arm/include/asm/probes.h @@ -51,7 +51,6 @@ struct arch_probes_insn { * We assume one instruction can consume at most 64 bytes stack, which is * 'push {r0-r15}'. Instructions consume more or unknown stack space like * 'str r0, [sp, #-80]' and 'str r0, [sp, r1]' should be prohibit to probe. - * Both kprobe and jprobe use this macro. */ #define MAX_STACK_SIZE 64 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 629e25152c0d..1d5fbf1d1c67 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len) /* * Check whether bp virtual address is in kernel space. */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->ctrl.len); + va = hw->address; + len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, /* * Construct an arch_hw_breakpoint from a perf_event. 
*/ -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->ctrl.type = ARM_BREAKPOINT_LOAD; + hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->ctrl.type = ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->ctrl.len = ARM_BREAKPOINT_LEN_1; + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->ctrl.len = ARM_BREAKPOINT_LEN_2; + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: - info->ctrl.len = ARM_BREAKPOINT_LEN_8; - if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE) + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; + if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; default: @@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp) * by the hardware and must be aligned to the appropriate number of * bytes. */ - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE && - info->ctrl.len != ARM_BREAKPOINT_LEN_2 && - info->ctrl.len != ARM_BREAKPOINT_LEN_4) + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE && + hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; /* Address */ - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* Privilege */ - info->ctrl.privilege = ARM_BREAKPOINT_USER; - if (arch_check_bp_in_kernelspace(bp)) - info->ctrl.privilege |= ARM_BREAKPOINT_PRIV; + hw->ctrl.privilege = ARM_BREAKPOINT_USER; + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV; /* Enabled? */ - info->ctrl.enabled = !bp->attr.disabled; + hw->ctrl.enabled = !attr->disabled; /* Mismatch */ - info->ctrl.mismatch = 0; + hw->ctrl.mismatch = 0; return 0; } @@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings. */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); int ret = 0; u32 offset, alignment_mask = 0x3; @@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return -ENODEV; /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) goto out; /* Check address alignment. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ @@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) case 1: case 2: /* Allow halfword watchpoints and breakpoints. 
*/ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; case 3: /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; default: ret = -EINVAL; goto out; } - info->address &= ~alignment_mask; - info->ctrl.len <<= offset; + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; if (is_default_overflow_handler(bp)) { /* @@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return -EINVAL; /* We don't allow mismatch breakpoints in kernel space. */ - if (arch_check_bp_in_kernelspace(bp)) + if (arch_check_bp_in_kernelspace(hw)) return -EPERM; /* @@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * reports them. */ if (!debug_exception_updates_fsr() && - (info->ctrl.type == ARM_BREAKPOINT_LOAD || - info->ctrl.type == ARM_BREAKPOINT_STORE)) + (hw->ctrl.type == ARM_BREAKPOINT_LOAD || + hw->ctrl.type == ARM_BREAKPOINT_STORE)) return -EINVAL; } diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index e90cc8a08186..f8bd523d64d1 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -47,9 +47,6 @@ (unsigned long)(addr) + \ (size)) -/* Used as a marker in ARM_pc to note when we're in a jprobe. */ -#define JPROBE_MAGIC_ADDR 0xffffffff - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) break; case KPROBE_REENTER: /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); /* fall through */ default: /* impossible cases */ @@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we - * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry, - * so get out doing nothing more here. + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. */ if (!p->pre_handler || !p->pre_handler(p, regs)) { kcb->kprobe_status = KPROBE_HIT_SS; @@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } - reset_current_kprobe(); - } - } - } else if (cur) { - /* We probably hit a jprobe. Call its break handler. */ - if (cur->break_handler && cur->break_handler(cur, regs)) { - kcb->kprobe_status = KPROBE_HIT_SS; - singlestep(cur, regs, kcb); - if (cur->post_handler) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - cur->post_handler(cur, regs, 0); } + reset_current_kprobe(); } - reset_current_kprobe(); } else { /* * The probe was removed and a race is in progress. 
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, regs->ARM_lr = (unsigned long)&kretprobe_trampoline; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long sp_addr = regs->ARM_sp; - long cpsr; - - kcb->jprobe_saved_regs = *regs; - memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); - regs->ARM_pc = (long)jp->entry; - - cpsr = regs->ARM_cpsr | PSR_I_BIT; -#ifdef CONFIG_THUMB2_KERNEL - /* Set correct Thumb state in cpsr */ - if (regs->ARM_pc & 1) - cpsr |= PSR_T_BIT; - else - cpsr &= ~PSR_T_BIT; -#endif - regs->ARM_cpsr = cpsr; - - preempt_disable(); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - __asm__ __volatile__ ( - /* - * Setup an empty pt_regs. Fill SP and PC fields as - * they're needed by longjmp_break_handler. - * - * We allocate some slack between the original SP and start of - * our fabricated regs. To be precise we want to have worst case - * covered which is STMFD with all 16 regs so we allocate 2 * - * sizeof(struct_pt_regs)). - * - * This is to prevent any simulated instruction from writing - * over the regs when they are accessing the stack. - */ -#ifdef CONFIG_THUMB2_KERNEL - "sub r0, %0, %1 \n\t" - "mov sp, r0 \n\t" -#else - "sub sp, %0, %1 \n\t" -#endif - "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" - "str %0, [sp, %2] \n\t" - "str r0, [sp, %3] \n\t" - "mov r0, sp \n\t" - "bl kprobe_handler \n\t" - - /* - * Return to the context saved by setjmp_pre_handler - * and restored by longjmp_break_handler. - */ -#ifdef CONFIG_THUMB2_KERNEL - "ldr lr, [sp, %2] \n\t" /* lr = saved sp */ - "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */ - "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */ - "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */ - /* rfe context */ - "ldmia sp, {r0 - r12} \n\t" - "mov sp, lr \n\t" - "ldr lr, [sp], #4 \n\t" - "rfeia sp! 
\n\t" -#else - "ldr r0, [sp, %4] \n\t" - "msr cpsr_cxsf, r0 \n\t" - "ldmia sp, {r0 - pc} \n\t" -#endif - : - : "r" (kcb->jprobe_saved_regs.ARM_sp), - "I" (sizeof(struct pt_regs) * 2), - "J" (offsetof(struct pt_regs, ARM_sp)), - "J" (offsetof(struct pt_regs, ARM_pc)), - "J" (offsetof(struct pt_regs, ARM_cpsr)), - "J" (offsetof(struct pt_regs, ARM_lr)) - : "memory", "cc"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_addr = kcb->jprobe_saved_regs.ARM_sp; - long orig_sp = regs->ARM_sp; - struct jprobe *jp = container_of(p, struct jprobe, kp); - - if (regs->ARM_pc == JPROBE_MAGIC_ADDR) { - if (orig_sp != stack_addr) { - struct pt_regs *saved_regs = - (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp; - printk("current sp %lx does not match saved sp %lx\n", - orig_sp, stack_addr); - printk("Saved registers for jprobe %p\n", jp); - show_regs(saved_regs); - printk("Current registers\n"); - show_regs(regs); - BUG(); - } - *regs = kcb->jprobe_saved_regs; - memcpy((void *)stack_addr, kcb->jprobes_stack, - MIN_STACK_SIZE(stack_addr)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 14db14152909..cc237fa9b90f 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -1461,7 +1461,6 @@ fail: print_registers(&result_regs); if (mem) { - pr_err("current_stack=%p\n", current_stack); pr_err("expected_memory:\n"); print_memory(expected_memory, mem_size); pr_err("result_memory:\n"); diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 41770766d964..6a53e59ced95 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg, struct task_struct; struct notifier_block; +struct perf_event_attr; struct perf_event; struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type, int *offset); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 6deb8d726041..d5a44cf859e9 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -48,7 +48,6 @@ struct kprobe_ctlblk { unsigned long saved_irqflag; struct prev_kprobe prev_kprobe; struct kprobe_step_ctx ss_ctx; - struct pt_regs jprobe_saved_regs; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 413dbe530da8..8c9644376326 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len) /* * Check whether bp virtual address is in kernel space. 
*/ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->ctrl.len); + va = hw->address; + len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, /* * Construct an arch_hw_breakpoint from a perf_event. */ -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->ctrl.type = ARM_BREAKPOINT_LOAD; + hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->ctrl.type = ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->ctrl.len = ARM_BREAKPOINT_LEN_1; + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->ctrl.len = ARM_BREAKPOINT_LEN_2; + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_3: - info->ctrl.len = ARM_BREAKPOINT_LEN_3; + hw->ctrl.len = ARM_BREAKPOINT_LEN_3; break; case HW_BREAKPOINT_LEN_4: - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_5: - info->ctrl.len = ARM_BREAKPOINT_LEN_5; + hw->ctrl.len = ARM_BREAKPOINT_LEN_5; break; case HW_BREAKPOINT_LEN_6: - info->ctrl.len = ARM_BREAKPOINT_LEN_6; + hw->ctrl.len = ARM_BREAKPOINT_LEN_6; break; case HW_BREAKPOINT_LEN_7: - info->ctrl.len = ARM_BREAKPOINT_LEN_7; + hw->ctrl.len = ARM_BREAKPOINT_LEN_7; break; case HW_BREAKPOINT_LEN_8: - info->ctrl.len = ARM_BREAKPOINT_LEN_8; + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; break; default: return -EINVAL; @@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp) * AArch32 also requires breakpoints of length 2 for Thumb. * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { if (is_compat_bp(bp)) { - if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && - info->ctrl.len != ARM_BREAKPOINT_LEN_4) + if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; - } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) { + } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) { /* * FIXME: Some tools (I'm looking at you perf) assume * that breakpoints should be sizeof(long). This * is nonsense. For now, we fix up the parameter * but we should probably return -EINVAL instead. */ - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; } } /* Address */ - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* * Privilege * Note that we disallow combined EL0/EL1 breakpoints because * that would complicate the stepping code. 
*/ - if (arch_check_bp_in_kernelspace(bp)) - info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1; else - info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0; /* Enabled? */ - info->ctrl.enabled = !bp->attr.disabled; + hw->ctrl.enabled = !attr->disabled; return 0; } @@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings. */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); int ret; u64 alignment_mask, offset; /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; @@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * that here. */ if (is_compat_bp(bp)) { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else alignment_mask = 0x3; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ break; case 1: /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; case 2: /* Allow halfword watchpoints and breakpoints. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; default: return -EINVAL; } } else { - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) alignment_mask = 0x3; else alignment_mask = 0x7; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; } - info->address &= ~alignment_mask; - info->ctrl.len <<= offset; + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; /* * Disallow per-task kernel breakpoints since these would * complicate the stepping code. */ - if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) + if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) return -EINVAL; return 0; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d849d9804011..e78c3ef04d95 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); break; @@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry, - * so get out doing nothing more here. + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. 
* * pre_handler can hit a breakpoint and can step thru * before return, keep PSTATE D-flag enabled until @@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs, kcb, 0); - return; - } - } - } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) == - BRK64_OPCODE_KPROBES) && cur_kprobe) { - /* We probably hit a jprobe. Call its break handler. */ - if (cur_kprobe->break_handler && - cur_kprobe->break_handler(cur_kprobe, regs)) { - setup_singlestep(cur_kprobe, regs, kcb, 0); - return; + } else + reset_current_kprobe(); } } /* @@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) return DBG_HOOK_HANDLED; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - /* - * Since we can't be sure where in the stack frame "stacked" - * pass-by-value arguments are stored we just don't try to - * duplicate any of the stack. Do not use jprobes on functions that - * use more than 64 bytes (after padding each to an 8 byte boundary) - * of arguments, or pass individual arguments larger than 16 bytes. - */ - - instruction_pointer_set(regs, (unsigned long) jp->entry); - preempt_disable(); - pause_graph_tracing(); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - /* - * Jprobe handler return by entering break exception, - * encoded same as kprobe, but with following conditions - * -a special PC to identify it from the other kprobes. - * -restore stack addr to original saved pt_regs - */ - asm volatile(" mov sp, %0 \n" - "jprobe_return_break: brk %1 \n" - : - : "r" (kcb->jprobe_saved_regs.sp), - "I" (BRK64_ESR_KPROBES) - : "memory"); - - unreachable(); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_addr = kcb->jprobe_saved_regs.sp; - long orig_sp = kernel_stack_pointer(regs); - struct jprobe *jp = container_of(p, struct jprobe, kp); - extern const char jprobe_return_break[]; - - if (instruction_pointer(regs) != (u64) jprobe_return_break) - return 0; - - if (orig_sp != stack_addr) { - struct pt_regs *saved_regs = - (struct pt_regs *)kcb->jprobe_saved_regs.sp; - pr_err("current sp %lx does not match saved sp %lx\n", - orig_sp, stack_addr); - pr_err("Saved registers for jprobe %p\n", jp); - __show_regs(saved_regs); - pr_err("Current registers\n"); - __show_regs(regs); - BUG(); - } - unpause_graph_tracing(); - *regs = kcb->jprobe_saved_regs; - preempt_enable_no_resched(); - return 1; -} - bool arch_within_kprobe_blacklist(unsigned long addr) { if ((addr >= (unsigned long)__kprobes_text_start && diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h index 0302b3664789..580356a2eea6 100644 --- a/arch/ia64/include/asm/kprobes.h +++ b/arch/ia64/include/asm/kprobes.h @@ -82,8 +82,6 @@ struct prev_kprobe { #define ARCH_PREV_KPROBE_SZ 2 struct kprobe_ctlblk { unsigned long kprobe_status; - struct pt_regs jprobe_saved_regs; - unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE]; unsigned long *bsp; unsigned long cfm; atomic_t prev_kprobe_index; diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index 5d742bcb0018..4ca110f0a94b 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ 
b/arch/ia64/include/uapi/asm/break.h @@ -14,7 +14,6 @@ */ #define __IA64_BREAK_KDB 0x80100 #define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */ -#define __IA64_BREAK_JPROBE 0x82000 /* * OS-specific break numbers: diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 498f3da3f225..d0c0ccdd656a 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o -obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o +obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S deleted file mode 100644 index f69389c7be1d..000000000000 --- a/arch/ia64/kernel/jprobes.S +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Jprobe specific operations - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) Intel Corporation, 2005 - * - * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy - * <anil.s.keshavamurthy@intel.com> initial implementation - * - * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a - * probe to be inserted into the beginning of a function call. The fundamental - * difference between a jprobe and a kprobe is the jprobe handler is executed - * in the same context as the target function, while the kprobe handlers - * are executed in interrupt context. - * - * For jprobes we initially gain control by placing a break point in the - * first instruction of the targeted function. When we catch that specific - * break, we: - * * set the return address to our jprobe_inst_return() function - * * jump to the jprobe handler function - * - * Since we fixed up the return address, the jprobe handler will return to our - * jprobe_inst_return() function, giving us control again. At this point we - * are back in the parents frame marker, so we do yet another call to our - * jprobe_break() function to fix up the frame marker as it would normally - * exist in the target function. - * - * Our jprobe_return function then transfers control back to kprobes.c by - * executing a break instruction using one of our reserved numbers. When we - * catch that break in kprobes.c, we continue like we do for a normal kprobe - * by single stepping the emulated instruction, and then returning execution - * to the correct location. 
- */ -#include <asm/asmmacro.h> -#include <asm/break.h> - - /* - * void jprobe_break(void) - */ - .section .kprobes.text, "ax" -ENTRY(jprobe_break) - break.m __IA64_BREAK_JPROBE -END(jprobe_break) - - /* - * void jprobe_inst_return(void) - */ -GLOBAL_ENTRY(jprobe_inst_return) - br.call.sptk.many b0=jprobe_break -END(jprobe_inst_return) - -GLOBAL_ENTRY(invalidate_stacked_regs) - movl r16=invalidate_restore_cfm - ;; - mov b6=r16 - ;; - br.ret.sptk.many b6 - ;; -invalidate_restore_cfm: - mov r16=ar.rsc - ;; - mov ar.rsc=r0 - ;; - loadrs - ;; - mov ar.rsc=r16 - ;; - br.cond.sptk.many rp -END(invalidate_stacked_regs) - -GLOBAL_ENTRY(flush_register_stack) - // flush dirty regs to backing store (must be first in insn group) - flushrs - ;; - br.ret.sptk.many rp -END(flush_register_stack) - diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f5f3a5e6fcd1..aa41bd5cf9b7 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -35,8 +35,6 @@ #include <asm/sections.h> #include <asm/exception.h> -extern void jprobe_inst_return(void); - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) */ break; } - kretprobe_assert(ri, orig_ret_address, trampoline_address); - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); @@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) prepare_ss(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; - } else if (args->err == __IA64_BREAK_JPROBE) { - /* - * jprobe instrumented function just completed - */ - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - goto ss_probe; - } } else if (!is_ia64_break_inst(regs)) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further @@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) set_current_kprobe(p, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) - /* - * Our pre-handler is specifically requesting that we just - * do a return. This is used for both the jprobe pre-handler - * and the kretprobe trampoline - */ + if (p->pre_handler && p->pre_handler(p, regs)) { + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } -ss_probe: #if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ @@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, case DIE_BREAK: /* err is break number from ia64_bad_break() */ if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) - || args->err == __IA64_BREAK_JPROBE || args->err == 0) if (pre_kprobes_handler(args)) ret = NOTIFY_STOP; @@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry) return ((struct fnptr *)entry)->ip; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr = arch_deref_entry_point(jp->entry); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - struct param_bsp_cfm pa; - int bytes; - - /* - * Callee owns the argument space and could overwrite it, eg - * tail call optimization. 
So to be absolutely safe - * we save the argument space before transferring the control - * to instrumented jprobe function which runs in - * the process context - */ - pa.ip = regs->cr_iip; - unw_init_running(ia64_get_bsp_cfm, &pa); - bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f) - - (char *)pa.bsp; - memcpy( kcb->jprobes_saved_stacked_regs, - pa.bsp, - bytes ); - kcb->bsp = pa.bsp; - kcb->cfm = pa.cfm; - - /* save architectural state */ - kcb->jprobe_saved_regs = *regs; - - /* after rfi, execute the jprobe instrumented function */ - regs->cr_iip = addr & ~0xFULL; - ia64_psr(regs)->ri = addr & 0xf; - regs->r1 = ((struct fnptr *)(jp->entry))->gp; - - /* - * fix the return address to our jprobe_inst_return() function - * in the jprobes.S file - */ - regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; - - return 1; -} - -/* ia64 does not need this */ -void __kprobes jprobe_return(void) -{ -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - int bytes; - - /* restoring architectural state */ - *regs = kcb->jprobe_saved_regs; - - /* restoring the original argument space */ - flush_register_stack(); - bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f) - - (char *)kcb->bsp; - memcpy( kcb->bsp, - kcb->jprobes_saved_stacked_regs, - bytes ); - invalidate_stacked_regs(); - - preempt_enable_no_resched(); - return 1; -} - static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index ad1a99948f27..a72dfbf1babb 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -68,16 +68,6 @@ struct prev_kprobe { unsigned long saved_epc; }; -#define MAX_JPROBES_STACK_SIZE 128 -#define MAX_JPROBES_STACK_ADDR \ - (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs)) - -#define MIN_JPROBES_STACK_SIZE(ADDR) \ - ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \ - ? 
MAX_JPROBES_STACK_ADDR - (ADDR) \ - : MAX_JPROBES_STACK_SIZE) - - #define SKIP_DELAYSLOT 0x0001 /* per-cpu kprobe control block */ @@ -86,12 +76,9 @@ struct kprobe_ctlblk { unsigned long kprobe_old_SR; unsigned long kprobe_saved_SR; unsigned long kprobe_saved_epc; - unsigned long jprobe_saved_sp; - struct pt_regs jprobe_saved_regs; /* Per-thread fields, used while emulating branches */ unsigned long flags; unsigned long target_epc; - u8 jprobes_stack[MAX_JPROBES_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index f5c8bce70db2..54cd675c5d1d 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) preempt_enable_no_resched(); } return 1; - } else { - if (addr->word != breakpoint_insn.word) { - /* - * The breakpoint instruction was removed by - * another cpu right after we hit, no further - * handling of this interrupt is appropriate - */ - ret = 1; - goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) - goto ss_probe; + } else if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; } goto no_kprobe; } @@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) if (p->pre_handler && p->pre_handler(p, regs)) { /* handler has already set things up, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; } -ss_probe: prepare_singlestep(p, regs, kcb); if (kcb->flags & SKIP_DELAYSLOT) { kcb->kprobe_status = KPROBE_HIT_SSDONE; @@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_sp = regs->regs[29]; - - memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, - MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); - - regs->cp0_epc = (unsigned long)(jp->entry); - - return 1; -} - -/* Defined in the inline asm below. */ -void jprobe_return_end(void); - -void __kprobes jprobe_return(void) -{ - /* Assembler quirk necessitates this '0,code' business. 
*/ - asm volatile( - "break 0,%0\n\t" - ".globl jprobe_return_end\n" - "jprobe_return_end:\n" - : : "n" (BRK_KPROBE_BP) : "memory"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (regs->cp0_epc >= (unsigned long)jprobe_return && - regs->cp0_epc <= (unsigned long)jprobe_return_end) { - *regs = kcb->jprobe_saved_regs; - memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, - MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); - preempt_enable_no_resched(); - - return 1; - } - return 0; -} - /* * Function return probe trampoline: * - init_kprobes() establishes a probepoint here @@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); instruction_pointer(regs) = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 8e7b09703ca4..27d6e3c8fde9 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -52,6 +52,7 @@ struct arch_hw_breakpoint { #include <asm/reg.h> #include <asm/debug.h> +struct perf_event_attr; struct perf_event; struct pmu; struct perf_sample_data; @@ -60,8 +61,10 @@ struct perf_sample_data; extern int hw_breakpoint_slots(int type); extern int arch_bp_generic_fields(int type, int *gen_bp_type); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 9f3be5c8a4a3..785c464b6588 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -88,7 +88,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_saved_msr; - struct pt_regs jprobe_saved_regs; struct prev_kprobe prev_kprobe; }; @@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_handler(struct pt_regs *regs); extern int kprobe_post_handler(struct pt_regs *regs); -#ifdef CONFIG_KPROBES_ON_FTRACE -extern int __is_active_jprobe(unsigned long addr); -extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb); -#else -static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - return 0; -} -#endif #else static inline int kprobe_handler(struct pt_regs *regs) { return 0; } static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; } diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 80547dad37da..fec8a6773119 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp) /* * Check for virtual address in kernel space. 
*/ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - return is_kernel_addr(info->address); + return is_kernel_addr(hw->address); } int arch_bp_generic_fields(int type, int *gen_bp_type) @@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { int ret = -EINVAL, length_max; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); if (!bp) return ret; - info->type = HW_BRK_TYPE_TRANSLATE; - if (bp->attr.bp_type & HW_BREAKPOINT_R) - info->type |= HW_BRK_TYPE_READ; - if (bp->attr.bp_type & HW_BREAKPOINT_W) - info->type |= HW_BRK_TYPE_WRITE; - if (info->type == HW_BRK_TYPE_TRANSLATE) + hw->type = HW_BRK_TYPE_TRANSLATE; + if (attr->bp_type & HW_BREAKPOINT_R) + hw->type |= HW_BRK_TYPE_READ; + if (attr->bp_type & HW_BREAKPOINT_W) + hw->type |= HW_BRK_TYPE_WRITE; + if (hw->type == HW_BRK_TYPE_TRANSLATE) /* must set alteast read or write */ return ret; - if (!(bp->attr.exclude_user)) - info->type |= HW_BRK_TYPE_USER; - if (!(bp->attr.exclude_kernel)) - info->type |= HW_BRK_TYPE_KERNEL; - if (!(bp->attr.exclude_hv)) - info->type |= HW_BRK_TYPE_HYP; - info->address = bp->attr.bp_addr; - info->len = bp->attr.bp_len; + if (!attr->exclude_user) + hw->type |= HW_BRK_TYPE_USER; + if (!attr->exclude_kernel) + hw->type |= HW_BRK_TYPE_KERNEL; + if (!attr->exclude_hv) + hw->type |= HW_BRK_TYPE_HYP; + hw->address = attr->bp_addr; + hw->len = attr->bp_len; /* * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) @@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ - if ((bp->attr.bp_addr >> 9) != - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9)) + if ((attr->bp_addr >> 9) != + ((attr->bp_addr + attr->bp_len - 1) >> 9)) return -EINVAL; } - if (info->len > - (length_max - (info->address & HW_BREAKPOINT_ALIGN))) + if (hw->len > + (length_max - (hw->address & HW_BREAKPOINT_ALIGN))) return -EINVAL; return 0; } diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 7a1f99f1b47f..e4a49c051325 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -25,50 +25,6 @@ #include <linux/preempt.h> #include <linux/ftrace.h> -/* - * This is called from ftrace code after invoking registered handlers to - * disambiguate regs->nip changes done by jprobes and livepatch. We check if - * there is an active jprobe at the provided address (mcount location). - */ -int __is_active_jprobe(unsigned long addr) -{ - if (!preemptible()) { - struct kprobe *p = raw_cpu_read(current_kprobe); - return (p && (unsigned long)p->addr == addr) ? 
1 : 0; - } - - return 0; -} - -static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb, unsigned long orig_nip) -{ - /* - * Emulate singlestep (and also recover regs->nip) - * as if there is a nop - */ - regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); - if (orig_nip) - regs->nip = orig_nip; - return 1; -} - -int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; -} -NOKPROBE_SYMBOL(skip_singlestep); - /* Ftrace callback handler for kprobes */ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, struct ftrace_ops *ops, struct pt_regs *regs) @@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, struct kprobe *p; struct kprobe_ctlblk *kcb; - preempt_disable(); - p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) - goto end; + return; kcb = get_kprobe_ctlblk(); if (kprobe_running()) { kprobes_inc_nmissed_count(p); } else { - unsigned long orig_nip = regs->nip; - /* * On powerpc, NIP is *before* this instruction for the * pre handler @@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) - __skip_singlestep(p, regs, kcb, orig_nip); - else { + if (!p->pre_handler || !p->pre_handler(p, regs)) { /* - * If pre_handler returns !0, it sets regs->nip and - * resets current kprobe. In this case, we should not - * re-enable preemption. + * Emulate singlestep (and also recover regs->nip) + * as if there is a nop */ - return; + regs->nip += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } } + /* + * If pre_handler returns !0, it changes regs->nip. We have to + * skip emulating post_handler. 
+ */ + __this_cpu_write(current_kprobe, NULL); } -end: - preempt_enable_no_resched(); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e4c5bf33970b..5c60bb0f927f 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs) } prepare_singlestep(p, regs); return 1; - } else { - if (*addr != BREAKPOINT_INSTRUCTION) { - /* If trap variant, then it belongs not to us */ - kprobe_opcode_t cur_insn = *addr; - if (is_trap(cur_insn)) - goto no_kprobe; - /* The breakpoint instruction was removed by - * another cpu right after we hit, no further - * handling of this interrupt is appropriate - */ - ret = 1; + } else if (*addr != BREAKPOINT_INSTRUCTION) { + /* If trap variant, then it belongs not to us */ + kprobe_opcode_t cur_insn = *addr; + + if (is_trap(cur_insn)) goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - if (!skip_singlestep(p, regs, kcb)) - goto ss_probe; - ret = 1; - } + /* The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; } goto no_kprobe; } @@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs) */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) - goto no_kprobe; + goto no_kprobe; /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_HIT_ACTIVE; set_current_kprobe(p, regs, kcb); - if (p->pre_handler && p->pre_handler(p, regs)) - /* handler has already set things up, so skip ss setup */ + if (p->pre_handler && p->pre_handler(p, regs)) { + /* handler changed execution path, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } -ss_probe: if (p->ainsn.boostable >= 0) { ret = try_to_emulate(p, regs); @@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry) } NOKPROBE_SYMBOL(arch_deref_entry_point); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); - - /* setup return addr to the jprobe handler routine */ - regs->nip = arch_deref_entry_point(jp->entry); -#ifdef PPC64_ELF_ABI_v2 - regs->gpr[12] = (unsigned long)jp->entry; -#elif defined(PPC64_ELF_ABI_v1) - regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); -#endif - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer. - * - * Pause function graph tracing while performing the jprobe function. 
- */ - pause_graph_tracing(); - - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void __used jprobe_return(void) -{ - asm volatile("jprobe_return_trap:\n" - "trap\n" - ::: "memory"); -} -NOKPROBE_SYMBOL(jprobe_return); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) { - pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n", - regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap")); - return 0; - } - - memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - preempt_enable_no_resched(); - return 1; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 9a5b5a513604..32476a6e4e9c 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -104,39 +104,13 @@ ftrace_regs_call: bl ftrace_stub nop - /* Load the possibly modified NIP */ - ld r15, _NIP(r1) - + /* Load ctr with the possibly modified NIP */ + ld r3, _NIP(r1) + mtctr r3 #ifdef CONFIG_LIVEPATCH - cmpd r14, r15 /* has NIP been altered? */ + cmpd r14, r3 /* has NIP been altered? */ #endif -#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE) - /* NIP has not been altered, skip over further checks */ - beq 1f - - /* Check if there is an active jprobe on us */ - subi r3, r14, 4 - bl __is_active_jprobe - nop - - /* - * If r3 == 1, then this is a kprobe/jprobe. - * else, this is livepatched function. - * - * The conditional branch for livepatch_handler below will use the - * result of this comparison. For kprobe/jprobe, we just need to branch to - * the new NIP, not call livepatch_handler. The branch below is bne, so we - * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want - * CR0[EQ] = (r3 == 1). - */ - cmpdi r3, 1 -1: -#endif - - /* Load CTR with the possibly modified NIP */ - mtctr r15 - /* Restore gprs */ REST_GPR(0,r1) REST_10GPRS(2,r1) @@ -154,10 +128,7 @@ ftrace_regs_call: addi r1, r1, SWITCH_FRAME_SIZE #ifdef CONFIG_LIVEPATCH - /* - * Based on the cmpd or cmpdi above, if the NIP was altered and we're - * not on a kprobe/jprobe, then handle livepatch. - */ + /* Based on the cmpd above, if the NIP was altered handle livepatch */ bne- livepatch_handler #endif diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 3f66fcf8ad99..19d8ab49d1bd 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count, } /* - * Add a event to the PMU. + * Add an event to the PMU. * If all events are not already frozen, then we disable and * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. @@ -1548,7 +1548,7 @@ nocheck: } /* - * Remove a event from the PMU. + * Remove an event from the PMU. */ static void power_pmu_del(struct perf_event *event, int ef_flags) { @@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu) /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. 
- * A event can only go on a limited PMC if it counts something + * An event can only go on a limited PMC if it counts something * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. */ diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 13de80cf741c..b106aa29bf55 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -68,8 +68,6 @@ struct kprobe_ctlblk { unsigned long kprobe_saved_imask; unsigned long kprobe_saved_ctl[3]; struct prev_kprobe prev_kprobe; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; }; void arch_remove_kprobe(struct kprobe *p); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 60f60afa645c..7c0a095e9c5f 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs) * If we have no pre-handler or it returned 0, we * continue with single stepping. If we have a * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry - * for jprobe processing, so get out doing nothing - * more here. + * for changing execution path, so get out doing + * nothing more here. */ push_kprobe(kcb, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { + pop_kprobe(kcb); + preempt_enable_no_resched(); return 1; + } kcb->kprobe_status = KPROBE_HIT_SS; } enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn); return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - /* - * Continuation after the jprobe completed and - * caused the jprobe_return trap. The jprobe - * break_handler "returns" to the original - * function that still has the kprobe breakpoint - * installed. We continue with single stepping. - */ - kcb->kprobe_status = KPROBE_HIT_SS; - enable_singlestep(kcb, regs, - (unsigned long) p->ainsn.insn); - return 1; - } /* else: - * No kprobe at this address and the current kprobe - * has no break handler (no jprobe!). The kernel just - * exploded, let the standard trap handler pick up the - * pieces. - */ } /* else: * No kprobe at this address and no active kprobe. The trap has * not been caused by a kprobe breakpoint. 
The race of breakpoint @@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->psw.addr = orig_ret_address; - pop_kprobe(get_kprobe_ctlblk()); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); @@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, } NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack; - - memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); - - /* setup return addr to the jprobe handler routine */ - regs->psw.addr = (unsigned long) jp->entry; - regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); - - /* r15 is the stack pointer */ - stack = (unsigned long) regs->gprs[15]; - - memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer to get messed up. - * - * Pause function graph tracing while performing the jprobe function. - */ - pause_graph_tracing(); - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void jprobe_return(void) -{ - asm volatile(".word 0x0002"); -} -NOKPROBE_SYMBOL(jprobe_return); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack; - - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - - stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15]; - - /* Put the regs back */ - memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); - /* put the stack back */ - memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack)); - preempt_enable_no_resched(); - return 1; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - static struct kprobe trampoline = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h index 7431c172c0cb..199d17b765f2 100644 --- a/arch/sh/include/asm/hw_breakpoint.h +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -10,7 +10,6 @@ #include <linux/types.h> struct arch_hw_breakpoint { - char *name; /* Contains name of the symbol to set bkpt */ unsigned long address; u16 len; u16 type; @@ -41,6 +40,7 @@ struct sh_ubc { struct clk *clk; /* optional interface clock / MSTP bit */ }; +struct perf_event_attr; struct perf_event; struct task_struct; struct pmu; @@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type) } /* arch/sh/kernel/hw_breakpoint.c */ -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 85d8bcaa8493..6171682f7798 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -27,7 +27,6 @@ struct kprobe; void arch_remove_kprobe(struct kprobe *); void 
kretprobe_trampoline(void); -void jprobe_return_end(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { @@ -43,9 +42,6 @@ struct prev_kprobe { /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned long kprobe_status; - unsigned long jprobe_saved_r15; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 8648ed05ccf0..d9ff3b42da7c 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len) /* * Check for virtual address in kernel space. */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->len); + va = hw->address; + len = get_hbp_len(hw->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type, return 0; } -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->len = SH_BREAKPOINT_LEN_1; + hw->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->len = SH_BREAKPOINT_LEN_2; + hw->len = SH_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->len = SH_BREAKPOINT_LEN_4; + hw->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: - info->len = SH_BREAKPOINT_LEN_8; + hw->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_R: - info->type = SH_BREAKPOINT_READ; + hw->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: - info->type = SH_BREAKPOINT_WRITE; + hw->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: - info->type = SH_BREAKPOINT_RW; + hw->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; @@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; ret = -EINVAL; - switch (info->len) { + switch (hw->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; @@ -249,17 +249,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) } /* - * For kernel-addresses, either the address or symbol name can be - * specified. - */ - if (info->name) - info->address = (unsigned long)kallsyms_lookup_name(info->name); - - /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. 
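Before the hunk continues, a plain-C sketch of the alignment rule the sh parse code enforces (lengths and return codes are illustrative; this is not the kernel function):

#include <stdio.h>

static int align_for_len(unsigned int len)
{
	switch (len) {
	case 1: return 0;	/* no alignment constraint */
	case 2: return 1;
	case 4: return 3;
	case 8: return 7;
	default: return -1;
	}
}

static int validate(unsigned long addr, unsigned int len)
{
	int align = align_for_len(len);

	/* low-order address bits must be clear for the implied alignment */
	if (align < 0 || (addr & align))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate(0x1004, 4));	/* 0: aligned */
	printf("%d\n", validate(0x1002, 4));	/* -1: low bits set */
	return 0;
}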
*/ - if (info->address & align) + if (hw->address & align) return -EINVAL; return 0; @@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) perf_bp_event(bp, args->regs); /* Deliver the signal to userspace */ - if (!arch_check_bp_in_kernelspace(bp)) { + if (!arch_check_bp_in_kernelspace(&bp->hw.info)) { force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)NULL, current); } diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 52a5e11247d1..241e903dd3ee 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; - } else { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - goto ss_probe; - } } goto no_kprobe; } @@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { /* handler has already set things up, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } -ss_probe: prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; @@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->pc = orig_ret_address; kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); @@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, if (post_kprobe_handler(args->regs)) ret = NOTIFY_STOP; } else { - if (kprobe_handler(args->regs)) { + if (kprobe_handler(args->regs)) ret = NOTIFY_STOP; - } else { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && - p->break_handler(p, args->regs)) - ret = NOTIFY_STOP; - } } } } @@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_r15 = regs->regs[15]; - addr = kcb->jprobe_saved_r15; - - /* - * TBD: As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. 
- */ - memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, - MIN_STACK_SIZE(addr)); - - regs->pc = (unsigned long)(jp->entry); - - return 1; -} - -void __kprobes jprobe_return(void) -{ - asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack_addr = kcb->jprobe_saved_r15; - u8 *addr = (u8 *)regs->pc; - - if ((addr >= (u8 *)jprobe_return) && - (addr <= (u8 *)jprobe_return_end)) { - *regs = kcb->jprobe_saved_regs; - - memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack, - MIN_STACK_SIZE(stack_addr)); - - kcb->kprobe_status = KPROBE_HIT_SS; - preempt_enable_no_resched(); - return 1; - } - - return 0; -} - static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *)&kretprobe_trampoline, .pre_handler = trampoline_probe_handler diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h index 3704490b4488..bfcaa6326c20 100644 --- a/arch/sparc/include/asm/kprobes.h +++ b/arch/sparc/include/asm/kprobes.h @@ -44,7 +44,6 @@ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_orig_tnpc; unsigned long kprobe_orig_tstate_pil; - struct pt_regs jprobe_saved_regs; struct prev_kprobe prev_kprobe; }; diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index ab4ba4347941..dfbca2470536 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_REENTER; prepare_singlestep(p, regs, kcb); return 1; - } else { - if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { + } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ - ret = 1; - goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) - goto ss_probe; + ret = 1; } goto no_kprobe; } @@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } -ss_probe: prepare_singlestep(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_SS; return 1; @@ -441,53 +437,6 @@ out: exception_exit(prev_state); } -/* Jprobes support. 
*/ -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs)); - - regs->tpc = (unsigned long) jp->entry; - regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; - regs->tstate |= TSTATE_PIL; - - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - register unsigned long orig_fp asm("g1"); - - orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP]; - __asm__ __volatile__("\n" -"1: cmp %%sp, %0\n\t" - "blu,a,pt %%xcc, 1b\n\t" - " restore\n\t" - ".globl jprobe_return_trap_instruction\n" -"jprobe_return_trap_instruction:\n\t" - "ta 0x70" - : /* no outputs */ - : "r" (orig_fp)); -} - -extern void jprobe_return_trap_instruction(void); - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - u32 *addr = (u32 *) regs->tpc; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (addr == (u32 *) jprobe_return_trap_instruction) { - memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - /* The value stored in the return address register is actually 2 * instructions before where the callee will return to. * Sequences usually look something like this @@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 86f0c15dcc2d..035c37481f57 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event) cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); cpuc->intel_cp_status &= ~(1ull << hwc->idx); + if (unlikely(event->attr.precise_ip)) + intel_pmu_pebs_disable(event); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { intel_pmu_disable_fixed(hwc); return; } x86_pmu_disable_event(event); - - if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); } static void intel_pmu_del_event(struct perf_event *event) @@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event) x86_perf_event_update(event); } -static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) +static void intel_pmu_enable_fixed(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; + u64 ctrl_val, mask, bits = 0; /* - * Enable IRQ generation (0x8), + * Enable IRQ generation (0x8), if not PEBS, * and enable ring-3 counting (0x2) and ring-0 counting (0x1) * if requested: */ - bits = 0x8ULL; + if (!event->attr.precise_ip) + bits |= 0x8; if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) bits |= 0x2; if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) @@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event) if (unlikely(event_is_checkpointed(event))) cpuc->intel_cp_status |= (1ull << hwc->idx); + if (unlikely(event->attr.precise_ip)) + intel_pmu_pebs_enable(event); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_enable_fixed(hwc); + intel_pmu_enable_fixed(event); return; } - if 
(unlikely(event->attr.precise_ip)) - intel_pmu_pebs_enable(event); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } @@ -2280,7 +2282,10 @@ again: * counters from the GLOBAL_STATUS mask and we always process PEBS * events via drain_pebs(). */ - status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + status &= ~cpuc->pebs_enabled; + else + status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); /* * PEBS overflow sets bit 62 in the global status register @@ -4072,7 +4077,6 @@ __init int intel_pmu_init(void) intel_pmu_lbr_init_skl(); x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints; x86_pmu.extra_regs = intel_glm_extra_regs; /* * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS @@ -4082,6 +4086,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_prec_dist = true; x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.get_event_constraints = glp_get_event_constraints; x86_pmu.cpu_events = glm_events_attrs; /* Goldmont Plus has 4-wide pipeline */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8dbba77e0518..b7b01d762d32 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -713,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; -struct event_constraint intel_glp_pebs_event_constraints[] = { - /* Allow all events as PEBS with no flags */ - INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), - EVENT_CONSTRAINT_END -}; - struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ @@ -871,6 +865,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) } } + /* + * Extended PEBS support + * Makes the PEBS code search the normal constraints. + */ + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + return NULL; + return &emptyconstraint; } @@ -896,10 +897,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) { struct debug_store *ds = cpuc->ds; u64 threshold; + int reserved; + + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed; + else + reserved = x86_pmu.max_pebs_events; if (cpuc->n_pebs == cpuc->n_large_pebs) { threshold = ds->pebs_absolute_maximum - - x86_pmu.max_pebs_events * x86_pmu.pebs_record_size; + reserved * x86_pmu.pebs_record_size; } else { threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size; } @@ -963,7 +970,11 @@ void intel_pmu_pebs_enable(struct perf_event *event) * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD. 
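A rough model of the threshold sizing shown above, with made-up buffer numbers: under extended PEBS (PMU_FL_PEBS_ALL) the fixed counters get reset slots too, so the reserved slot count grows from max_pebs_events to max_pebs_events + num_counters_fixed:

#include <stdio.h>

#define MAX_PEBS_EVENTS		8
#define MAX_FIXED_PEBS_EVENTS	3

int main(void)
{
	int pebs_all = 1;			/* pretend a Goldmont Plus-like PMU */
	int reserved = MAX_PEBS_EVENTS + (pebs_all ? MAX_FIXED_PEBS_EVENTS : 0);
	unsigned long record_size = 144;	/* hypothetical PEBS record size */
	unsigned long absolute_max = 65536;	/* hypothetical buffer end */

	/* large-PEBS threshold must leave one record slot per usable counter */
	printf("threshold = %lu\n", absolute_max - reserved * record_size);
	return 0;
}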
*/ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { - ds->pebs_event_reset[hwc->idx] = + unsigned int idx = hwc->idx; + + if (idx >= INTEL_PMC_IDX_FIXED) + idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); + ds->pebs_event_reset[idx] = (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; } else { ds->pebs_event_reset[hwc->idx] = 0; @@ -1481,9 +1492,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) struct debug_store *ds = cpuc->ds; struct perf_event *event; void *base, *at, *top; - short counts[MAX_PEBS_EVENTS] = {}; - short error[MAX_PEBS_EVENTS] = {}; - int bit, i; + short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + int bit, i, size; + u64 mask; if (!x86_pmu.pebs_active) return; @@ -1493,6 +1505,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ds->pebs_index = ds->pebs_buffer_base; + mask = (1ULL << x86_pmu.max_pebs_events) - 1; + size = x86_pmu.max_pebs_events; + if (x86_pmu.flags & PMU_FL_PEBS_ALL) { + mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED; + size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + } + if (unlikely(base >= top)) { /* * The drain_pebs() could be called twice in a short period @@ -1502,7 +1521,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * update the event->count for this case. */ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, - x86_pmu.max_pebs_events) { + size) { event = cpuc->events[bit]; if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); @@ -1515,12 +1534,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) u64 pebs_status; pebs_status = p->status & cpuc->pebs_enabled; - pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1; + pebs_status &= mask; /* PEBS v3 has more accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { for_each_set_bit(bit, (unsigned long *)&pebs_status, - x86_pmu.max_pebs_events) + size) counts[bit]++; continue; @@ -1568,7 +1587,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) counts[bit]++; } - for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) { + for (bit = 0; bit < size; bit++) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index cf372b90557e..f3e006bed9a7 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void) void intel_pmu_lbr_reset(void) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + if (!x86_pmu.lbr_nr) return; @@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void) intel_pmu_lbr_reset_32(); else intel_pmu_lbr_reset_64(); + + cpuc->last_task_ctx = NULL; + cpuc->last_log_id = 0; } /* @@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx) static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; unsigned lbr_idx, mask; u64 tos; @@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) return; } - mask = x86_pmu.lbr_nr - 1; tos = task_ctx->tos; - for (i = 0; i < tos; i++) { + /* + * Does not restore the LBR registers, if + * - No one else touched them, and + * - Did not enter C6 + */ + if ((task_ctx == cpuc->last_task_ctx) && + (task_ctx->log_id == cpuc->last_log_id) && + rdlbr_from(tos)) { + task_ctx->lbr_stack_state = LBR_NONE; + return; + } + + mask = x86_pmu.lbr_nr - 1; 
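The fast path added just above deserves a standalone sketch: the save side stamps the context with a per-CPU generation id, and the restore side can skip rewriting the LBR MSRs when this CPU was the last writer and the id still matches (the real code also checks rdlbr_from(tos) to catch the registers having been cleared by a deep C-state). Names below are invented for illustration:

#include <stdio.h>

struct lbr_ctx { int log_id; };

static struct lbr_ctx *last_ctx;	/* per-CPU state in the real scheme */
static int last_log_id;

static void save(struct lbr_ctx *ctx)
{
	last_ctx = ctx;
	last_log_id = ++ctx->log_id;	/* new generation on every save */
}

static int restore(struct lbr_ctx *ctx)
{
	if (ctx == last_ctx && ctx->log_id == last_log_id)
		return 0;	/* nothing touched the LBRs: skip the MSR writes */
	return 1;		/* must rewrite the whole LBR stack */
}

int main(void)
{
	struct lbr_ctx a = { 0 }, b = { 0 };

	save(&a);
	printf("%d\n", restore(&a));	/* 0: fast path */
	save(&b);
	printf("%d\n", restore(&a));	/* 1: another context ran in between */
	return 0;
}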
+ for (i = 0; i < task_ctx->valid_lbrs; i++) { lbr_idx = (tos - i) & mask; wrlbr_from(lbr_idx, task_ctx->lbr_from[i]); wrlbr_to (lbr_idx, task_ctx->lbr_to[i]); @@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + + for (; i < x86_pmu.lbr_nr; i++) { + lbr_idx = (tos - i) & mask; + wrlbr_from(lbr_idx, 0); + wrlbr_to(lbr_idx, 0); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0); + } + wrmsrl(x86_pmu.lbr_tos, tos); task_ctx->lbr_stack_state = LBR_NONE; } static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned lbr_idx, mask; - u64 tos; + u64 tos, from; int i; if (task_ctx->lbr_callstack_users == 0) { @@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < tos; i++) { + for (i = 0; i < x86_pmu.lbr_nr; i++) { lbr_idx = (tos - i) & mask; - task_ctx->lbr_from[i] = rdlbr_from(lbr_idx); + from = rdlbr_from(lbr_idx); + if (!from) + break; + task_ctx->lbr_from[i] = from; task_ctx->lbr_to[i] = rdlbr_to(lbr_idx); if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + task_ctx->valid_lbrs = i; task_ctx->tos = tos; task_ctx->lbr_stack_state = LBR_VALID; + + cpuc->last_task_ctx = task_ctx; + cpuc->last_log_id = ++task_ctx->log_id; } void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) @@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) */ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) { - bool need_info = false; + bool need_info = false, call_stack = false; unsigned long mask = x86_pmu.lbr_nr - 1; int lbr_format = x86_pmu.intel_cap.lbr_format; u64 tos = intel_pmu_lbr_tos(); @@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) if (cpuc->lbr_sel) { need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO); if (cpuc->lbr_sel->config & LBR_CALL_STACK) - num = tos; + call_stack = true; } for (i = 0; i < num; i++) { @@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) from = rdlbr_from(lbr_idx); to = rdlbr_to(lbr_idx); + /* + * Read LBR call stack entries + * until invalid entry (0s) is detected. 
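The early-exit rule in that comment, modelled with a fake register file (illustrative only):

#include <stdio.h>

int main(void)
{
	/* in call-stack mode, a zeroed FROM marks the first invalid entry */
	unsigned long lbr_from[8] = { 0x4000, 0x4100, 0x4200, 0 };
	int i, valid = 0;

	for (i = 0; i < 8; i++) {
		if (!lbr_from[i])
			break;
		valid++;
	}
	printf("valid entries: %d\n", valid);	/* 3 */
	return 0;
}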
+ */ + if (call_stack && !from) + break; + if (lbr_format == LBR_FORMAT_INFO && need_info) { u64 info; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 9f3711470ec1..156286335351 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -163,6 +163,7 @@ struct intel_excl_cntrs { unsigned core_id; /* per-core: core id */ }; +struct x86_perf_task_context; #define MAX_LBR_ENTRIES 32 enum { @@ -214,6 +215,8 @@ struct cpu_hw_events { struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; struct er_account *lbr_sel; u64 br_sel; + struct x86_perf_task_context *last_task_ctx; + int last_log_id; /* * Intel host/guest exclude bits @@ -648,8 +651,10 @@ struct x86_perf_task_context { u64 lbr_to[MAX_LBR_ENTRIES]; u64 lbr_info[MAX_LBR_ENTRIES]; int tos; + int valid_lbrs; int lbr_callstack_users; int lbr_stack_state; + int log_id; }; #define x86_add_quirk(func_) \ @@ -668,6 +673,7 @@ do { \ #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ +#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index f59c39835a5a..a1f0e90d0818 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type) return HBP_NUM; } +struct perf_event_attr; struct perf_event; struct pmu; -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index 62a9f4966b42..ae26df1c2789 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -8,6 +8,7 @@ /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS 8 +#define MAX_FIXED_PEBS_EVENTS 3 /* * A debug store configuration. @@ -23,7 +24,7 @@ struct debug_store { u64 pebs_index; u64 pebs_absolute_maximum; u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; + u64 pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS]; } __aligned(PAGE_SIZE); DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 367d99cff426..c8cec1b39b88 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -78,7 +78,7 @@ struct arch_specific_insn { * boostable = true: This instruction has been boosted: we have * added a relative jump after the instruction copy in insn, * so no single-step and fixup are needed (unless there's - * a post_handler or break_handler). + * a post_handler). 
*/ bool boostable; bool if_modifier; @@ -111,9 +111,6 @@ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_old_flags; unsigned long kprobe_saved_flags; - unsigned long *jprobe_saved_sp; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 8771766d46b6..34a5c1715148 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) set_dr_addr_mask(0, i); } -/* - * Check for virtual address in kernel space. - */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +static int arch_bp_generic_len(int x86_len) { - unsigned int len; - unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - va = info->address; - len = bp->attr.bp_len; - - /* - * We don't need to worry about va + len - 1 overflowing: - * we already require that va is aligned to a multiple of len. - */ - return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); + switch (x86_len) { + case X86_BREAKPOINT_LEN_1: + return HW_BREAKPOINT_LEN_1; + case X86_BREAKPOINT_LEN_2: + return HW_BREAKPOINT_LEN_2; + case X86_BREAKPOINT_LEN_4: + return HW_BREAKPOINT_LEN_4; +#ifdef CONFIG_X86_64 + case X86_BREAKPOINT_LEN_8: + return HW_BREAKPOINT_LEN_8; +#endif + default: + return -EINVAL; + } } int arch_bp_generic_fields(int x86_len, int x86_type, int *gen_len, int *gen_type) { + int len; + /* Type */ switch (x86_type) { case X86_BREAKPOINT_EXECUTE: @@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type, } /* Len */ - switch (x86_len) { - case X86_BREAKPOINT_LEN_1: - *gen_len = HW_BREAKPOINT_LEN_1; - break; - case X86_BREAKPOINT_LEN_2: - *gen_len = HW_BREAKPOINT_LEN_2; - break; - case X86_BREAKPOINT_LEN_4: - *gen_len = HW_BREAKPOINT_LEN_4; - break; -#ifdef CONFIG_X86_64 - case X86_BREAKPOINT_LEN_8: - *gen_len = HW_BREAKPOINT_LEN_8; - break; -#endif - default: + len = arch_bp_generic_len(x86_len); + if (len < 0) return -EINVAL; - } + *gen_len = len; return 0; } - -static int arch_build_bp_info(struct perf_event *bp) +/* + * Check for virtual address in kernel space. + */ +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); + unsigned long va; + int len; - info->address = bp->attr.bp_addr; + va = hw->address; + len = arch_bp_generic_len(hw->len); + WARN_ON_ONCE(len < 0); + + /* + * We don't need to worry about va + len - 1 overflowing: + * we already require that va is aligned to a multiple of len. + */ + return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); +} + +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + hw->address = attr->bp_addr; + hw->mask = 0; /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_W: - info->type = X86_BREAKPOINT_WRITE; + hw->type = X86_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: - info->type = X86_BREAKPOINT_RW; + hw->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: /* @@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp) * acceptable for kprobes. On non-kprobes kernels, we don't * allow kernel breakpoints at all. 
*/
*/ - if (bp->attr.bp_addr >= TASK_SIZE_MAX) { + if (attr->bp_addr >= TASK_SIZE_MAX) { #ifdef CONFIG_KPROBES - if (within_kprobe_blacklist(bp->attr.bp_addr)) + if (within_kprobe_blacklist(attr->bp_addr)) return -EINVAL; #else return -EINVAL; #endif } - info->type = X86_BREAKPOINT_EXECUTE; + hw->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. * But we still need to check userspace is not trying to setup * an unsupported length, to get a range breakpoint for example. */ - if (bp->attr.bp_len == sizeof(long)) { - info->len = X86_BREAKPOINT_LEN_X; + if (attr->bp_len == sizeof(long)) { + hw->len = X86_BREAKPOINT_LEN_X; return 0; } default: @@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp) } /* Len */ - info->mask = 0; - - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->len = X86_BREAKPOINT_LEN_1; + hw->len = X86_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->len = X86_BREAKPOINT_LEN_2; + hw->len = X86_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->len = X86_BREAKPOINT_LEN_4; + hw->len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case HW_BREAKPOINT_LEN_8: - info->len = X86_BREAKPOINT_LEN_8; + hw->len = X86_BREAKPOINT_LEN_8; break; #endif default: /* AMD range breakpoint */ - if (!is_power_of_2(bp->attr.bp_len)) + if (!is_power_of_2(attr->bp_len)) return -EINVAL; - if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) + if (attr->bp_addr & (attr->bp_len - 1)) return -EINVAL; if (!boot_cpu_has(X86_FEATURE_BPEXT)) @@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp) * breakpoints, then we'll have to check for kprobe-blacklisted * addresses anywhere in the range. */ - info->mask = bp->attr.bp_len - 1; - info->len = X86_BREAKPOINT_LEN_1; + hw->mask = attr->bp_len - 1; + hw->len = X86_BREAKPOINT_LEN_1; } return 0; @@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; - switch (info->len) { + switch (hw->len) { case X86_BREAKPOINT_LEN_1: align = 0; - if (info->mask) - align = info->mask; + if (hw->mask) + align = hw->mask; break; case X86_BREAKPOINT_LEN_2: align = 1; @@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * Check that the low-order bits of the address are appropriate * for the alignment implied by len. 
*/ - if (info->address & align) + if (hw->address & align) return -EINVAL; return 0; diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index ae38dccf0c8f..2b949f4fd4d8 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig } #endif -#ifdef CONFIG_KPROBES_ON_FTRACE -extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb); -#else -static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - return 0; -} -#endif #endif diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6f4d42377fe5..b0d1e81c96bb 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -66,8 +66,6 @@ #include "common.h" -void jprobe_return_end(void); - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) - (u8 *) real; if ((s64) (s32) newdisp != newdisp) { pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); - pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", - src, real, insn->displacement.value); return 0; } disp = (u8 *) dest + insn_offset_displacement(insn); @@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, * stepping. */ regs->ip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); return; } #endif @@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, * Raise a BUG or we'll continue in an endless reentering loop * and eventually a stack overflow. */ - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_err("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); default: @@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs) addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); /* - * We don't want to be preempted for the entire - * duration of kprobe processing. We conditionally - * re-enable preemption at the end of this function, - * and also in reenter_kprobe() and setup_singlestep(). + * We don't want to be preempted for the entire duration of kprobe + * processing. Since the int3 and debug traps disable irqs and we clear + * IF while single stepping, it must not be preemptible. */ - preempt_disable(); kcb = get_kprobe_ctlblk(); p = get_kprobe(addr); @@ -690,13 +682,14 @@ /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry - * for jprobe processing, so get out doing nothing - * more here. + * pre-handler and it returned non-zero, that means the + * user handler set up registers to resume at another + * instruction, so we must skip the single stepping. */ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); return 1; } } else if (*addr != BREAKPOINT_INSTRUCTION) { @@ -710,18 +703,9 @@ * the original instruction.
*/ regs->ip = (unsigned long)addr; - preempt_enable_no_resched(); return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - if (!skip_singlestep(p, regs, kcb)) - setup_singlestep(p, regs, kcb, 0); - return 1; - } } /* else: not a kprobe fault; let the kernel handle it */ - preempt_enable_no_resched(); return 0; } NOKPROBE_SYMBOL(kprobe_int3_handler); @@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs) } reset_current_kprobe(); out: - preempt_enable_no_resched(); - /* * if somebody else is singlestepping across a probe point, flags * will have TF set, in which case, continue the remaining processing @@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE || kcb->kprobe_status == KPROBE_HIT_SSDONE) { /* @@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, } NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_sp = stack_addr(regs); - addr = (unsigned long)(kcb->jprobe_saved_sp); - - /* - * As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. - * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy - * raw stack chunk with redzones: - */ - __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); - regs->ip = (unsigned long)(jp->entry); - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer to get messed up. - * - * Pause function graph tracing while performing the jprobe function. - */ - pause_graph_tracing(); - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - /* Unpoison stack redzones in the frames we are going to jump over. 
*/ - kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp); - - asm volatile ( -#ifdef CONFIG_X86_64 - " xchg %%rbx,%%rsp \n" -#else - " xchgl %%ebx,%%esp \n" -#endif - " int3 \n" - " .globl jprobe_return_end\n" - " jprobe_return_end: \n" - " nop \n"::"b" - (kcb->jprobe_saved_sp):"memory"); -} -NOKPROBE_SYMBOL(jprobe_return); -NOKPROBE_SYMBOL(jprobe_return_end); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - u8 *addr = (u8 *) (regs->ip - 1); - struct jprobe *jp = container_of(p, struct jprobe, kp); - void *saved_sp = kcb->jprobe_saved_sp; - - if ((addr > (u8 *) jprobe_return) && - (addr < (u8 *) jprobe_return_end)) { - if (stack_addr(regs) != saved_sp) { - struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; - printk(KERN_ERR - "current sp %p does not match saved sp %p\n", - stack_addr(regs), saved_sp); - printk(KERN_ERR "Saved registers for jprobe %p\n", jp); - show_regs(saved_regs); - printk(KERN_ERR "Current registers\n"); - show_regs(regs); - BUG(); - } - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - *regs = kcb->jprobe_saved_regs; - __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - bool arch_within_kprobe_blacklist(unsigned long addr) { bool is_in_entry_trampoline_section = false; diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 8dc0161cec8f..ef819e19650b 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -25,36 +25,6 @@ #include "common.h" -static nokprobe_inline -void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb, unsigned long orig_ip) -{ - /* - * Emulate singlestep (and also recover regs->ip) - * as if there is a 5byte nop - */ - regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); - if (orig_ip) - regs->ip = orig_ip; -} - -int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - if (kprobe_ftrace(p)) { - __skip_singlestep(p, regs, kcb, 0); - preempt_enable_no_resched(); - return 1; - } - return 0; -} -NOKPROBE_SYMBOL(skip_singlestep); - /* Ftrace callback handler for kprobes -- called under preempt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) @@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); - /* To emulate trap based kprobes, preempt_disable here */ - preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) { - __skip_singlestep(p, regs, kcb, orig_ip); - preempt_enable_no_resched(); + /* + * Emulate singlestep (and also recover regs->ip) + * as if there is a 5byte nop + */ + regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + regs->ip = orig_ip; } /* - * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe, and keep preempt count +1. + * If pre_handler returns !0, it changes regs->ip.
We have to + * skip emulating post_handler. */ + __this_cpu_write(current_kprobe, NULL); } } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 203d398802a3..eaf02f2e7300 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; if (!reenter) reset_current_kprobe(); - preempt_enable_no_resched(); return 1; } return 0; diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h index dbe3053b284a..9f119c1ca0b5 100644 --- a/arch/xtensa/include/asm/hw_breakpoint.h +++ b/arch/xtensa/include/asm/hw_breakpoint.h @@ -30,13 +30,16 @@ struct arch_hw_breakpoint { u16 type; }; +struct perf_event_attr; struct perf_event; struct pt_regs; struct task_struct; int hw_breakpoint_slots(int type); -int arch_check_bp_in_kernelspace(struct perf_event *bp); -int arch_validate_hwbkpt_settings(struct perf_event *bp); +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c index b35656ab7dbd..c2e387c19cda 100644 --- a/arch/xtensa/kernel/hw_breakpoint.c +++ b/arch/xtensa/kernel/hw_breakpoint.c @@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type) } } -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = bp->attr.bp_len; + va = hw->address; + len = hw->len; return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) /* * Construct an arch_hw_breakpoint from a perf_event. */ -static int arch_build_bp_info(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->type = XTENSA_BREAKPOINT_EXECUTE; + hw->type = XTENSA_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->type = XTENSA_BREAKPOINT_LOAD; + hw->type = XTENSA_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->type = XTENSA_BREAKPOINT_STORE; + hw->type = XTENSA_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE; + hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - info->len = bp->attr.bp_len; - if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len)) + hw->len = attr->bp_len; + if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len)) return -EINVAL; /* Address */ - info->address = bp->attr.bp_addr; - if (info->address & (info->len - 1)) + hw->address = attr->bp_addr; + if (hw->address & (hw->len - 1)) return -EINVAL; return 0; } -int arch_validate_hwbkpt_settings(struct perf_event *bp) -{ - int ret; - - /* Build the arch_hw_breakpoint. 
*/ - ret = arch_build_bp_info(bp); - return ret; -} - int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { |
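Taken together with the sh and x86 hunks above, the xtensa change completes the refactor and leaves every architecture with the same parse shape: read only the perf_event_attr, fill only the arch_hw_breakpoint. In miniature, with simplified stand-in types (not the kernel structures):

#include <stdio.h>

struct bp_attr { unsigned long addr; unsigned int len, type; };
struct arch_bp { unsigned long address; unsigned int len, type; };

static int parse(const struct bp_attr *attr, struct arch_bp *hw)
{
	if (!attr->len || (attr->addr & (attr->len - 1)))
		return -1;
	hw->address = attr->addr;	/* no perf_event access needed */
	hw->len = attr->len;
	hw->type = attr->type;
	return 0;
}

int main(void)
{
	struct bp_attr attr = { .addr = 0x1000, .len = 4, .type = 1 };
	struct arch_bp scratch;

	/*
	 * The generic layer can now validate settings on a scratch
	 * descriptor and commit them to the event only on success.
	 */
	printf("%d\n", parse(&attr, &scratch));
	return 0;
}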