author     Abhishek Dubey <adubey@linux.ibm.com>  2024-08-30 07:31:31 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>  2024-09-05 22:25:36 +1000
commit     19f1bc3fb55452739dd3d56cfd06c29ecdbe3e9f (patch)
tree       f10906e3149bc2337ca8001710986338eabfa383 /arch/powerpc
parent     6f2683274d0d0b51a986f73c2afb9058156f1641 (diff)
powerpc: Replace kretprobe code with rethook on powerpc
This is an adaptation of commit f3a112c0c40d ("x86,rethook,kprobes: Replace kretprobe with rethook on x86") to powerpc.

Rethook follows the existing kretprobe implementation, but separates it from kprobes so that it can be used by fprobe (ftrace-based function entry/exit probes); as such, this patch also enables fprobe to work on powerpc. The only other change compared to the existing kretprobe implementation is that the return address fixup is done in arch_rethook_fixup_return().

Reference to other archs:
commit b57c2f124098 ("riscv: add riscv rethook implementation")
commit 7b0a096436c2 ("LoongArch: Replace kretprobe with rethook")

Note:
=====
In the future, rethook will be used only by kretprobe, and kretprobe will be replaced by fprobe:
https://lore.kernel.org/all/172000134410.63468.13742222887213469474.stgit@devnote2/

We will adapt that implementation for powerpc once it is upstream. Until then, this rethook implementation serves the current kretprobe use cases.

Reviewed-by: Naveen Rao <naveen@kernel.org>
Signed-off-by: Abhishek Dubey <adubey@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240830113131.7597-1-adubey@linux.ibm.com
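[Editor's note] With HAVE_RETHOOK selected, fprobe becomes usable on powerpc. Purely as a hedged illustration (not part of this patch): a minimal fprobe module sketch, assuming the fprobe API contemporary with this commit (entry/exit handlers taking entry_ip, ret_ip, pt_regs and entry_data; these signatures have changed across kernel versions), with "kernel_clone" as an arbitrary example target.

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical demo module, not from this patch. */
#include <linux/fprobe.h>
#include <linux/module.h>

static int demo_entry(struct fprobe *fp, unsigned long entry_ip,
		      unsigned long ret_ip, struct pt_regs *regs, void *data)
{
	pr_info("entry: %pS\n", (void *)entry_ip);
	return 0;
}

static void demo_exit(struct fprobe *fp, unsigned long entry_ip,
		      unsigned long ret_ip, struct pt_regs *regs, void *data)
{
	/* The exit side is what requires rethook: the probed function's
	 * return is intercepted via the trampoline this patch adds. */
	pr_info("exit:  %pS -> %pS\n", (void *)entry_ip, (void *)ret_ip);
}

static struct fprobe demo_fprobe = {
	.entry_handler = demo_entry,
	.exit_handler  = demo_exit,
};

static int __init demo_init(void)
{
	/* Filter syntax as accepted by register_fprobe(); no notfilter. */
	return register_fprobe(&demo_fprobe, "kernel_clone", NULL);
}

static void __exit demo_fini(void)
{
	unregister_fprobe(&demo_fprobe);
}

module_init(demo_init);
module_exit(demo_fini);
MODULE_LICENSE("GPL");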
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig             |  1
-rw-r--r--  arch/powerpc/kernel/Makefile     |  1
-rw-r--r--  arch/powerpc/kernel/kprobes.c    | 65
-rw-r--r--  arch/powerpc/kernel/optprobes.c  |  2
-rw-r--r--  arch/powerpc/kernel/rethook.c    | 73
-rw-r--r--  arch/powerpc/kernel/stacktrace.c |  6
6 files changed, 81 insertions(+), 67 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b9f11c262582..1f9d23b276b5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -269,6 +269,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RETHOOK if KPROBES
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1784b6a6ca1d..f43c1198768c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 14c5ddec3056..f8aa91bc3b17 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -228,16 +228,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
kcb->kprobe_saved_msr = regs->msr;
}
-void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *)regs->link;
- ri->fp = NULL;
-
- /* Replace the return addr with trampoline addr */
- regs->link = (unsigned long)__kretprobe_trampoline;
-}
-NOKPROBE_SYMBOL(arch_prepare_kretprobe);
-
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
@@ -395,49 +385,6 @@ no_kprobe:
NOKPROBE_SYMBOL(kprobe_handler);
/*
- * Function return probe trampoline:
- * - init_kprobes() establishes a probepoint here
- * - When the probed function returns, this probe
- * causes the handlers to fire
- */
-asm(".global __kretprobe_trampoline\n"
- ".type __kretprobe_trampoline, @function\n"
- "__kretprobe_trampoline:\n"
- "nop\n"
- "blr\n"
- ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
-
-/*
- * Called when the probe at kretprobe trampoline is hit
- */
-static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-{
- unsigned long orig_ret_address;
-
- orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
- /*
- * We get here through one of two paths:
- * 1. by taking a trap -> kprobe_handler() -> here
- * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
- *
- * When going back through (1), we need regs->nip to be setup properly
- * as it is used to determine the return address from the trap.
- * For (2), since nip is not honoured with optprobes, we instead setup
- * the link register properly so that the subsequent 'blr' in
- * __kretprobe_trampoline jumps back to the right instruction.
- *
- * For nip, we should set the address to the previous instruction since
- * we end up emulating it in kprobe_handler(), which increments the nip
- * again.
- */
- regs_set_return_ip(regs, orig_ret_address - 4);
- regs->link = orig_ret_address;
-
- return 0;
-}
-NOKPROBE_SYMBOL(trampoline_probe_handler);
-
-/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "breakpoint"
* instruction. To avoid the SMP problems that can occur when we
@@ -539,19 +486,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
- return register_kprobe(&trampoline_p);
-}
-
int arch_trampoline_kprobe(struct kprobe *p)
{
- if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
return 1;
return 0;
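[Editor's note] The user-facing kretprobe API is unchanged by this patch; only the arch-level plumbing moves to rethook, so existing consumers keep working. A hedged sketch (not from this patch, in the spirit of samples/kprobes/kretprobe_example.c), again with "kernel_clone" as an arbitrary target:

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical demo module, not from this patch. */
#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On powerpc, regs_return_value() reads the return register (r3). */
	pr_info("kernel_clone returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_krp = {
	.kp.symbol_name	= "kernel_clone",
	.handler	= demo_ret_handler,
	.maxactive	= 20,	/* concurrent instances to track */
};

static int __init demo_init(void)
{
	return register_kretprobe(&demo_krp);
}

static void __exit demo_fini(void)
{
	unregister_kretprobe(&demo_krp);
}

module_init(demo_init);
module_exit(demo_fini);
MODULE_LICENSE("GPL");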
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 004fae2044a3..c0b351d61058 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p)
* has a 'nop' instruction, which can be emulated.
* So further checks can be skipped.
*/
- if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
return addr + sizeof(kprobe_opcode_t);
/*
diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c
new file mode 100644
index 000000000000..5f5f47ae82cf
--- /dev/null
+++ b/arch/powerpc/kernel/rethook.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PowerPC implementation of rethook. This depends on kprobes.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+
+/*
+ * Function return trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe
+ * causes the handlers to fire
+ */
+asm(".global arch_rethook_trampoline\n"
+ ".type arch_rethook_trampoline, @function\n"
+ "arch_rethook_trampoline:\n"
+ "nop\n"
+ "blr\n"
+ ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n");
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ return !rethook_trampoline_handler(regs, regs->gpr[1]);
+}
+NOKPROBE_SYMBOL(trampoline_rethook_handler);
+
+void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount)
+{
+ rh->ret_addr = regs->link;
+ rh->frame = regs->gpr[1];
+
+ /* Replace the return addr with trampoline addr */
+ regs->link = (unsigned long)arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
+
+/* This is called from rethook_trampoline_handler(). */
+void arch_rethook_fixup_return(struct pt_regs *regs, unsigned long orig_ret_address)
+{
+ /*
+ * We get here through one of two paths:
+ * 1. by taking a trap -> kprobe_handler() -> here
+ * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
+ *
+ * When going back through (1), we need regs->nip to be setup properly
+ * as it is used to determine the return address from the trap.
+ * For (2), since nip is not honoured with optprobes, we instead setup
+ * the link register properly so that the subsequent 'blr' in
+ * arch_rethook_trampoline jumps back to the right instruction.
+ *
+ * For nip, we should set the address to the previous instruction since
+ * we end up emulating it in kprobe_handler(), which increments the nip
+ * again.
+ */
+ regs_set_return_ip(regs, orig_ret_address - 4);
+ regs->link = orig_ret_address;
+}
+NOKPROBE_SYMBOL(arch_rethook_fixup_return);
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &arch_rethook_trampoline,
+ .pre_handler = trampoline_rethook_handler
+};
+
+/* rethook initializer */
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
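[Editor's note] A toy model (userspace C, purely illustrative, not kernel code) of the nip fixup above: powerpc instructions are 4 bytes wide, and on the trap path kprobe_handler() emulates the instruction at nip and then advances nip by one instruction, so pointing nip at orig_ret_address - 4 makes execution resume exactly at the original return address.

#include <assert.h>

#define PPC_INSN_LEN 4UL

/* Models kprobe_handler() emulating the instruction at nip and then
 * advancing nip by one instruction. */
static unsigned long post_emulation_nip(unsigned long nip)
{
	return nip + PPC_INSN_LEN;
}

int main(void)
{
	unsigned long orig_ret_address = 0xc0000000001234a8UL; /* arbitrary */
	/* the fixup performed by arch_rethook_fixup_return() */
	unsigned long nip = orig_ret_address - PPC_INSN_LEN;

	assert(post_emulation_nip(nip) == orig_ret_address);
	return 0;
}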
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e6a958a5da27..90882b5175cd 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -21,6 +21,7 @@
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
+#include <linux/rethook.h>
#include <asm/paca.h>
@@ -133,12 +134,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
* arch-dependent code, they are generic.
*/
ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
-#ifdef CONFIG_KPROBES
+
/*
* Mark stacktraces with kretprobed functions on them
* as unreliable.
*/
- if (ip == (unsigned long)__kretprobe_trampoline)
+#ifdef CONFIG_RETHOOK
+ if (ip == (unsigned long)arch_rethook_trampoline)
return -EINVAL;
#endif
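[Editor's note] Consumers observe this through the reliable stacktrace API. A hedged sketch (hypothetical helper, not from this patch; stack_trace_save_tsk_reliable() is only built with CONFIG_HAVE_RELIABLE_STACKTRACE) of how livepatch-style code would see a task whose stack contains the rethook trampoline:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Hypothetical helper: returns false when the walker reports the stack
 * as unreliable, e.g. -EINVAL when arch_rethook_trampoline is found. */
static bool demo_stack_is_reliable(struct task_struct *tsk)
{
	unsigned long entries[64];
	int nr;

	nr = stack_trace_save_tsk_reliable(tsk, entries, ARRAY_SIZE(entries));
	return nr >= 0;
}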