Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c |  4
-rw-r--r--  arch/powerpc/kernel/entry_64.S    | 97
-rw-r--r--  arch/powerpc/kernel/irq.c         |  3
-rw-r--r--  arch/powerpc/kernel/process.c     |  6
-rw-r--r--  arch/powerpc/kernel/setup_64.c    | 17
5 files changed, 119 insertions, 8 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d0183d3180a..c9370d4e36bd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,6 +86,10 @@ int main(void)
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_LIVEPATCH
+ DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
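
For context: the new TI_livepatch_sp offset, and the klp_init_thread_info()
calls added in the .c files below, refer to declarations that live outside
this diffstat (asm/thread_info.h and asm/livepatch.h). A minimal sketch of
what they are assumed to look like, not the verbatim header contents:

    #ifdef CONFIG_LIVEPATCH
    /* Assumed: new field added to struct thread_info. */
    struct thread_info {
            /* ... existing fields ... */
            unsigned long *livepatch_sp;    /* top of this thread's livepatch stack */
    };

    /*
     * Assumed helper: start the livepatch stack just above thread_info,
     * skipping one slot reserved for the kernel stack's STACK_END_MAGIC.
     */
    static inline void klp_init_thread_info(struct thread_info *ti)
    {
            ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
    }
    #else
    static inline void klp_init_thread_info(struct thread_info *ti) { }
    #endif
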
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9916d150b28c..39a79c89a4b6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -1248,6 +1249,9 @@ _GLOBAL(ftrace_caller)
addi r3,r3,function_trace_op@toc@l
ld r5,0(r3)
+#ifdef CONFIG_LIVEPATCH
+ mr r14,r7 /* remember old NIP */
+#endif
/* Calculate ip from nip-4 into r3 for call below */
subi r3, r7, MCOUNT_INSN_SIZE
@@ -1272,6 +1276,9 @@ ftrace_call:
/* Load ctr with the possibly modified NIP */
ld r3, _NIP(r1)
mtctr r3
+#ifdef CONFIG_LIVEPATCH
+ cmpd r14,r3 /* has NIP been altered? */
+#endif
/* Restore gprs */
REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@ ftrace_call:
ld r0, LRSAVE(r1)
mtlr r0
+#ifdef CONFIG_LIVEPATCH
+ /* Based on the cmpd above, if the NIP was altered, handle livepatch */
+ bne- livepatch_handler
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
stdu r1, -112(r1)
.globl ftrace_graph_call
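
The three small ftrace_caller hunks above implement the detection step: the
NIP passed in by the mcount call site is stashed in r14 (whose original value
is saved in, and later restored from, the pt_regs frame), then compared
against the possibly rewritten _NIP(r1) before the GPRs are restored. The
resulting condition code drives the bne- to livepatch_handler added above;
the NIP only changes if the generic livepatch ftrace handler redirected it to
the replacement function. A condensed sketch of that handler, with locking
and error handling elided and klp_find_patched_func() as a hypothetical
stand-in for the real lookup in kernel/livepatch:

    static void notrace klp_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                           struct ftrace_ops *fops, struct pt_regs *regs)
    {
            /* Hypothetical lookup: newest live-patched replacement for ip. */
            struct klp_func *func = klp_find_patched_func(fops, ip);

            /*
             * Redirect execution: on powerpc klp_arch_set_pc() sets regs->nip,
             * which ftrace_caller reloads into ctr as the "possibly modified
             * NIP" and compares against the value stashed in r14.
             */
            klp_arch_set_pc(regs, (unsigned long)func->new_func);
    }
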
@@ -1305,6 +1317,91 @@ _GLOBAL(ftrace_graph_stub)
_GLOBAL(ftrace_stub)
blr
+
+#ifdef CONFIG_LIVEPATCH
+ /*
+ * This function runs in the mcount context, between two functions. As
+ * such it can only clobber registers which are volatile and used in
+ * function linkage.
+ *
+ * We get here when a function A calls another function B, but B has
+ * been live patched with a new function C.
+ *
+ * On entry:
+ * - we have no stack frame and cannot allocate one
+ * - LR points back to the original caller (in A)
+ * - CTR holds the new NIP in C
+ * - r0 & r12 are free
+ *
+ * r0 can't be used as the base register for a DS-form load or store, so
+ * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ */
+livepatch_handler:
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ /* Allocate 3 x 8 bytes */
+ ld r1, TI_livepatch_sp(r12)
+ addi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Save toc & real LR on livepatch stack */
+ std r2, -24(r1)
+ mflr r12
+ std r12, -16(r1)
+
+ /* Store stack end marker */
+ lis r12, STACK_END_MAGIC@h
+ ori r12, r12, STACK_END_MAGIC@l
+ std r12, -8(r1)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Put ctr in r12 for global entry and branch there */
+ mfctr r12
+ bctrl
+
+ /*
+ * Now we are returning from the patched function to the original
+ * caller A. We are free to use r0 and r12, and we can use r2 until we
+ * restore it.
+ */
+
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ ld r1, TI_livepatch_sp(r12)
+
+ /* Check stack marker hasn't been trashed */
+ lis r2, STACK_END_MAGIC@h
+ ori r2, r2, STACK_END_MAGIC@l
+ ld r12, -8(r1)
+1: tdne r12, r2
+ EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+ /* Restore LR & toc from livepatch stack */
+ ld r12, -16(r1)
+ mtlr r12
+ ld r2, -24(r1)
+
+ /* Pop livepatch stack frame */
+ CURRENT_THREAD_INFO(r12, r0)
+ subi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Return to original caller of live patched function */
+ blr
+#endif
+
+
#else
_GLOBAL_TOC(_mcount)
/* Taken from output of objdump from lib64/glibc */
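
Each trip through livepatch_handler pushes one 24-byte record onto the
per-thread livepatch stack and pops it again when the patched function
returns, so nested live-patched calls simply stack up. A hypothetical C view
of one record, matching the std/ld offsets used above (the asm works on raw
offsets; no such struct exists in the tree):

    struct livepatch_stack_entry {
            unsigned long toc;      /* caller's r2 (TOC pointer), stored at -24(r1) */
            unsigned long lr;       /* real return address into caller A, at -16(r1) */
            unsigned long magic;    /* STACK_END_MAGIC sentinel, at -8(r1) */
    };

On the return path the sentinel is checked with tdne/EMIT_BUG_ENTRY before
the saved TOC and LR are trusted, catching overflow or corruption of the
livepatch stack.
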
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 290559df1e8b..3cb46a3b1de7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>
+#include <asm/livepatch.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
tp = softirq_ctx[i];
tp->cpu = i;
+ klp_init_thread_info(tp);
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
tp = hardirq_ctx[i];
tp->cpu = i;
+ klp_init_thread_info(tp);
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b8500b4ac7fe..2a9280b945e0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -55,6 +55,8 @@
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -1400,13 +1402,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
extern void ret_from_kernel_thread(void);
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ struct thread_info *ti = task_thread_info(p);
+
+ klp_init_thread_info(ti);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
- struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
/* function */
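
The copy_thread() change hoists the thread_info lookup out of the kernel
thread branch so that every new task, kernel or user, gets its livepatch
stack pointer initialised. At the time of this patch thread_info still sits
at the bottom of the kernel stack, so the new lookup is equivalent to the
old open-coded cast; the generic definition is assumed to be roughly:

    #define task_thread_info(tsk)   ((struct thread_info *)(tsk)->stack)
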
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f98be8383a39..96d4a2b23d0f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -69,6 +69,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
- unsigned long sp;
- sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
- sp += THREAD_SIZE;
- paca[i].emergency_sp = __va(sp);
+ struct thread_info *ti;
+ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+ klp_init_thread_info(ti);
+ paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for machine check exception handling. */
- sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
- sp += THREAD_SIZE;
- paca[i].mc_emergency_sp = __va(sp);
+ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+ klp_init_thread_info(ti);
+ paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
}
}
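
The emergency_stack_init() rewrite computes the same stack tops as before; it
only changes the intermediate representation so the thread_info at the bottom
of each region can be initialised. An illustrative comparison (base stands
for the memblock allocation; the variable names are not from the patch):

    unsigned long base = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

    void *old_sp = __va(base + THREAD_SIZE);    /* previous code */

    struct thread_info *ti = __va(base);        /* new code */
    klp_init_thread_info(ti);
    void *new_sp = (void *)ti + THREAD_SIZE;    /* same address as old_sp */
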
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.panic)
setup_panic();
+ klp_init_thread_info(&init_thread_info);
+
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;