-rw-r--r--  arch/s390/kernel/early.c         1
-rw-r--r--  arch/s390/kernel/kprobes.c      27
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S   3
-rw-r--r--  arch/s390/mm/init.c              9
-rw-r--r--  include/asm-s390/sections.h      2
5 files changed, 31 insertions, 11 deletions
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 40dd47970a33..e518dd53eff5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -27,7 +27,6 @@
#define DEFSYS_CMD_SIZE 96
#define SAVESYS_CMD_SIZE 32
-extern int _eshared;
char kernel_nss_name[NSS_NAME_SIZE + 1];
#ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index b2e1dc89a8c6..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
static int __kprobes swap_instruction(void *aref)
{
struct ins_replace_args *args = aref;
+ u32 *addr;
+ u32 instr;
int err = -EFAULT;
+ /*
+ * Text segment is read-only, hence we use stura to bypass dynamic
+ * address translation to exchange the instruction. Since stura
+ * always operates on four bytes but we only want to exchange two
+ * bytes, do some calculations to get things right. In addition we
+ * shall not cross any page boundaries (vmalloc area!) when writing
+ * the new instruction.
+ */
+ addr = (u32 *)((unsigned long)args->ptr & -4UL);
+ if ((unsigned long)args->ptr & 2)
+ instr = ((*addr) & 0xffff0000) | args->new;
+ else
+ instr = ((*addr) & 0x0000ffff) | args->new << 16;
+
asm volatile(
- "0: mvc 0(2,%2),0(%3)\n"
- "1: la %0,0\n"
+ " lra %1,0(%1)\n"
+ "0: stura %2,%1\n"
+ "1: la %0,0\n"
"2:\n"
EX_TABLE(0b,2b)
- : "+d" (err), "=m" (*args->ptr)
- : "a" (args->ptr), "a" (&args->new), "m" (args->new));
+ : "+d" (err)
+ : "a" (addr), "d" (instr)
+ : "memory", "cc");
+
return err;
}
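The new swap_instruction() merges the 2-byte breakpoint into the 4-byte word that contains it before storing the whole word with stura. Below is a minimal stand-alone sketch of that halfword merge, assuming a big-endian word (as on s390) and that the containing word is found by rounding the probe address down to a 4-byte boundary; the function name and sample opcodes are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Merge a 2-byte opcode into the 4-byte-aligned word that contains it. */
static uint32_t merge_halfword(unsigned long ptr, uint32_t word, uint16_t new)
{
	if (ptr & 2)			/* opcode sits in the low halfword  */
		return (word & 0xffff0000) | new;
	else				/* opcode sits in the high halfword */
		return (word & 0x0000ffff) | ((uint32_t)new << 16);
}

int main(void)
{
	uint32_t word = 0x07fe0707;	/* two halfword instructions, big-endian view */
	uint16_t brkpt = 0x0002;	/* breakpoint opcode used by s390 kprobes */

	/* Probe address at offset 2 in the word: only the low halfword changes. */
	printf("%08x\n", merge_halfword(2, word, brkpt));	/* 07fe0002 */
	/* Probe address at offset 0 in the word: only the high halfword changes. */
	printf("%08x\n", merge_halfword(0, word, brkpt));	/* 00020707 */
	return 0;
}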
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 8fedb1f9fc97..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -35,9 +35,10 @@ SECTIONS
#ifdef CONFIG_SHARED_KERNEL
. = ALIGN(1048576); /* VM shared segments are 1MB aligned */
+#endif
+ . = ALIGN(4096);
_eshared = .; /* End of shareable data */
-#endif
. = ALIGN(16); /* Exception table */
__start___ex_table = .;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 162a338a5575..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -26,7 +26,6 @@
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
-
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -96,8 +95,8 @@ static void __init setup_ro_region(void)
pte_t new_pte;
unsigned long address, end;
- address = ((unsigned long)&__start_rodata) & PAGE_MASK;
- end = PFN_ALIGN((unsigned long)&__end_rodata);
+ address = ((unsigned long)&_stext) & PAGE_MASK;
+ end = PFN_ALIGN((unsigned long)&_eshared);
for (; address < end; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
@@ -173,8 +172,8 @@ void __init mem_init(void)
datasize >>10,
initsize >> 10);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
- (unsigned long)&__start_rodata,
- PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+ (unsigned long)&_stext,
+ PFN_ALIGN((unsigned long)&_eshared) - 1);
}
void free_initmem(void)
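setup_ro_region() now write protects everything from _stext up to _eshared, rounding the start down and the end up to page boundaries. A minimal sketch of just that range arithmetic, with PAGE_MASK and PFN_ALIGN re-defined locally for a 4 KB page and hypothetical symbol addresses:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_ALIGN(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long stext   = 0x100400;		/* hypothetical &_stext   */
	unsigned long eshared = 0x2fd000;		/* hypothetical &_eshared */
	unsigned long address = stext & PAGE_MASK;	/* round start down */
	unsigned long end     = PFN_ALIGN(eshared);	/* round end up     */

	/* Every page in [address, end) would be write protected. */
	printf("protecting %#lx - %#lx (%lu pages)\n",
	       address, end - 1, (end - address) / PAGE_SIZE);
	return 0;
}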
diff --git a/include/asm-s390/sections.h b/include/asm-s390/sections.h
index 3a0b8ffeab7a..1c5a2c4ccdad 100644
--- a/include/asm-s390/sections.h
+++ b/include/asm-s390/sections.h
@@ -3,4 +3,6 @@
#include <asm-generic/sections.h>
+extern char _eshared[];
+
#endif
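The declaration added to sections.h follows the usual idiom for linker-defined symbols: a char array whose address, not value, marks the section boundary. A small stand-alone sketch of how such a symbol is consumed from C, with a dummy definition standing in for the one normally provided by vmlinux.lds.S:

#include <stdio.h>

extern char _eshared[];		/* declaration as in asm-s390/sections.h       */
char _eshared[1];		/* dummy stand-in for the linker-script symbol */

int main(void)
{
	/* Only the symbol's address is meaningful; never read it as data. */
	printf("end of shareable data at %p\n", (void *)&_eshared);
	return 0;
}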