Diffstat (limited to 'arch/s390')

-rw-r--r--  arch/s390/Kconfig.debug          | 12
-rw-r--r--  arch/s390/include/asm/page.h     |  5
-rw-r--r--  arch/s390/kernel/compat_linux.c  |  1
-rw-r--r--  arch/s390/kernel/kprobes.c       | 70
-rw-r--r--  arch/s390/kernel/nmi.c           | 10
-rw-r--r--  arch/s390/kernel/vtime.c         | 19
-rw-r--r--  arch/s390/lib/delay.c            | 14
-rw-r--r--  arch/s390/mm/gup.c               |  7

8 files changed, 105 insertions, 33 deletions
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 45e0c6199f36..05221b13ffb1 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
+config STRICT_DEVMEM
+        def_bool y
+        prompt "Filter access to /dev/mem"
+        ---help---
+          This option restricts access to /dev/mem. If this option is
+          disabled, you allow userspace access to all memory, including
+          kernel and userspace memory. Accidental memory access is likely
+          to be disastrous.
+          Memory access is required for experts who want to debug the kernel.
+
+          If you are unsure, say Y.
+
 config DEBUG_STRICT_USER_COPY_CHECKS
         bool "Strict user copy size checks"
         ---help---
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a8729ea7e9ac..3c987e9ec8d6 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -130,6 +130,11 @@ struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+        return 0;
+}
+
 #define HAVE_ARCH_FREE_PAGE
 #define HAVE_ARCH_ALLOC_PAGE
 
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1e6449c79ab6..53acaa86dd94 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -25,7 +25,6 @@
 #include <linux/resource.h>
 #include <linux/times.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/sem.h>
 #include <linux/msg.h>
 #include <linux/shm.h>
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d60fc4398516..2564793ec2b6 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,6 +30,7 @@
 #include <asm/sections.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/hardirq.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
         /* Set the PER control regs, turns on single step for this address */
         __ctl_load(kprobe_per_regs, 9, 11);
         regs->psw.mask |= PSW_MASK_PER;
-        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 }
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
         __get_cpu_var(current_kprobe) = p;
         /* Save the interrupt and per flags */
         kcb->kprobe_saved_imask = regs->psw.mask &
-                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
         /* Save the control regs that govern PER */
         __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
 }
@@ -316,8 +317,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                 return 1;
 
 ss_probe:
-        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                local_irq_disable();
         prepare_singlestep(p, regs);
         kcb->kprobe_status = KPROBE_HIT_SS;
         return 1;
@@ -350,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         struct hlist_node *node, *tmp;
         unsigned long flags, orig_ret_address = 0;
         unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+        kprobe_opcode_t *correct_ret_addr = NULL;
 
         INIT_HLIST_HEAD(&empty_rp);
         kretprobe_hash_lock(current, &head, &flags);
@@ -372,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
                         /* another task is sharing our hash bucket */
                         continue;
 
-                if (ri->rp && ri->rp->handler)
-                        ri->rp->handler(ri, regs);
+                orig_ret_address = (unsigned long)ri->ret_addr;
+
+                if (orig_ret_address != trampoline_address)
+                        /*
+                         * This is the real return address. Any other
+                         * instances associated with this task are for
+                         * other calls deeper on the call stack
+                         */
+                        break;
+        }
+
+        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+        correct_ret_addr = ri->ret_addr;
+        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
 
                 orig_ret_address = (unsigned long)ri->ret_addr;
+
+                if (ri->rp && ri->rp->handler) {
+                        ri->ret_addr = correct_ret_addr;
+                        ri->rp->handler(ri, regs);
+                }
+
                 recycle_rp_inst(ri, &empty_rp);
 
                 if (orig_ret_address != trampoline_address) {
@@ -387,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
                         break;
                 }
         }
-        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
         regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
 
         reset_current_kprobe();
@@ -465,8 +487,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
                 goto out;
         }
         reset_current_kprobe();
-        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                local_irq_enable();
 out:
         preempt_enable_no_resched();
 
@@ -482,7 +502,7 @@ out:
         return 1;
 }
 
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 {
         struct kprobe *cur = kprobe_running();
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -508,8 +528,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                         restore_previous_kprobe(kcb);
                 else {
                         reset_current_kprobe();
-                        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                                local_irq_enable();
                 }
                 preempt_enable_no_resched();
                 break;
@@ -553,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
         return 0;
 }
 
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+        int ret;
+
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_disable();
+        ret = kprobe_trap_handler(regs, trapnr);
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+        return ret;
+}
+
 /*
  * Wrapper routine to for handling exceptions.
  */
@@ -560,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
 {
         struct die_args *args = (struct die_args *)data;
+        struct pt_regs *regs = args->regs;
         int ret = NOTIFY_DONE;
 
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_disable();
+
         switch (val) {
         case DIE_BPT:
                 if (kprobe_handler(args->regs))
@@ -572,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                 ret = NOTIFY_STOP;
                 break;
         case DIE_TRAP:
-                /* kprobe_running() needs smp_processor_id() */
-                preempt_disable();
-                if (kprobe_running() &&
-                    kprobe_fault_handler(args->regs, args->trapnr))
+                if (!preemptible() && kprobe_running() &&
+                    kprobe_trap_handler(args->regs, args->trapnr))
                         ret = NOTIFY_STOP;
-                preempt_enable();
                 break;
         default:
                 break;
         }
+
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+
         return ret;
 }
 
@@ -595,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
         /* setup return addr to the jprobe handler routine */
         regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 
         /* r14 is the function return address */
         kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index ac151399ef34..1995c1712fc8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
 static int notrace s390_revalidate_registers(struct mci *mci)
 {
         int kill_task;
-        u64 tmpclock;
         u64 zero;
         void *fpt_save_area, *fpt_creg_save_area;
 
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                 : "0", "cc");
 #endif
         /* Revalidate clock comparator register */
-        asm volatile(
-                "        stck        0(%1)\n"
-                "        sckc        0(%1)"
-                : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
-
+        if (S390_lowcore.clock_comparator == -1)
+                set_clock_comparator(S390_lowcore.mcck_clock);
+        else
+                set_clock_comparator(S390_lowcore.clock_comparator);
         /* Check if old PSW is valid */
         if (!mci->wp)
                 /*
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 56c8687b29b3..7eff9b7347c0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rcupdate.h>
 #include <linux/posix-timers.h>
+#include <linux/cpu.h>
 
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
@@ -566,6 +567,23 @@ void init_cpu_vtimer(void)
         __ctl_set_bit(0,10);
 }
 
+static int __cpuinit s390_nohz_notify(struct notifier_block *self,
+                                      unsigned long action, void *hcpu)
+{
+        struct s390_idle_data *idle;
+        long cpu = (long) hcpu;
+
+        idle = &per_cpu(s390_idle, cpu);
+        switch (action) {
+        case CPU_DYING:
+        case CPU_DYING_FROZEN:
+                idle->nohz_delay = 0;
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
+
 void __init vtime_init(void)
 {
         /* request the cpu timer external interrupt */
@@ -574,5 +592,6 @@ void __init vtime_init(void)
         /* Enable cpu timer interrupts on the boot cpu.
          */
         init_cpu_vtimer();
+        cpu_notifier(s390_nohz_notify, 0);
 }
 
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 752b362bf651..7c37ec359ec2 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
 {
         unsigned long mask, cr0, cr0_saved;
         u64 clock_saved;
+        u64 end;
 
+        mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+        end = get_clock() + (usecs << 12);
         clock_saved = local_tick_disable();
-        set_clock_comparator(get_clock() + (usecs << 12));
         __ctl_store(cr0_saved, 0, 0);
         cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
         __ctl_load(cr0 , 0, 0);
-        mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
         lockdep_off();
-        trace_hardirqs_on();
-        __load_psw_mask(mask);
-        local_irq_disable();
+        do {
+                set_clock_comparator(end);
+                trace_hardirqs_on();
+                __load_psw_mask(mask);
+                local_irq_disable();
+        } while (get_clock() < end);
         lockdep_on();
         __ctl_load(cr0_saved, 0, 0);
         local_tick_enable(clock_saved);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 38e641cdd977..45b405ca2567 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,18 +20,17 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
 {
-        unsigned long mask, result;
+        unsigned long mask;
         pte_t *ptep, pte;
         struct page *page;
 
-        result = write ? 0 : _PAGE_RO;
-        mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+        mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
         ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
         do {
                 pte = *ptep;
                 barrier();
-                if ((pte_val(pte) & mask) != result)
+                if ((pte_val(pte) & mask) != 0)
                         return 0;
                 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                 page = pte_page(pte);
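
The devmem_is_allowed() stub added to asm/page.h is what gives the new STRICT_DEVMEM option its effect: with the option enabled, the /dev/mem driver checks every page frame of a requested range against devmem_is_allowed() before honouring the access, and the s390 version always returns 0, so all of physical memory is filtered. A minimal sketch of such a per-pfn range check, using a hypothetical helper name rather than the actual drivers/char/mem.c code:

#include <linux/kernel.h>
#include <asm/page.h>

/*
 * Hypothetical sketch of a STRICT_DEVMEM-style filter, not the actual
 * drivers/char/mem.c code: walk the requested range page frame by page
 * frame and refuse the access as soon as devmem_is_allowed() rejects a
 * pfn. With the s390 stub above always returning 0, no range passes.
 */
static int devmem_range_allowed(unsigned long pfn, unsigned long size)
{
        unsigned long nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);

        while (nr_pages--) {
                if (!devmem_is_allowed(pfn))
                        return 0;       /* access denied */
                pfn++;
        }
        return 1;                       /* access allowed */
}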
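In the __udelay_disabled() change, end = get_clock() + (usecs << 12) converts the requested delay into TOD clock units: bit 51 of the s390 TOD clock advances once per microsecond, so one microsecond corresponds to 4096 (1 << 12) clock units. The new do/while loop then re-arms the clock comparator and re-enters the enabled wait until end has really passed, so an external interrupt other than the clock comparator no longer ends the delay early. A small sketch of the unit conversion, with hypothetical helper names not taken from the patch:

/*
 * Hypothetical helpers, not part of the patch: bit 51 of the s390 TOD
 * clock ticks once per microsecond, so a microsecond count becomes a
 * TOD delta by shifting left by 12 bits (1 us == 4096 TOD units).
 */
static inline unsigned long long usecs_to_tod(unsigned long long usecs)
{
        return usecs << 12;
}

static inline unsigned long long tod_to_usecs(unsigned long long tod)
{
        return tod >> 12;
}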