author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-06-03 14:29:47 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-06-03 14:29:47 -0700
commit    | d29e4723017b50a3f10395439f19f8d169515c01 (patch)
tree      | 4cd0ce0119f731fa6bb96cd67aae757adfc5e6ad /arch
parent    | 5306d766f15e72bc79c61d88f77e5a6b1fcc0e68 (diff)
parent    | aed7eb8367939244ba19445292ffdfc398e0d66a (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
"The main thing here is reviving hugetlb support using contiguous ptes,
which we ended up reverting at the last minute in 4.5 pending a fix
which went into the core mm/ code during the recent merge window.
- Revert a previous revert and get hugetlb going with contiguous hints
- Wire up missing compat syscalls
- Enable CONFIG_SET_MODULE_RONX by default
- Add missing line to our compat /proc/cpuinfo output
- Clarify levels in our page table dumps
- Fix booting with RANDOMIZE_TEXT_OFFSET enabled
- Misc fixes to the ARM CPU PMU driver (refcounting, probe failure)
- Remove some dead code and update a comment"
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: fix alignment when RANDOMIZE_TEXT_OFFSET is enabled
arm64: move {PAGE,CONT}_SHIFT into Kconfig
arm64: mm: dump: log span level
arm64: update stale PAGE_OFFSET comment
drivers/perf: arm_pmu: Avoid leaking pmu->irq_affinity on error
drivers/perf: arm_pmu: Defer the setting of __oprofile_cpu_pmu
drivers/perf: arm_pmu: Fix reference count of a device_node in of_pmu_irq_cfg
arm64: report CPU number in bad_mode
arm64: unistd32.h: wire up missing syscalls for compat tasks
arm64: Provide "model name" in /proc/cpuinfo for PER_LINUX32 tasks
arm64: enable CONFIG_SET_MODULE_RONX by default
arm64: Remove orphaned __addr_ok() definition
Revert "arm64: hugetlb: partial revert of 66b3923a1a0f"
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/Kconfig                | 12 |
-rw-r--r-- | arch/arm64/Kconfig.debug          | 25 |
-rw-r--r-- | arch/arm64/Makefile               |  4 |
-rw-r--r-- | arch/arm64/include/asm/elf.h      |  4 |
-rw-r--r-- | arch/arm64/include/asm/memory.h   |  3 |
-rw-r--r-- | arch/arm64/include/asm/page.h     | 12 |
-rw-r--r-- | arch/arm64/include/asm/uaccess.h  | 13 |
-rw-r--r-- | arch/arm64/include/asm/unistd.h   |  2 |
-rw-r--r-- | arch/arm64/include/asm/unistd32.h |  8 |
-rw-r--r-- | arch/arm64/kernel/cpuinfo.c       |  8 |
-rw-r--r-- | arch/arm64/kernel/traps.c         |  5 |
-rw-r--r-- | arch/arm64/mm/dump.c              |  8 |
-rw-r--r-- | arch/arm64/mm/hugetlbpage.c       | 14 |
13 files changed, 74 insertions, 44 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d92bc72..5fe320899eb4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
 	def_bool y
 
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
 config ARCH_MMAP_RND_BITS_MIN
 	default 14 if ARM64_64K_PAGES
 	default 16 if ARM64_16K_PAGES
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 710fde4ad0f0..0cc758cdd0dc 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
 	  who are working in architecture specific areas of the kernel.
 	  It is probably not a good idea to enable this feature in a
 	  production kernel.
-	  If in doubt, say "N"
+
+	  If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
 	bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  value.
 
 config DEBUG_SET_MODULE_RONX
-        bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
-        help
-          This option helps catch unintended modifications to loadable
-          kernel module's text and read-only data. It also prevents execution
-          of module data. Such protection may interfere with run-time code
-          patching and dynamic kernel tracing - and they might also protect
-          against certain classes of kernel exploits.
-          If in doubt, say "N".
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	default y
+	help
+	  Is this is set, kernel module text and rodata will be made read-only.
+	  This is to help catch accidental or malicious attempts to change the
+	  kernel's executable code.
+
+	  If in doubt, say Y.
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
 	  is to help catch accidental or malicious attempts to change the
 	  kernel's executable code.
 
-	  If in doubt, say Y
+	  If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
 	  alignment and potentially wasted space. Turn on this option
 	  if performance is more important than memory pressure.
 
-	  If in doubt, say N
+	  If in doubt, say N.
 
 source "drivers/hwtracing/coresight/Kconfig"
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 354d75402ace..7085e322dc42 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,7 +60,9 @@ head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7a09c48c0475..579b6e654f2d 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK		(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM	("v8b")
 #else
 #define COMPAT_ELF_PLATFORM	("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE	(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 72a3025bb583..31b73227b41f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *		 (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 17b45f7d96d3..8472c6def5ef 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT	16
-#define CONT_SHIFT	5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT	14
-#define CONT_SHIFT	7
-#else
-#define PAGE_SHIFT	12
-#define CONT_SHIFT	4
-#endif
+#define PAGE_SHIFT	CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT	CONFIG_ARM64_CONT_SHIFT
 
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..9e397a542756 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs)
 #define segment_eq(a, b)	((a) == (b))
 
 /*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)						\
-({								\
-	unsigned long flag;					\
-	asm("cmp %1, %0; cset %0, lo"				\
-	: "=&r" (flag)						\
-	: "r" (addr), "0" (current_thread_info()->addr_limit)	\
-	: "cc");						\
-	flag;							\
-})
-
-/*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
  *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe3c041..e78ac26324bd 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		390
+#define __NR_compat_syscalls		394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b761a2a..b7e8ef16ff0d 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3808470486f3..c173d329397f 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -22,6 +22,8 @@
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
+	bool compat = personality(current->personality) == PER_LINUX32;
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
 		 * "processor".  Give glibc what it expects.
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
+		if (compat)
+			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
 		 * software which does already (at least for 32-bit).
 		 */
 		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
+		if (compat) {
 #ifdef CONFIG_COMPAT
 			for (j = 0; compat_hwcap_str[j]; j++)
 				if (compat_elf_hwcap & (1 << j))
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c5392081b49b..f7cf463107df 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-		handler[reason], esr, esr_get_class_string(esr));
+	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+		handler[reason], smp_processor_id(), esr,
+		esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 8404190fe2bd..ccfde237d6e6 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
 
 struct pg_level {
 	const struct prot_bits *bits;
+	const char *name;
 	size_t num;
 	u64 mask;
 };
@@ -157,15 +158,19 @@ struct pg_level {
 static struct pg_level pg_level[] = {
 	{
 	}, { /* pgd */
+		.name	= "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pud */
+		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pmd */
+		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pte */
+		.name	= "PTE",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	},
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 				delta >>= 10;
 				unit++;
 			}
-			seq_printf(st->seq, "%9lu%c", delta, *unit);
+			seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+				   pg_level[st->level].name);
 			if (pg_level[st->level].bits)
 				dump_prot(st, pg_level[st->level].bits,
 					  pg_level[st->level].num);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa8aee7d6929..2e49bd252fe7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
+		hugetlb_add_hstate(CONT_PTE_SHIFT);
+	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
+		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+		hugetlb_add_hstate(CONT_PMD_SHIFT);
+	return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
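
Editorial note, not part of the commit: the ARM64_PAGE_SHIFT / ARM64_CONT_SHIFT defaults added to arch/arm64/Kconfig encode, per page-size granule, the page shift and (as a power of two) how many pages one contiguous-PTE hint spans, and the hugetlbpage.c hunk registers a huge page size of PAGE_SIZE * CONT_PTES. The standalone sketch below only works that arithmetic through for the three granules, assuming CONT_PTES equals 1 << CONT_SHIFT; it is an illustration, not kernel code.

```c
/* Sketch of the contiguous-PTE hugepage sizes implied by the
 * ARM64_PAGE_SHIFT / ARM64_CONT_SHIFT defaults above.
 * Assumption: CONT_PTES == 1 << CONT_SHIFT (pages per contiguous hint).
 */
#include <stdio.h>

int main(void)
{
	/* { granule, PAGE_SHIFT, CONT_SHIFT } from the Kconfig defaults */
	static const struct {
		const char *granule;
		int page_shift, cont_shift;
	} cfg[] = {
		{ "4K",  12, 4 },
		{ "16K", 14, 7 },
		{ "64K", 16, 5 },
	};

	for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		unsigned long page_size = 1UL << cfg[i].page_shift;
		unsigned long cont_ptes = 1UL << cfg[i].cont_shift;
		/* size registered via hugetlb_add_hstate(CONT_PTE_SHIFT) */
		unsigned long cont_size = page_size * cont_ptes;

		printf("%s pages: %lu contiguous ptes -> %lu KB hugepage\n",
		       cfg[i].granule, cont_ptes, cont_size >> 10);
	}
	return 0;
}
```

For 64K pages this comes out at 2MB, which appears to be the default huge page size the new add_default_hugepagesz() initcall registers when no hugepagesz= option has already claimed it.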