Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/kernel/cpu/common.c       |  4
 -rw-r--r--  arch/x86/kernel/crash.c            | 10
 -rw-r--r--  arch/x86/kernel/kexec-bzimage64.c  | 15
 -rw-r--r--  arch/x86/kernel/kvm.c              |  8
 -rw-r--r--  arch/x86/mm/ioremap.c              | 20
 -rw-r--r--  arch/x86/mm/numa.c                 | 89
 -rw-r--r--  arch/x86/purgatory/Makefile        |  3
 7 files changed, 88 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 700f958652f8..3eff36f719fb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -964,6 +964,7 @@ static void vgetcpu_set_mode(void)
vgetcpu_mode = VGETCPU_LSL;
}
+#ifdef CONFIG_IA32_EMULATION
/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
@@ -975,7 +976,8 @@ static void syscall32_cpu_init(void)
wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
-#endif
+#endif /* CONFIG_IA32_EMULATION */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
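[ Note: with this hunk applied, the preprocessor nesting in common.c ends
  up as sketched below; surrounding code is elided, and the labelled
  closing #endifs make the two levels explicit. ]

#ifdef CONFIG_X86_64
/* ... vgetcpu_set_mode() and friends ... */

#ifdef CONFIG_IA32_EMULATION
/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
	/* ... programs MSR_CSTAR et al. for the compat syscall path ... */
}
#endif /* CONFIG_IA32_EMULATION */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	/* ... */
}
#endif /* CONFIG_X86_32 */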
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a618fcd2c07d..f5ab56d14287 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -237,7 +237,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->max_nr_ranges++;
/* If crashk_low_res is not 0, another range split possible */
- if (crashk_low_res.end != 0)
+ if (crashk_low_res.end)
ced->max_nr_ranges++;
}
@@ -335,9 +335,11 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
if (ret)
return ret;
- ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
- if (ret)
- return ret;
+ if (crashk_low_res.end) {
+ ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret)
+ return ret;
+ }
/* Exclude GART region */
if (ced->gart_end) {
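[ Note: the reason .end works as an "in use" test is that crashk_low_res
  is a struct resource (linux/ioport.h) that stays zero-initialized when
  no crashkernel=...,low area was reserved; the old unconditional call
  amounted to exclude_mem_range(cmem, 0, 0), a request to punch a hole
  at physical address 0. A minimal sketch of the idiom follows; the
  initializer and the helper are illustrative, not copied from
  kernel/kexec.c. ]

#include <linux/ioport.h>

/* Stays all-zero unless a low crash region was actually reserved. */
struct resource crashk_low_res = {
	.name  = "Crash kernel low",		/* illustrative name */
	.start = 0,
	.end   = 0,				/* zero => never reserved */
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/* Hypothetical helper: non-zero .end is the "range is in use" test. */
static inline bool crashk_low_res_in_use(void)
{
	return crashk_low_res.end != 0;
}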
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 9642b9b33655..ca05f86481aa 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -26,6 +26,7 @@
#include <asm/setup.h>
#include <asm/crash.h>
#include <asm/efi.h>
+#include <asm/kexec-bzimage64.h>
#define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */
@@ -267,7 +268,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
return ret;
}
-int bzImage64_probe(const char *buf, unsigned long len)
+static int bzImage64_probe(const char *buf, unsigned long len)
{
int ret = -ENOEXEC;
struct setup_header *header;
@@ -325,10 +326,10 @@ int bzImage64_probe(const char *buf, unsigned long len)
return ret;
}
-void *bzImage64_load(struct kimage *image, char *kernel,
- unsigned long kernel_len, char *initrd,
- unsigned long initrd_len, char *cmdline,
- unsigned long cmdline_len)
+static void *bzImage64_load(struct kimage *image, char *kernel,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len)
{
struct setup_header *header;
@@ -514,7 +515,7 @@ out_free_params:
}
/* This cleanup function is called after various segments have been loaded */
-int bzImage64_cleanup(void *loader_data)
+static int bzImage64_cleanup(void *loader_data)
{
struct bzimage64_data *ldata = loader_data;
@@ -528,7 +529,7 @@ int bzImage64_cleanup(void *loader_data)
}
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
-int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
bool trusted;
int ret;
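[ Note: these functions can become static because they are now reached
  only through the loader's ops table, which is what the newly included
  asm/kexec-bzimage64.h exposes. A sketch of the registration pattern,
  assuming the struct kexec_file_ops layout from linux/kexec.h: ]

/* In kexec-bzimage64.c; the table is the only external entry point. */
struct kexec_file_ops kexec_bzImage64_ops = {
	.probe = bzImage64_probe,
	.load = bzImage64_load,
	.cleanup = bzImage64_cleanup,
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
	.verify_sig = bzImage64_verify_sig,
#endif
};

/* And in arch/x86/include/asm/kexec-bzimage64.h, roughly: */
extern struct kexec_file_ops kexec_bzImage64_ops;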
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 3dd8e2c4d74a..95c3cb16af3e 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
+#include <linux/nmi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -499,6 +500,13 @@ void __init kvm_guest_init(void)
#else
kvm_guest_cpu_init();
#endif
+
+ /*
+ * Hard lockup detection is enabled by default. Disable it, as guests
+ * can get false positives too easily, for example if the host is
+ * overcommitted.
+ */
+ watchdog_enable_hardlockup_detector(false);
}
static noinline uint32_t __kvm_cpuid_base(void)
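[ Note: the new <linux/nmi.h> include supplies the declaration of the
  toggle. The interface looks roughly like the sketch below; the exact
  config guard in linux/nmi.h may differ. ]

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void watchdog_enable_hardlockup_detector(bool val);
#else
/* No hard lockup detector built in: nothing to disable. */
static inline void watchdog_enable_hardlockup_detector(bool val)
{
}
#endif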
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index baff1da354e0..af78e50ca6ce 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -86,6 +86,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pgprot_t prot;
int retval;
void __iomem *ret_addr;
+ int ram_region;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -108,12 +109,23 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1)
+ /* First, check whether the whole region can be identified as RAM */
+ ram_region = region_is_ram(phys_addr, size);
+ if (ram_region > 0) {
+ WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
+ (unsigned long int)phys_addr,
+ (unsigned long int)last_addr);
return NULL;
+ }
+ /* If the region could not be identified (-1), check page by page */
+ if (ram_region < 0) {
+ pfn = phys_addr >> PAGE_SHIFT;
+ last_pfn = last_addr >> PAGE_SHIFT;
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1)
+ return NULL;
+ }
/*
* Mappings have to be page-aligned
*/
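[ Note: region_is_ram() (kernel/resource.c) is a tri-state check, which
  is what the two branches above key off. Its contract as relied on
  here; treat the signature as a sketch for this era's kernel. ]

/*
 * Returns:
 *   1  - [start, start + size) lies entirely within System RAM;
 *        the caller above warns and refuses the ioremap.
 *   0  - the range contains no System RAM at all; the expensive
 *        page-by-page walk can be skipped entirely.
 *  -1  - mixed or not identifiable; fall back to
 *        walk_system_ram_range() one page at a time.
 */
int region_is_ram(resource_size_t start, unsigned long size);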
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index d221374d5ce8..1a883705a12a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -463,6 +463,42 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
return true;
}
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+ int i, nid;
+ nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
+ unsigned long start, end;
+ struct memblock_region *r;
+
+ /*
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel. Setting the nid on memblock.reserved
+ * therefore marks all the nodes the kernel resides in.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ }
+
+ /* Mark all kernel nodes. */
+ for_each_memblock(reserved, r)
+ node_set(r->nid, numa_kernel_nodes);
+
+ /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ nid = numa_meminfo.blk[i].nid;
+ if (!node_isset(nid, numa_kernel_nodes))
+ continue;
+
+ start = numa_meminfo.blk[i].start;
+ end = numa_meminfo.blk[i].end;
+
+ memblock_clear_hotplug(start, end - start);
+ }
+}
+
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
unsigned long uninitialized_var(pfn_align);
@@ -481,6 +517,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
}
/*
+ * Early during boot, the kernel has to use some memory, e.g. for
+ * loading the kernel image. We cannot prevent this anyway. So any
+ * node the kernel resides in should be un-hotpluggable.
+ *
+ * And by the time we get here, allocating the node data won't fail.
+ */
+ numa_clear_kernel_node_hotplug();
+
+ /*
* If sections array is gonna be used for pfn -> nid mapping, check
* whether its granularity is fine enough.
*/
@@ -548,41 +593,6 @@ static void __init numa_init_array(void)
}
}
-static void __init numa_clear_kernel_node_hotplug(void)
-{
- int i, nid;
- nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
- unsigned long start, end;
- struct memblock_region *r;
-
- /*
- * At this time, all memory regions reserved by memblock are
- * used by the kernel. Set the nid in memblock.reserved will
- * mark out all the nodes the kernel resides in.
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = &numa_meminfo.blk[i];
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.reserved, mb->nid);
- }
-
- /* Mark all kernel nodes. */
- for_each_memblock(reserved, r)
- node_set(r->nid, numa_kernel_nodes);
-
- /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- nid = numa_meminfo.blk[i].nid;
- if (!node_isset(nid, numa_kernel_nodes))
- continue;
-
- start = numa_meminfo.blk[i].start;
- end = numa_meminfo.blk[i].end;
-
- memblock_clear_hotplug(start, end - start);
- }
-}
-
static int __init numa_init(int (*init_func)(void))
{
int i;
@@ -637,15 +647,6 @@ static int __init numa_init(int (*init_func)(void))
}
numa_init_array();
- /*
- * At very early time, the kernel have to use some memory such as
- * loading the kernel image. We cannot prevent this anyway. So any
- * node the kernel resides in should be un-hotpluggable.
- *
- * And when we come here, numa_init() won't fail.
- */
- numa_clear_kernel_node_hotplug();
-
return 0;
}
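[ Note: the function body is unchanged by the move; only its call site
  shifts from numa_init() into numa_register_memblks(). For reference,
  the for_each_memblock() iterator the moved code uses is, in this
  era's linux/memblock.h, roughly: ]

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions +			\
		       memblock.memblock_type.cnt);			\
	     region++)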
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 899dd2454256..f52e033557c9 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -18,8 +18,9 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
targets += kexec-purgatory.c
+CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = cat $(obj)/purgatory.ro | $(objtree)/scripts/basic/bin2c kexec_purgatory > $(obj)/kexec-purgatory.c
+ cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
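[ Note: $< is the first prerequisite (purgatory.ro) and $@ the target
  (kexec-purgatory.c), so the new rule drops both the useless cat and
  the hard-coded paths. bin2c reads the binary on stdin and emits a C
  array named by its argument; the generated file looks roughly like
  the following sketch (byte values and count illustrative): ]

/* Generated by: bin2c kexec_purgatory < purgatory.ro > kexec-purgatory.c */
const char kexec_purgatory[] =
	"\x7f\x45\x4c\x46\x02\x01\x01\x00"	/* purgatory.ro, escaped */
	/* ... */
	;
const unsigned long kexec_purgatory_size = 12345;	/* literal byte count */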