Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/Makefile             2
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile        5
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c    72
-rw-r--r--  arch/x86_64/kernel/aperture.c           2
-rw-r--r--  arch/x86_64/kernel/apic.c              92
-rw-r--r--  arch/x86_64/kernel/entry.S              8
-rw-r--r--  arch/x86_64/kernel/head.S               7
-rw-r--r--  arch/x86_64/kernel/io_apic.c           50
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c      2
-rw-r--r--  arch/x86_64/kernel/mce.c                2
-rw-r--r--  arch/x86_64/kernel/mpparse.c           12
-rw-r--r--  arch/x86_64/kernel/nmi.c               26
-rw-r--r--  arch/x86_64/kernel/pci-dma.c            3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c          39
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c          7
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c        2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c           25
-rw-r--r--  arch/x86_64/kernel/process.c            4
-rw-r--r--  arch/x86_64/kernel/setup.c             16
-rw-r--r--  arch/x86_64/kernel/smpboot.c            7
-rw-r--r--  arch/x86_64/kernel/time.c              98
-rw-r--r--  arch/x86_64/kernel/traps.c             39
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S       10
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c        5
24 files changed, 416 insertions, 119 deletions
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 72fe60c20d39..a098a11e7755 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -43,7 +43,7 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0
bootflag-y += ../../i386/kernel/bootflag.o
cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
-topology-y += ../../i386/mach-default/topology.o
+topology-y += ../../i386/kernel/topology.o
microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 7da9ace890bd..4fe97071f297 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -1,3 +1,8 @@
obj-y := boot.o
boot-y := ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += processor.o
+endif
+
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
new file mode 100644
index 000000000000..3bdc2baa5bb1
--- /dev/null
+++ b/arch/x86_64/kernel/acpi/processor.c
@@ -0,0 +1,72 @@
+/*
+ * arch/x86_64/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ return;
+ }
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj_list);
+ return;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj);
+ kfree(obj_list);
+ return;
+ }
+
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+ buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+ pr->pdc = obj_list;
+
+ return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+ unsigned int cpu = pr->id;
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+
+ pr->pdc = NULL;
+ if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
+ init_intel_pdc(pr, c);
+
+ return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
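For readers unfamiliar with _PDC: the 12-byte buffer built above is three u32s -- the _PDC revision, the number of capability dwords (one here), and the capability word itself (ACPI_PDC_EST_CAPABILITY_SMP). A sketch of the same construction with a single allocation, so one kfree() can release everything, is shown below; this is a hypothetical variant, not part of the patch, and it assumes the same #includes as processor.c above:

	static void init_intel_pdc_sketch(struct acpi_processor *pr)
	{
		struct acpi_object_list *obj_list;
		union acpi_object *obj;
		u32 *buf;

		/* one block: object list, then the object, then the 3-dword buffer */
		obj_list = kmalloc(sizeof(*obj_list) + sizeof(*obj) + 3 * sizeof(u32),
				   GFP_KERNEL);
		if (!obj_list) {
			printk(KERN_ERR "Memory allocation error\n");
			return;
		}
		obj = (union acpi_object *)(obj_list + 1);
		buf = (u32 *)(obj + 1);

		buf[0] = ACPI_PDC_REVISION_ID;		/* _PDC revision */
		buf[1] = 1;				/* one capability dword follows */
		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;	/* Enhanced SpeedStep capable on SMP */

		obj->type = ACPI_TYPE_BUFFER;
		obj->buffer.length = 3 * sizeof(u32);
		obj->buffer.pointer = (u8 *) buf;
		obj_list->count = 1;
		obj_list->pointer = obj;
		pr->pdc = obj_list;
	}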
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index e4e2b7d01f89..a0f955b9995f 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -248,7 +248,7 @@ void __init iommu_hole_init(void)
/* Got the aperture from the AGP bridge */
} else if (swiotlb && !valid_agp) {
/* Do nothing */
- } else if ((!no_iommu && end_pfn >= MAX_DMA32_PFN) ||
+ } else if ((!no_iommu && end_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
fallback_aper_force) {
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 5d3c5b07b8db..e5b14c57eaa0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -35,8 +35,12 @@
#include <asm/mach_apic.h>
#include <asm/nmi.h>
#include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
int apic_verbosity;
+int apic_runs_main_timer;
+int apic_calibrate_pmtmr __initdata;
int disable_apic_timer __initdata;
@@ -68,6 +72,26 @@ int get_maxlvt(void)
return maxlvt;
}
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ * But don't ack when the APIC is disabled. -AK
+ */
+ if (!disable_apic)
+ ack_APIC_irq();
+}
+
void clear_local_APIC(void)
{
int maxlvt;
@@ -684,7 +708,7 @@ static void setup_APIC_timer(unsigned int clocks)
local_irq_save(flags);
/* wait for irq slice */
- if (vxtime.hpet_address) {
+ if (vxtime.hpet_address && hpet_use_timer) {
int trigger = hpet_readl(HPET_T0_CMP);
while (hpet_readl(HPET_COUNTER) >= trigger)
/* do nothing */ ;
@@ -702,9 +726,17 @@ static void setup_APIC_timer(unsigned int clocks)
c2 |= inb_p(0x40) << 8;
} while (c2 - c1 < 300);
}
-
__setup_APIC_LVTT(clocks);
-
+ /* Turn off PIT interrupt if we use APIC timer as main timer.
+ Only works with the PM timer right now
+ TBD fix it for HPET too. */
+ if (vxtime.mode == VXTIME_PMTMR &&
+ smp_processor_id() == boot_cpu_id &&
+ apic_runs_main_timer == 1 &&
+ !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
+ stop_timer_interrupt();
+ apic_runs_main_timer++;
+ }
local_irq_restore(flags);
}
@@ -735,14 +767,27 @@ static int __init calibrate_APIC_clock(void)
__setup_APIC_LVTT(1000000000);
apic_start = apic_read(APIC_TMCCT);
- rdtscl(tsc_start);
-
- do {
+#ifdef CONFIG_X86_PM_TIMER
+ if (apic_calibrate_pmtmr && pmtmr_ioport) {
+ pmtimer_wait(5000); /* 5ms wait */
apic = apic_read(APIC_TMCCT);
- rdtscl(tsc);
- } while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT);
+ result = (apic_start - apic) * 1000L / 5;
+ } else
+#endif
+ {
+ rdtscl(tsc_start);
+
+ do {
+ apic = apic_read(APIC_TMCCT);
+ rdtscl(tsc);
+ } while ((tsc - tsc_start) < TICK_COUNT &&
+ (apic - apic_start) < TICK_COUNT);
+
+ result = (apic_start - apic) * 1000L * cpu_khz /
+ (tsc - tsc_start);
+ }
+ printk("result %d\n", result);
- result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
result / 1000 / 1000, result / 1000 % 1000);
@@ -872,6 +917,8 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
#ifdef CONFIG_SMP
update_process_times(user_mode(regs));
#endif
+ if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
+ main_timer_handler(regs);
/*
* We take the 'long' return path, and there every subsystem
* grabs the appropriate locks (kernel lock/ irq lock).
@@ -924,7 +971,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET.
*/
-__init int oem_force_hpet_timer(void)
+__cpuinit int oem_force_hpet_timer(void)
{
int i, clusters, zeros;
unsigned id;
@@ -1081,10 +1128,35 @@ static __init int setup_nolapic(char *str)
static __init int setup_noapictimer(char *str)
{
+ if (str[0] != ' ' && str[0] != 0)
+ return -1;
disable_apic_timer = 1;
return 0;
}
+static __init int setup_apicmaintimer(char *str)
+{
+ apic_runs_main_timer = 1;
+ nohpet = 1;
+ return 0;
+}
+__setup("apicmaintimer", setup_apicmaintimer);
+
+static __init int setup_noapicmaintimer(char *str)
+{
+ apic_runs_main_timer = -1;
+ return 0;
+}
+__setup("noapicmaintimer", setup_noapicmaintimer);
+
+static __init int setup_apicpmtimer(char *s)
+{
+ apic_calibrate_pmtmr = 1;
+ notsc_setup(NULL);
+ return setup_apicmaintimer(NULL);
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+
/* dummy parsing: see setup.c */
__setup("disableapic", setup_disableapic);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index dbdba56e8faa..7c10e9009d61 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -499,7 +499,9 @@ ENTRY(stub_rt_sigreturn)
movq %gs:pda_irqstackptr,%rax
cmoveq %rax,%rsp /*todo This needs CFI annotation! */
pushq %rdi # save old stack
+#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET 8
+#endif
call \func
.endm
@@ -509,7 +511,9 @@ ENTRY(common_interrupt)
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
popq %rdi
+#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET -8
+#endif
cli
decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO
@@ -550,6 +554,7 @@ iret_label:
/* running with kernel gs */
bad_iret:
movq $-9999,%rdi /* better code? */
+ sti
jmp do_exit
.previous
@@ -922,7 +927,7 @@ KPROBE_ENTRY(debug)
.previous .text
/* runs on exception stack */
-ENTRY(nmi)
+KPROBE_ENTRY(nmi)
INTR_FRAME
pushq $-1
CFI_ADJUST_CFA_OFFSET 8
@@ -969,6 +974,7 @@ paranoid_schedule:
cli
jmp paranoid_userspace
CFI_ENDPROC
+ .previous .text
KPROBE_ENTRY(int3)
INTR_FRAME
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 692c737feddb..02fc7fa0ea28 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -213,6 +213,11 @@ ENTRY(early_idt_handler)
cmpl $2,early_recursion_flag(%rip)
jz 1f
call dump_stack
+#ifdef CONFIG_KALLSYMS
+ leaq early_idt_ripmsg(%rip),%rdi
+ movq 8(%rsp),%rsi # get rip again
+ call __print_symbol
+#endif
1: hlt
jmp 1b
early_recursion_flag:
@@ -220,6 +225,8 @@ early_recursion_flag:
early_idt_msg:
.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+early_idt_ripmsg:
+ .asciz "RIP %s\n"
.code32
ENTRY(no_long_mode)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index e8cf44ef8778..ffed464e6b12 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -30,6 +30,9 @@
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
+#ifdef CONFIG_ACPI
+#include <acpi/acpi_bus.h>
+#endif
#include <asm/io.h>
#include <asm/smp.h>
@@ -47,6 +50,8 @@ static int no_timer_check;
int disable_timer_pin_1 __initdata;
+int timer_over_8254 __initdata = 1;
+
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -248,6 +253,20 @@ static int __init enable_ioapic_setup(char *str)
__setup("noapic", disable_ioapic_setup);
__setup("apic", enable_ioapic_setup);
+static int __init setup_disable_8254_timer(char *s)
+{
+ timer_over_8254 = -1;
+ return 1;
+}
+static int __init setup_enable_8254_timer(char *s)
+{
+ timer_over_8254 = 2;
+ return 1;
+}
+
+__setup("disable_8254_timer", setup_disable_8254_timer);
+__setup("enable_8254_timer", setup_enable_8254_timer);
+
#include <asm/pci-direct.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
@@ -260,6 +279,8 @@ __setup("apic", enable_ioapic_setup);
And another hack to disable the IOMMU on VIA chipsets.
+ ... and others. Really should move this somewhere else.
+
Kludge-O-Rama. */
void __init check_ioapic(void)
{
@@ -304,8 +325,20 @@ void __init check_ioapic(void)
#endif
/* RED-PEN skip them on mptables too? */
return;
+
+ /* This should be actually default, but
+ for 2.6.16 let's do it for ATI only where
+ it's really needed. */
+ case PCI_VENDOR_ID_ATI:
+ if (timer_over_8254 == 1) {
+ timer_over_8254 = 0;
+ printk(KERN_INFO
+ "ATI board detected. Disabling timer routing over 8254.\n");
+ }
+ return;
}
+
/* No multi-function device? */
type = read_pci_config_byte(num,slot,func,
PCI_HEADER_TYPE);
@@ -1749,6 +1782,8 @@ static inline void unlock_ExtINT_logic(void)
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
+ *
+ * FIXME: really need to revamp this for modern platforms only.
*/
static inline void check_timer(void)
{
@@ -1771,7 +1806,8 @@ static inline void check_timer(void)
*/
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
- enable_8259A_irq(0);
+ if (timer_over_8254 > 0)
+ enable_8259A_irq(0);
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
@@ -1826,7 +1862,7 @@ static inline void check_timer(void)
}
printk(" failed.\n");
- if (nmi_watchdog) {
+ if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
nmi_watchdog = 0;
}
@@ -2027,7 +2063,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -2049,8 +2085,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
- entry.trigger = edge_level;
- entry.polarity = active_high_low;
+ entry.trigger = triggering;
+ entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
@@ -2065,9 +2101,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
- edge_level, active_high_low);
+ triggering, polarity);
- ioapic_register_intr(irq, entry.vector, edge_level);
+ ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
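Putting the io_apic.c pieces above together, the new timer_over_8254 flag ends up in one of these states (a summary derived from the hunks; only the flag and option names come from the patch):

	 1   default: route the timer IRQ through the 8254, i.e. check_timer() calls enable_8259A_irq(0)
	 2   "enable_8254_timer" on the command line: same routing, and the ATI quirk leaves it alone
	-1   "disable_8254_timer" on the command line: never enable the 8259 path
	 0   set by check_ioapic() on ATI bridges when the value is still at the default

check_timer() only enables IRQ 0 on the 8259 when the value is positive.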
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 89fab51e20f4..25ac8a3faae6 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -140,7 +140,7 @@ static void load_segments(void)
"\tmovl %0,%%ss\n"
"\tmovl %0,%%fs\n"
"\tmovl %0,%%gs\n"
- : : "a" (__KERNEL_DS)
+ : : "a" (__KERNEL_DS) : "memory"
);
}
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 13a2eada6c95..b8b9529fa89e 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -380,7 +380,7 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
*/
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
- static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
+ static cpumask_t mce_cpus = CPU_MASK_NONE;
mce_cpu_quirks(c);
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1105250bf02c..9013a90b5c2e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -288,9 +288,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
memcpy(str,mpc->mpc_productid,12);
str[12]=0;
- printk(KERN_INFO "Product ID: %s ",str);
+ printk("Product ID: %s ",str);
- printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
+ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
/* save the local APIC address, it might be non-default */
if (!acpi_lapic)
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
- if (edge_level) {
+ if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
- edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
- active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5fae6f0cd994..5bf17e41cd2d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
+#include <linux/kprobes.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
@@ -235,6 +236,7 @@ static void enable_lapic_nmi_watchdog(void)
{
if (nmi_active < 0) {
nmi_watchdog = NMI_LOCAL_APIC;
+ touch_nmi_watchdog();
setup_apic_nmi_watchdog();
}
}
@@ -455,20 +457,22 @@ static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog (void)
{
- int i;
+ if (nmi_watchdog > 0) {
+ unsigned cpu;
- /*
- * Tell other CPUs to reset their alert counters. We cannot
- * do it ourselves because the alert count increase is not
- * atomic.
- */
- for (i = 0; i < NR_CPUS; i++)
- per_cpu(nmi_touch, i) = 1;
+ /*
+ * Tell other CPUs to reset their alert counters. We cannot
+ * do it ourselves because the alert count increase is not
+ * atomic.
+ */
+ for_each_present_cpu (cpu)
+ per_cpu(nmi_touch, cpu) = 1;
+ }
touch_softlockup_watchdog();
}
-void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
+void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
int sum;
int touched = 0;
@@ -512,14 +516,14 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
}
}
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
return 0;
}
static nmi_callback_t nmi_callback = dummy_nmi_callback;
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
int cpu = safe_smp_processor_id();
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 2f5d8328e2b9..4ed391edd47a 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -107,6 +107,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
goto again;
}
+ /* Let low level make its own zone decisions */
+ gfp &= ~(GFP_DMA32|GFP_DMA);
+
if (dma_ops->alloc_coherent)
return dma_ops->alloc_coherent(dev, size,
dma_handle, gfp);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index c37fc7726ba6..0c3f052ba6ce 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -228,11 +228,6 @@ static inline int need_iommu(struct device *dev, unsigned long addr, size_t size
int mmu = high;
if (force_iommu)
mmu = 1;
- if (no_iommu) {
- if (high)
- panic("PCI-DMA: high address but no IOMMU.\n");
- mmu = 0;
- }
return mmu;
}
@@ -241,11 +236,6 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
u64 mask = *dev->dma_mask;
int high = addr + size >= mask;
int mmu = high;
- if (no_iommu) {
- if (high)
- panic("PCI-DMA: high address but no IOMMU.\n");
- mmu = 0;
- }
return mmu;
}
@@ -379,7 +369,7 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
SET_LEAK(iommu_page);
addr += PAGE_SIZE;
iommu_page++;
- }
+ }
}
BUG_ON(iommu_page - iommu_start != pages);
return 0;
@@ -457,9 +447,12 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
error:
flush_gart(NULL);
gart_unmap_sg(dev, sg, nents, dir);
- /* When it was forced try again unforced */
- if (force_iommu)
- return dma_map_sg_nonforce(dev, sg, nents, dir);
+ /* When it was forced or merged try again in a dumb way */
+ if (force_iommu || iommu_merge) {
+ out = dma_map_sg_nonforce(dev, sg, nents, dir);
+ if (out > 0)
+ return out;
+ }
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
iommu_full(dev, pages << PAGE_SHIFT, dir);
@@ -631,20 +624,25 @@ static int __init pci_iommu_init(void)
(agp_copy_info(agp_bridge, &info) < 0);
#endif
- if (swiotlb) {
- no_iommu = 1;
+ if (swiotlb)
return -1;
- }
-
+
if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
!iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
- no_iommu = 1;
- no_iommu_init();
+ printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+ if (end_pfn > MAX_DMA32_PFN) {
+ printk(KERN_ERR "WARNING more than 4GB of memory "
+ "but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+ KERN_ERR "You might want to enable "
+ "CONFIG_GART_IOMMU\n");
+ }
return -1;
}
+ printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
@@ -718,7 +716,6 @@ static int __init pci_iommu_init(void)
flush_gart(NULL);
- printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
dma_ops = &gart_dma_ops;
return 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index e41564975195..44adcc2d5e5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -88,12 +88,5 @@ void __init no_iommu_init(void)
{
if (dma_ops)
return;
- printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
dma_ops = &nommu_dma_ops;
- if (end_pfn > MAX_DMA32_PFN) {
- printk(KERN_ERR
- "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n"
- KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
- }
}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 3569a25ad7fb..990ed67896f2 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -35,8 +35,8 @@ void pci_swiotlb_init(void)
(end_pfn > MAX_DMA32_PFN || force_iommu))
swiotlb = 1;
if (swiotlb) {
- swiotlb_init();
printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+ swiotlb_init();
dma_ops = &swiotlb_dma_ops;
}
}
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index feb5f108dd26..5c51d10408a6 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -80,6 +80,31 @@ int pmtimer_mark_offset(void)
return lost - 1;
}
+static unsigned pmtimer_wait_tick(void)
+{
+ u32 a, b;
+ for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
+ a == b;
+ b = inl(pmtmr_ioport) & ACPI_PM_MASK)
+ ;
+ return b;
+}
+
+/* note: wait time is rounded up to one tick */
+void pmtimer_wait(unsigned us)
+{
+ u32 a, b;
+ a = pmtimer_wait_tick();
+ do {
+ b = inl(pmtmr_ioport);
+ } while (cyc2us(b - a) < us);
+}
+
+void pmtimer_resume(void)
+{
+ last_pmtmr_tick = inl(pmtmr_ioport);
+}
+
unsigned int do_gettimeoffset_pm(void)
{
u32 now, offset, delta = 0;
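pmtimer_wait() above first synchronizes to a PM-timer edge via pmtimer_wait_tick() and then polls until cyc2us() of the elapsed ticks reaches the requested microseconds. As a worked number (the 3.579545 MHz ACPI PM timer rate comes from the ACPI spec, not from this hunk): the calibration path in apic.c calls pmtimer_wait(5000), i.e. roughly 5000 * 3.579545 ≈ 17,898 timer ticks of busy waiting, comfortably inside the 24-bit window that ACPI_PM_MASK keeps.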
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 8ded407e4a94..22a05dec81a2 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -516,8 +516,10 @@ out:
* This could still be optimized:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
+ *
+ * Kprobes not supported here. Set the probe on schedule instead.
*/
-struct task_struct *
+__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 28895c03cb11..aa55e3cec665 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -94,7 +94,6 @@ unsigned long saved_video_mode;
/*
* Setup options
*/
-struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
unsigned short length;
@@ -424,6 +423,12 @@ static __init void parse_cmdline_early (char ** cmdline_p)
else if(!memcmp(from, "elfcorehdr=", 11))
elfcorehdr_addr = memparse(from+11, &from);
#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+ else if (!memcmp(from, "additional_cpus=", 16))
+ setup_additional_cpus(from+16);
+#endif
+
next_char:
c = *(from++);
if (!c)
@@ -572,7 +577,6 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernel_end;
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
- drive_info = DRIVE_INFO;
screen_info = SCREEN_INFO;
edid_info = EDID_INFO;
saved_video_mode = SAVED_VIDEO_MODE;
@@ -741,7 +745,7 @@ void __init setup_arch(char **cmdline_p)
e820_setup_gap();
#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
+ iommu_hole_init();
#endif
#ifdef CONFIG_VT
@@ -877,6 +881,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
+ unsigned level;
#ifdef CONFIG_SMP
unsigned long value;
@@ -899,6 +904,11 @@ static int __init init_amd(struct cpuinfo_x86 *c)
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
+ /* On C+ stepping K8 rep microcode works well for copy/memset */
+ level = cpuid_eax(1);
+ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+
r = get_model_name(c);
if (!r) {
switch (c->x86) {
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index a28756ef7cef..66e98659d077 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -59,6 +59,7 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include <asm/numa.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -890,6 +891,7 @@ do_rest:
if (boot_error) {
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ clear_node_cpumask(cpu); /* was set by numa_add_cpu */
cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map);
x86_cpu_to_apicid[cpu] = BAD_APICID;
@@ -1150,8 +1152,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
setup_ioapic_dest();
#endif
- time_init_gtod();
-
check_nmi_watchdog();
}
@@ -1187,6 +1187,7 @@ void remove_cpu_from_maps(void)
cpu_clear(cpu, cpu_callout_map);
cpu_clear(cpu, cpu_callin_map);
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ clear_node_cpumask(cpu);
}
int __cpu_disable(void)
@@ -1241,7 +1242,7 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
-static __init int setup_additional_cpus(char *s)
+__init int setup_additional_cpus(char *s)
{
return get_option(&s, &additional_cpus);
}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index f8c47c688443..3080f84bf7b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -48,10 +48,12 @@ static void cpufreq_delayed_get(void);
extern void i8254_timer_resume(void);
extern int using_apic_timer;
+static char *time_init_gtod(void);
+
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
-static int nohpet __initdata = 0;
+int nohpet __initdata = 0;
static int notsc __initdata = 0;
#undef HPET_HACK_ENABLE_DANGEROUS
@@ -59,7 +61,7 @@ static int notsc __initdata = 0;
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
-static int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
+int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks; /* command line option */
unsigned long long monotonic_base;
@@ -326,7 +328,10 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
print_symbol("rip %s\n", regs->rip);
if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
printk(KERN_WARNING "Falling back to HPET\n");
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
vxtime.mode = VXTIME_HPET;
do_gettimeoffset = do_gettimeoffset_hpet;
}
@@ -345,7 +350,7 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
#endif
}
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+void main_timer_handler(struct pt_regs *regs)
{
static unsigned long rtc_update = 0;
unsigned long tsc;
@@ -458,12 +463,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
write_sequnlock(&xtime_lock);
+}
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (apic_runs_main_timer > 1)
+ return IRQ_HANDLED;
+ main_timer_handler(regs);
#ifdef CONFIG_X86_LOCAL_APIC
if (using_apic_timer)
smp_send_timer_broadcast_ipi();
#endif
-
return IRQ_HANDLED;
}
@@ -743,7 +753,7 @@ static __init int late_hpet_init(void)
* Timer0 and Timer1 is used by platform.
*/
hd.hd_phys_address = vxtime.hpet_address;
- hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
+ hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
hd.hd_nirqs = ntimer;
hd.hd_flags = HPET_DATA_PLATFORM;
hpet_reserve_timer(&hd, 0);
@@ -843,17 +853,43 @@ static int hpet_reenable(void)
return hpet_timer_stop_set_go(hpet_tick);
}
-void __init pit_init(void)
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+
+static void __init __pit_init(int val, u8 mode)
{
unsigned long flags;
spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
- outb_p(LATCH & 0xff, 0x40); /* LSB */
- outb_p(LATCH >> 8, 0x40); /* MSB */
+ outb_p(mode, PIT_MODE);
+ outb_p(val & 0xff, PIT_CH0); /* LSB */
+ outb_p(val >> 8, PIT_CH0); /* MSB */
spin_unlock_irqrestore(&i8253_lock, flags);
}
+void __init pit_init(void)
+{
+ __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
+}
+
+void __init pit_stop_interrupt(void)
+{
+ __pit_init(0, 0x30); /* mode 0 */
+}
+
+void __init stop_timer_interrupt(void)
+{
+ char *name;
+ if (vxtime.hpet_address) {
+ name = "HPET";
+ hpet_timer_stop_set_go(0);
+ } else {
+ name = "PIT";
+ pit_stop_interrupt();
+ }
+ printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
+}
+
int __init time_setup(char *str)
{
report_lost_ticks = 1;
@@ -867,6 +903,7 @@ static struct irqaction irq0 = {
void __init time_init(void)
{
char *timename;
+ char *gtod;
#ifdef HPET_HACK_ENABLE_DANGEROUS
if (!vxtime.hpet_address) {
@@ -911,28 +948,26 @@ void __init time_init(void)
timename = "PIT";
}
- printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
- vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
+ vxtime.mode = VXTIME_TSC;
+ gtod = time_init_gtod();
+
+ printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n",
+ vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
- vxtime.mode = VXTIME_TSC;
vxtime.quot = (1000000L << 32) / vxtime_hz;
vxtime.tsc_quot = (1000L << 32) / cpu_khz;
vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
set_cyc2ns_scale(cpu_khz);
-
-#ifndef CONFIG_SMP
- time_init_gtod();
-#endif
}
/*
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
-__init int unsynchronized_tsc(void)
+__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
if (oem_force_hpet_timer())
@@ -947,9 +982,9 @@ __init int unsynchronized_tsc(void)
}
/*
- * Decide after all CPUs are booted what mode gettimeofday should use.
+ * Decide what mode gettimeofday should use.
*/
-void __init time_init_gtod(void)
+__init static char *time_init_gtod(void)
{
char *timetype;
@@ -957,7 +992,10 @@ void __init time_init_gtod(void)
notsc = 1;
if (vxtime.hpet_address && notsc) {
timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
vxtime.mode = VXTIME_HPET;
do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
@@ -974,8 +1012,7 @@ void __init time_init_gtod(void)
timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
vxtime.mode = VXTIME_TSC;
}
-
- printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
+ return timetype;
}
__setup("report_lost_ticks", time_setup);
@@ -1016,9 +1053,21 @@ static int timer_resume(struct sys_device *dev)
write_seqlock_irqsave(&xtime_lock,flags);
xtime.tv_sec = sec;
xtime.tv_nsec = 0;
+ if (vxtime.mode == VXTIME_HPET) {
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
+#ifdef CONFIG_X86_PM_TIMER
+ } else if (vxtime.mode == VXTIME_PMTMR) {
+ pmtimer_resume();
+#endif
+ } else
+ vxtime.last_tsc = get_cycles_sync();
write_sequnlock_irqrestore(&xtime_lock,flags);
jiffies += sleep_length;
wall_jiffies += sleep_length;
+ monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
touch_softlockup_watchdog();
return 0;
}
@@ -1278,8 +1327,7 @@ static int __init nohpet_setup(char *s)
__setup("nohpet", nohpet_setup);
-
-static int __init notsc_setup(char *s)
+int __init notsc_setup(char *s)
{
notsc = 1;
return 0;
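A short decoding of the PIT command bytes used above (the mode-byte breakdown and HZ value are background, not part of the patch): 0x34 selects channel 0, lobyte/hibyte access, mode 2 (rate generator), binary counting, so pit_init() programs the periodic tick; 0x30 is the same channel and access but mode 0 (one-shot, interrupt on terminal count), so after pit_stop_interrupt() the channel stops producing the periodic IRQ 0. With the 1,193,182 Hz PIT input clock and HZ=1000, LATCH = 1193182 / 1000 ≈ 1193, i.e. roughly a 1 ms tick. stop_timer_interrupt() picks HPET or PIT depending on whether vxtime.hpet_address is set, which is what the apic.c hunk calls when "apicmaintimer" takes over the tick.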
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8bb0aeda78b9..28d50dc540e8 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -90,6 +90,20 @@ static inline void conditional_sti(struct pt_regs *regs)
local_irq_enable();
}
+static inline void preempt_conditional_sti(struct pt_regs *regs)
+{
+ preempt_disable();
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+}
+
+static inline void preempt_conditional_cli(struct pt_regs *regs)
+{
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_disable();
+ preempt_enable_no_resched();
+}
+
static int kstack_depth_to_print = 10;
#ifdef CONFIG_KALLSYMS
@@ -372,7 +386,7 @@ void out_of_line_bug(void)
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
-unsigned long oops_begin(void)
+unsigned __kprobes long oops_begin(void)
{
int cpu = safe_smp_processor_id();
unsigned long flags;
@@ -391,7 +405,7 @@ unsigned long oops_begin(void)
return flags;
}
-void oops_end(unsigned long flags)
+void __kprobes oops_end(unsigned long flags)
{
die_owner = -1;
bust_spinlocks(0);
@@ -400,7 +414,7 @@ void oops_end(unsigned long flags)
panic("Oops");
}
-void __die(const char * str, struct pt_regs * regs, long err)
+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
@@ -432,7 +446,7 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-void die_nmi(char *str, struct pt_regs *regs)
+void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
unsigned long flags = oops_begin();
@@ -575,7 +589,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
}
}
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
@@ -585,7 +600,8 @@ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
outb(reason, 0x61);
}
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+io_check_error(unsigned char reason, struct pt_regs * regs)
{
printk("NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
@@ -598,7 +614,8 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
outb(reason, 0x61);
}
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
printk("Dazed and confused, but trying to continue\n");
printk("Do you have a strange power saving mode enabled?\n");
@@ -606,7 +623,7 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
/* Runs on IST stack. This code must keep interrupts off all the time.
Nested NMIs are prevented by the CPU. */
-asmlinkage void default_do_nmi(struct pt_regs *regs)
+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int cpu;
@@ -658,7 +675,7 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
/* Help handler running on IST stack to switch back to user stack
for scheduling or signal handling. The actual stack switch is done in
entry.S */
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
@@ -690,7 +707,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
SIGTRAP) == NOTIFY_STOP)
return;
- conditional_sti(regs);
+ preempt_conditional_sti(regs);
/* Mask out spurious debug traps due to lazy DR7 setting */
if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
@@ -735,11 +752,13 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
clear_dr7:
set_debugreg(0UL, 7);
+ preempt_conditional_cli(regs);
return;
clear_TF_reenable:
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->eflags &= ~TF_MASK;
+ preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
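The preempt_conditional_sti()/preempt_conditional_cli() helpers added above are meant to be used as a matched pair so the preempt count stays balanced on every exit path, which is why do_debug() gains the _cli call both at clear_dr7 and at clear_TF_reenable. A minimal usage sketch (the handler name is hypothetical; the helpers are the ones defined in the hunk):

	asmlinkage void do_example_trap(struct pt_regs *regs, long error_code)
	{
		preempt_conditional_sti(regs);	/* preempt_disable(), then enable IRQs
						   only if the trapped context had IF set */

		/* ... handle the exception ... */

		preempt_conditional_cli(regs);	/* undo: disable IRQs if they were enabled
						   above, then preempt_enable_no_resched() */
	}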
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b0eed1faf740..74db0062d4a2 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -172,13 +172,15 @@ SECTIONS
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
- __initramfs_end = .;
- . = ALIGN(32);
+ __initramfs_end = .;
+ /* temporary here to work around NR_CPUS. If you see this comment in 2.6.17+
+ complain */
+ . = ALIGN(4096);
+ __init_end = .;
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
__per_cpu_end = .;
- . = ALIGN(4096);
- __init_end = .;
. = ALIGN(4096);
__nosave_begin = .;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index b614d54d2ae4..3496abc8d372 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -39,11 +39,6 @@ extern void __write_lock_failed(rwlock_t *rw);
extern void __read_lock_failed(rwlock_t *rw);
#endif
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
//EXPORT_SYMBOL(dump_fpu);