author     Ralf Baechle <ralf@linux-mips.org>    2007-11-19 12:23:51 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2008-01-29 10:14:57 +0000
commit     87353d8ac39c52784da605ecbe965ecdfad609ad (patch)
tree       c95ce7cbe9b099c21cab71a195621801b04bc05a /arch/mips/kernel
parent     19388fb092d89e179575bd0b44f51b57e175edf5 (diff)
[MIPS] SMP: Call platform methods via ops structure.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/mips-mt.c   |   1
-rw-r--r--   arch/mips/kernel/setup.c     |   3
-rw-r--r--   arch/mips/kernel/smp-mt.c    | 193
-rw-r--r--   arch/mips/kernel/smp.c       |  23
-rw-r--r--   arch/mips/kernel/smtc-proc.c |   1
-rw-r--r--   arch/mips/kernel/smtc.c      |   1
6 files changed, 123 insertions, 99 deletions
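The patch converts the MIPS SMP glue from hard-wired prom_*/plat_* hooks into indirect calls through a registered struct plat_smp_ops: generic smp.c dispatches through mp_ops, and smp-mt.c becomes one provider of such an ops structure (vsmp_smp_ops). As a rough illustration of how a platform is expected to hook in after this change — the actual registration call sites live outside arch/mips/kernel and are therefore not part of this diffstat; example_plat_setup() below is a hypothetical stand-in, assuming <asm/smp-ops.h> declares register_smp_ops() as the setup.c hunk suggests:

/*
 * Sketch only, not part of the patch: a board hands its SMP methods to
 * the generic code once, and smp.c then calls back through mp_ops->...
 */
#include <linux/init.h>
#include <asm/smp-ops.h>                 /* assumed to declare register_smp_ops() */

extern struct plat_smp_ops vsmp_smp_ops; /* defined in smp-mt.c below */

void __init example_plat_setup(void)     /* hypothetical board setup hook */
{
        register_smp_ops(&vsmp_smp_ops);
}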
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 3d6b1ec1f328..640fb0cc6e39 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -17,7 +17,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsmtregs.h>
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 7b4418dd5857..269c252d956f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -29,6 +29,7 @@
 #include <asm/cpu.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp-ops.h>
 #include <asm/system.h>
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
 	arch_mem_init(cmdline_p);
 
 	resource_init();
-#ifdef CONFIG_SMP
 	plat_smp_setup();
-#endif
 }
 
 static int __init fpu_disable(char *s)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ab0b7eeaa7e..89e6f6aa5166 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
 	write_tc_c0_tchalt(TCHALT_H);
 }
 
-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
- */
-void __init plat_smp_setup(void)
+static void vsmp_send_ipi_single(int cpu, unsigned int action)
 {
-	unsigned int mvpconf0, ntc, tc, ncpu = 0;
-	unsigned int nvpe;
+	int i;
+	unsigned long flags;
+	int vpflags;
 
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-	if (!cpu_has_mipsmt)
-		return;
+	local_irq_save(flags);
 
-	/* disable MT so we can configure */
-	dvpe();
-	dmt();
+	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
 
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		i = C_SW1;
+		break;
 
-	mvpconf0 = read_c0_mvpconf0();
-	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+	case SMP_RESCHEDULE_YOURSELF:
+	default:
+		i = C_SW0;
+		break;
+	}
 
-	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-	smp_num_siblings = nvpe;
+	/* 1:1 mapping of vpe and tc... */
+	settc(cpu);
+	write_vpe_c0_cause(read_vpe_c0_cause() | i);
+	evpe(vpflags);
 
-	/* we'll always have more TC's than VPE's, so loop setting everything
-	   to a sensible state */
-	for (tc = 0; tc <= ntc; tc++) {
-		settc(tc);
+	local_irq_restore(flags);
+}
 
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
-	}
+static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
 
-	/* Release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	for_each_cpu_mask(i, mask)
+		vsmp_send_ipi_single(i, action);
+}
 
-	/* We'll wait until starting the secondaries before starting MVPE */
+static void __cpuinit vsmp_init_secondary(void)
+{
+	/* Enable per-cpu interrupts */
 
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+	/* This is Malta specific: IPI,performance and timer inetrrupts */
+	write_c0_status((read_c0_status() & ~ST0_IM ) |
+			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
 }
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void __cpuinit vsmp_smp_finish(void)
 {
-	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
+	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
 
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
+	local_irq_enable();
+}
 
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+static void vsmp_cpus_done(void)
+{
 }
 
 /*
@@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
  * (unsigned long)idle->thread_info the gp
  * assumes a 1:1 mapping of TC => VPE
  */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 {
 	struct thread_info *gp = task_thread_info(idle);
 	dvpe();
@@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
 	evpe(EVPE_ENABLE);
 }
 
-void __cpuinit prom_init_secondary(void)
-{
-	/* Enable per-cpu interrupts */
-
-	/* This is Malta specific: IPI,performance and timer inetrrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
-}
-
-void __cpuinit prom_smp_finish(void)
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondarys
+ */
+static void __init vsmp_smp_setup(void)
 {
-	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
+	unsigned int nvpe;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpu_set(0, mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
+	if (!cpu_has_mipsmt)
+		return;
 
-	local_irq_enable();
-}
+	/* disable MT so we can configure */
+	dvpe();
+	dmt();
 
-void prom_cpus_done(void)
-{
-}
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-void core_send_ipi(int cpu, unsigned int action)
-{
-	int i;
-	unsigned long flags;
-	int vpflags;
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
 
-	local_irq_save(flags);
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	smp_num_siblings = nvpe;
 
-	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
+	/* we'll always have more TC's than VPE's, so loop setting everything
+	   to a sensible state */
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
 
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		i = C_SW1;
-		break;
+		smp_tc_init(tc, mvpconf0);
+		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+	}
 
-	case SMP_RESCHEDULE_YOURSELF:
-	default:
-		i = C_SW0;
-		break;
+	/* Release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/* We'll wait until starting the secondaries before starting MVPE */
+
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+	mips_mt_set_cpuoptions();
+
+	/* set up ipi interrupts */
+	if (cpu_has_vint) {
+		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
 	}
 
-	/* 1:1 mapping of vpe and tc... */
-	settc(cpu);
-	write_vpe_c0_cause(read_vpe_c0_cause() | i);
-	evpe(vpflags);
+	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
 
-	local_irq_restore(flags);
+	setup_irq(cpu_ipi_resched_irq, &irq_resched);
+	setup_irq(cpu_ipi_call_irq, &irq_call);
+
+	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
+
+struct plat_smp_ops vsmp_smp_ops = {
+	.send_ipi_single	= vsmp_send_ipi_single,
+	.send_ipi_mask		= vsmp_send_ipi_mask,
+	.init_secondary		= vsmp_init_secondary,
+	.smp_finish		= vsmp_smp_finish,
+	.cpus_done		= vsmp_cpus_done,
+	.boot_secondary		= vsmp_boot_secondary,
+	.smp_setup		= vsmp_smp_setup,
+	.prepare_cpus		= vsmp_prepare_cpus,
+};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 335be9bcf0dc..1e5dfc28294a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,7 +37,6 @@
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 }
 
+struct plat_smp_ops *mp_ops;
+
+__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (mp_ops)
+		printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+	mp_ops = ops;
+}
+
 /*
  * First C code run on the secondary CPUs after being started up by
 * the master.
@@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_report();
 	per_cpu_trap_init();
 	mips_clockevent_init();
-	prom_init_secondary();
+	mp_ops->init_secondary();
 
 	/*
 	 * XXX parity protection should be folded in here when it's converted
@@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-	prom_smp_finish();
+	mp_ops->smp_finish();
 	set_cpu_sibling_map(cpu);
 
 	cpu_set(cpu, cpu_callin_map);
@@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
 	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
 	/* FIXME: lock-up detection, backtrace on lock-up */
@@ -278,7 +287,7 @@ void smp_send_stop(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	prom_cpus_done();
+	mp_ops->cpus_done();
 }
 
 /* called from main before smp_init() */
@@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
 	cpu_present_map = cpu_possible_map;
@@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	if (IS_ERR(idle))
 		panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
-	prom_boot_secondary(cpu, idle);
+	mp_ops->boot_secondary(cpu, idle);
 
 	/*
 	 * Trust is futile.  We should really have timeouts ...
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index 6f3709996172..fe256559c997 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -14,7 +14,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <linux/proc_fs.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 9c92d42996cb..85f700e58131 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -16,7 +16,6 @@
 #include <asm/hazards.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <asm/time.h>
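For orientation, the shape of struct plat_smp_ops implied by the vsmp_smp_ops initializer and the mp_ops-> call sites above is roughly the following. The real declaration lives in the header added to setup.c (<asm/smp-ops.h>), which falls outside the 'arch/mips/kernel' limit of this page, so treat this as a reconstruction from the diff rather than a copy of that header:

#include <linux/cpumask.h>

struct task_struct;

/* Reconstructed from the call sites in this patch, not copied from the real header. */
struct plat_smp_ops {
        void (*send_ipi_single)(int cpu, unsigned int action);
        void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
        void (*init_secondary)(void);
        void (*smp_finish)(void);
        void (*cpus_done)(void);
        void (*boot_secondary)(int cpu, struct task_struct *idle);
        void (*smp_setup)(void);
        void (*prepare_cpus)(unsigned int max_cpus);
};

extern void register_smp_ops(struct plat_smp_ops *ops);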