Diffstat (limited to 'arch/mips')
37 files changed, 86 insertions, 247 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ce30e2f91d77..85aad0321397 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -29,6 +29,8 @@ config MIPS select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select ARCH_DISCARD_MEMBLOCK + select GENERIC_SMP_IDLE_THREAD + select BUILDTIME_EXTABLE_SORT menu "Machine selection" @@ -1001,12 +1003,12 @@ config HOLES_IN_ZONE bool # -# Endianess selection. Sufficiently obscure so many users don't know what to +# Endianness selection. Sufficiently obscure so many users don't know what to # answer,so we try hard to limit the available choices. Also the use of a # choice statement should be more obvious to the user. # choice - prompt "Endianess selection" + prompt "Endianness selection" help Some MIPS machines can be configured for either little or big endian byte order. These modes require different kernels and a different diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 83ed00a5644a..5a43aa0798ca 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -57,7 +57,7 @@ config CMDLINE options. config CMDLINE_OVERRIDE - bool "Built-in command line overrides firware arguments" + bool "Built-in command line overrides firmware arguments" default n depends on CMDLINE_BOOL help diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 4fedf5a51d96..76017c25a9e6 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -235,7 +235,7 @@ endif OBJCOPYFLAGS += --remove-section=.reginfo -head-y := arch/mips/kernel/head.o arch/mips/kernel/init_task.o +head-y := arch/mips/kernel/head.o libs-y += arch/mips/lib/ diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c index e21507052066..9c717bf98ffe 100644 --- a/arch/mips/ath79/dev-wmac.c +++ b/arch/mips/ath79/dev-wmac.c @@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void) static int ar933x_wmac_reset(void) { - ath79_device_reset_clear(AR933X_RESET_WMAC); ath79_device_reset_set(AR933X_RESET_WMAC); + ath79_device_reset_clear(AR933X_RESET_WMAC); return 0; } diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index b6bb92c16a47..41dd00884975 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev, } static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret; @@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, } static void octeon_dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { int order = get_order(size); @@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys); static struct octeon_dma_map_ops octeon_linear_dma_map_ops = { .dma_map_ops = { - .alloc_coherent = octeon_dma_alloc_coherent, - .free_coherent = octeon_dma_free_coherent, + .alloc = octeon_dma_alloc_coherent, + .free = octeon_dma_free_coherent, .map_page = octeon_dma_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = octeon_dma_map_sg, @@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void) #ifdef CONFIG_PCI static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { .dma_map_ops = { - .alloc_coherent = octeon_dma_alloc_coherent, - .free_coherent = octeon_dma_free_coherent, + .alloc = octeon_dma_alloc_coherent, + .free = octeon_dma_free_coherent, .map_page = octeon_dma_map_page, 
.unmap_page = swiotlb_unmap_page, .map_sg = octeon_dma_map_sg, diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index c3e2b85c3b02..97e7ce9b50ed 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -78,7 +78,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask, } /** - * Detect available CPUs, populate cpu_possible_map + * Detect available CPUs, populate cpu_possible_mask */ static void octeon_smp_hotplug_setup(void) { @@ -268,7 +268,7 @@ static int octeon_cpu_disable(void) spin_lock(&smp_reserve_lock); - cpu_clear(cpu, cpu_online_map); + set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); local_irq_disable(); fixup_irqs(); diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 807c97eed8a8..46c61edcdf7b 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -346,11 +346,8 @@ CONFIG_CHELSIO_T1=m CONFIG_IXGB=m CONFIG_S2IO=m CONFIG_MYRI10GE=m -CONFIG_TR=y CONFIG_IBMOL=m CONFIG_IBMLS=m -CONFIG_3C359=m -CONFIG_TMS380TR=m CONFIG_TMSPCI=m CONFIG_ABYSS=m CONFIG_USB_CATC=m @@ -376,7 +373,6 @@ CONFIG_PCMCIA_SMC91C92=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_PCMCIA_AXNET=m CONFIG_ARCNET_COM20020_CS=m -CONFIG_PCMCIA_IBMTR=m CONFIG_WAN=y CONFIG_LANMEDIA=m CONFIG_HDLC=m diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 7aa37ddfca4b..be39a12901c6 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { void *ret; struct dma_map_ops *ops = get_dma_ops(dev); - ret = ops->alloc_coherent(dev, size, dma_handle, gfp); + ret = ops->alloc(dev, size, dma_handle, gfp, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, ret); return ret; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); - ops->free_coherent(dev, size, vaddr, dma_handle); + ops->free(dev, size, vaddr, dma_handle, attrs); debug_dma_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index 1b1a7d1632b9..b2cf641f206f 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h @@ -36,23 +36,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; #define node_distance(from, to) (__node_distances[(from)][(to)]) -/* sched_domains SD_NODE_INIT for SGI IP27 machines */ -#define SD_NODE_INIT (struct sched_domain) { \ - .parent = NULL, \ - .child = NULL, \ - .groups = NULL, \ - .min_interval = 8, \ - .max_interval = 32, \ - .busy_factor = 32, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .flags = SD_LOAD_BALANCE | \ - SD_BALANCE_EXEC, \ - .last_balance = jiffies, \ - .balance_interval = 1, 
\ - .nr_balance_failed = 0, \ -} - #include <asm-generic/topology.h> #endif /* _ASM_MACH_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h index a865c983c70a..5ad1a9c113c6 100644 --- a/arch/mips/include/asm/mach-jz4740/irq.h +++ b/arch/mips/include/asm/mach-jz4740/irq.h @@ -45,7 +45,7 @@ #define JZ4740_IRQ_LCD JZ4740_IRQ(30) /* 2nd-level interrupts */ -#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) +#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x)) #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) #define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 73c0d45798de..9b02cfba7449 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd); write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ } while (0) - -static inline unsigned long get_current_pgd(void) -{ - return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL); -} - #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ /* diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 20e9dcf42b27..5e33fabe354d 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -310,9 +310,6 @@ struct task_struct; /* Free all resources held by a thread. */ #define release_thread(thread) do { } while(0) -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 0d85d8e440c5..e2eca7d10598 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -85,18 +85,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define STACK_WARN (THREAD_SIZE / 8) -#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR - -#ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info_node(tsk, node) \ - kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) -#else -#define alloc_thread_info_node(tsk, node) \ - kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) -#endif - -#define free_thread_info(info) kfree(info) - #endif /* !__ASSEMBLY__ */ #define PREEMPT_ACTIVE 0x10000000 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 0c6877ea9004..fdaf65e1a99d 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -2,7 +2,7 @@ # Makefile for the Linux/MIPS kernel. # -extra-y := head.o init_task.o vmlinux.lds +extra-y := head.o vmlinux.lds obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ ptrace.o reset.o setup.o signal.o syscall.o \ diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c deleted file mode 100644 index 5f9a76263c9a..000000000000 --- a/arch/mips/kernel/init_task.c +++ /dev/null @@ -1,35 +0,0 @@ -#include <linux/mm.h> -#include <linux/export.h> -#include <linux/sched.h> -#include <linux/init_task.h> -#include <linux/fs.h> -#include <linux/mqueue.h> - -#include <asm/thread_info.h> -#include <asm/uaccess.h> -#include <asm/pgtable.h> - -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -/* - * Initial thread structure. 
- * - * We need to make sure that this is 8192-byte aligned due to the - * way process stacks are handled. This is done by making sure - * the linker maps this in the .text segment right after head.S, - * and making head.S ensure the proper alignment. - * - * The things we do for performance.. - */ -union thread_union init_thread_union __init_task_data - __attribute__((__aligned__(THREAD_SIZE))) = - { INIT_THREAD_INFO(init_task) }; - -/* - * Initial task structure. - * - * All other task structs will be allocated on slabs in fork.c - */ -struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_task); diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 802e6160f37e..33f63bab478a 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); + cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); out_unlock: read_unlock(&tasklist_lock); diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 7f3376b1c219..6ded9bd1489c 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -209,7 +209,7 @@ void mips_mt_set_cpuoptions(void) unsigned int nconfig7 = oconfig7; if (mt_opt_norps) { - printk("\"norps\" option deprectated: use \"rpsctl=\"\n"); + printk("\"norps\" option deprecated: use \"rpsctl=\"\n"); } if (mt_opt_rpsctl >= 0) { printk("34K return prediction stack override set to %d.\n", diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 811084f4e422..ab73fa2fb9b5 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void) regs = get_irq_regs(); - perf_sample_data_init(&data, 0); + perf_sample_data_init(&data, 0, 0); switch (counters) { #define HANDLE_COUNTER(n) \ diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index e309665b6c81..f8b2c592514d 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) int i; #ifdef CONFIG_SMP - if (!cpu_isset(n, cpu_online_map)) + if (!cpu_online(n)) return 0; #endif diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 7c24c2973c6d..4812c6d916e4 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -535,7 +535,7 @@ static inline int audit_arch(void) asmlinkage void syscall_trace_enter(struct pt_regs *regs) { /* do the secure computing check first */ - secure_computing(regs->regs[2]); + secure_computing_strict(regs->regs[2]); if (!(current->ptrace & PT_PTRACED)) goto out; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 185ca00c4c84..d5a338a1739c 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); -
spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext(&regs, &frame->sf_sc); if (sig < 0) @@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) @@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, if (ret) return ret; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + block_sigmask(ka, sig); return ret; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 06b5da392e24..ac3b8d89aae5 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext32(&regs, &frame->sf_sc); if (sig < 0) @@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ae29e894ab8d..86eb4b04631c 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) sigset_from_compat(&newset, &uset); sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; -
recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index ca673569fd24..3046e2986006 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -317,7 +317,7 @@ static int bmips_cpu_disable(void) pr_info("SMP: CPU%d is offline\n", cpu); - cpu_clear(cpu, cpu_online_map); + set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); local_flush_tlb_all(); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 9c1cce9de35f..71a95f55a649 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy) /* * Remove this CPU: */ - cpu_clear(smp_processor_id(), cpu_online_map); + set_cpu_online(smp_processor_id(), false); for (;;) { if (cpu_wait) (*cpu_wait)(); /* Wait if available. */ @@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); #ifndef CONFIG_HOTPLUG_CPU - init_cpu_present(&cpu_possible_map); + init_cpu_present(cpu_possible_mask); #endif } @@ -186,61 +186,9 @@ void __devinit smp_prepare_boot_cpu(void) cpu_set(0, cpu_callin_map); } -/* - * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu - * and keep control until "cpu_online(cpu)" is set. Note: cpu is - * physical, not logical. - */ -static struct task_struct *cpu_idle_thread[NR_CPUS]; - -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - int cpu; -}; - -static void __cpuinit do_fork_idle(struct work_struct *work) +int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) { - struct create_idle *c_idle = - container_of(work, struct create_idle, work); - - c_idle->idle = fork_idle(c_idle->cpu); - complete(&c_idle->done); -} - -int __cpuinit __cpu_up(unsigned int cpu) -{ - struct task_struct *idle; - - /* - * Processor goes to start_secondary(), sets online flag - * The following code is purely to make sure - * Linux can schedule processes on this slave. - */ - if (!cpu_idle_thread[cpu]) { - /* - * Schedule work item to avoid forking user task - * Ported from arch/x86/kernel/smpboot.c - */ - struct create_idle c_idle = { - .cpu = cpu, - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), - }; - - INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - idle = cpu_idle_thread[cpu] = c_idle.idle; - - if (IS_ERR(idle)) - panic(KERN_ERR "Fork failed for CPU %d", cpu); - } else { - idle = cpu_idle_thread[cpu]; - init_idle(idle, cpu); - } - - mp_ops->boot_secondary(cpu, idle); + mp_ops->boot_secondary(cpu, tidle); /* * Trust is futile. We should really have timeouts ...
@@ -248,7 +196,7 @@ int __cpuinit __cpu_up(unsigned int cpu) while (!cpu_isset(cpu, cpu_callin_map)) udelay(100); - cpu_set(cpu, cpu_online_map); + set_cpu_online(cpu, true); return 0; } @@ -320,13 +268,12 @@ void flush_tlb_mm(struct mm_struct *mm) if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { - cpumask_t mask = cpu_online_map; unsigned int cpu; - cpu_clear(smp_processor_id(), mask); - for_each_cpu_mask(cpu, mask) - if (cpu_context(cpu, mm)) + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; + } } local_flush_tlb_mm(mm); @@ -360,13 +307,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l smp_on_other_tlbs(flush_tlb_range_ipi, &fd); } else { - cpumask_t mask = cpu_online_map; unsigned int cpu; - cpu_clear(smp_processor_id(), mask); - for_each_cpu_mask(cpu, mask) - if (cpu_context(cpu, mm)) + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; + } } local_flush_tlb_range(vma, start, end); preempt_enable(); @@ -407,13 +353,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) smp_on_other_tlbs(flush_tlb_page_ipi, &fd); } else { - cpumask_t mask = cpu_online_map; unsigned int cpu; - cpu_clear(smp_processor_id(), mask); - for_each_cpu_mask(cpu, mask) - if (cpu_context(cpu, vma->vm_mm)) + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) cpu_context(cpu, vma->vm_mm) = 0; + } } local_flush_tlb_page(vma, page); preempt_enable(); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index c4f75bbc0bd6..f5dd38f1d015 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -291,7 +291,7 @@ static void smtc_configure_tlb(void) * possibly leave some TCs/VPEs as "slave" processors. * * Use c0_MVPConf0 to find out how many TCs are available, setting up - * cpu_possible_map and the logical/physical mappings. + * cpu_possible_mask and the logical/physical mappings. 
*/ int __init smtc_build_cpu_map(int start_cpu_slot) diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c index d2fa98f3c78d..c429a5bc080f 100644 --- a/arch/mips/lantiq/xway/gpio.c +++ b/arch/mips/lantiq/xway/gpio.c @@ -188,7 +188,7 @@ int __init ltq_gpio_init(void) int ret = platform_driver_register(&ltq_gpio_driver); if (ret) - pr_info("ltq_gpio : Error registering platfom driver!"); + pr_info("ltq_gpio : Error registering platform driver!"); return ret; } diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c index b91c7f17f10f..aae17170472f 100644 --- a/arch/mips/lantiq/xway/gpio_ebu.c +++ b/arch/mips/lantiq/xway/gpio_ebu.c @@ -119,7 +119,7 @@ static int __init ltq_ebu_init(void) int ret = platform_driver_register(&ltq_ebu_driver); if (ret) - pr_info("ltq_ebu : Error registering platfom driver!"); + pr_info("ltq_ebu : Error registering platform driver!"); return ret; } diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c index ff9991cddeaa..fd07d87adaa9 100644 --- a/arch/mips/lantiq/xway/gpio_stp.c +++ b/arch/mips/lantiq/xway/gpio_stp.c @@ -150,7 +150,7 @@ int __init ltq_stp_init(void) int ret = platform_driver_register(&ltq_stp_driver); if (ret) - pr_info("ltq_stp: error registering platfom driver"); + pr_info("ltq_stp: error registering platform driver"); return ret; } diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 1f9ca07f53c8..47037ec5589b 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) if (vma) mask = *mm_cpumask(vma->vm_mm); else - mask = cpu_online_map; - cpu_clear(cpu, mask); - for_each_cpu_mask(cpu, mask) + mask = *cpu_online_mask; + cpumask_clear_cpu(cpu, &mask); + for_each_cpu(cpu, &mask) octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH); preempt_enable(); diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 46084912e588..3fab2046c8a4 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); static void *mips_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) + dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret; @@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, EXPORT_SYMBOL(dma_free_noncoherent); static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) + dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long) vaddr; int order = get_order(size); @@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, EXPORT_SYMBOL(dma_cache_sync); static struct dma_map_ops mips_default_dma_map_ops = { - .alloc_coherent = mips_dma_alloc_coherent, - .free_coherent = mips_dma_free_coherent, + .alloc = mips_dma_alloc_coherent, + .free = mips_dma_free_coherent, .map_page = mips_dma_map_page, .unmap_page = mips_dma_unmap_page, .map_sg = mips_dma_map_sg, diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index db17f49886c2..fab316de57e9 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -165,7 +165,7 @@ void __init nlm_smp_setup(void) cpu_set(boot_cpu, phys_cpu_present_map); __cpu_number_map[boot_cpu] = 0; __cpu_logical_map[0] = boot_cpu; - cpu_set(0, cpu_possible_map); +
set_cpu_possible(0, true); num_cpus = 1; for (i = 0; i < NR_CPUS; i++) { @@ -177,14 +177,14 @@ void __init nlm_smp_setup(void) cpu_set(i, phys_cpu_present_map); __cpu_number_map[i] = num_cpus; __cpu_logical_map[num_cpus] = i; - cpu_set(num_cpus, cpu_possible_map); + set_cpu_possible(num_cpus, true); ++num_cpus; } } pr_info("Phys CPU present map: %lx, possible map %lx\n", (unsigned long)phys_cpu_present_map.bits[0], - (unsigned long)cpu_possible_map.bits[0]); + (unsigned long)cpumask_bits(cpu_possible_mask)[0]); pr_info("Detected %i Slave CPU(s)\n", num_cpus); nlm_set_nmi_handler(nlm_boot_secondary_cpus); diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index 2608752898c0..b71fae231049 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c @@ -146,7 +146,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) } /* - * Detect available CPUs, populate cpu_possible_map before smp_init + * Detect available CPUs, populate cpu_possible_mask before smp_init * * We don't want to start the secondary CPU yet nor do we have a nice probing * feature in PMON so we just assume presence of the secondary core. @@ -155,10 +155,10 @@ static void __init yos_smp_setup(void) { int i; - cpus_clear(cpu_possible_map); + init_cpu_possible(cpu_none_mask); for (i = 0; i < 2; i++) { - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); __cpu_number_map[i] = i; __cpu_logical_map[i] = i; } @@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus) /* * Be paranoid. Enable the IPI only if we're really about to go SMP. */ - if (cpus_weight(cpu_possible_map)) + if (num_possible_cpus()) set_c0_status(STATUSF_IP5); } diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index c6851df9ab74..735b43bf8f82 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest) /* Only let it join in if it's marked enabled */ if ((acpu->cpu_info.flags & KLINFO_ENABLE) && (tot_cpus_found != NR_CPUS)) { - cpu_set(cpuid, cpu_possible_map); + set_cpu_possible(cpuid, true); alloc_cpupda(cpuid, tot_cpus_found); cpus_found++; tot_cpus_found++; diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index d667875be564..de88e22694a0 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -138,7 +138,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) /* * Use CFE to find out how many CPUs are available, setting up - * cpu_possible_map and the logical/physical mappings. + * cpu_possible_mask and the logical/physical mappings. * XXXKW will the boot CPU ever not be physical 0? 
* * Common setup before any secondaries are started @@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void) { int i, num; - cpus_clear(cpu_possible_map); - cpu_set(0, cpu_possible_map); + init_cpu_possible(cpumask_of(0)); __cpu_number_map[0] = 0; __cpu_logical_map[0] = 0; for (i = 1, num = 0; i < NR_CPUS; i++) { if (cfe_cpu_stop(i) == 0) { - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 38e7f6bd7922..285cfef4ebc0 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) /* * Use CFE to find out how many CPUs are available, setting up - * cpu_possible_map and the logical/physical mappings. + * cpu_possible_mask and the logical/physical mappings. * XXXKW will the boot CPU ever not be physical 0? * * Common setup before any secondaries are started @@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void) { int i, num; - cpus_clear(cpu_possible_map); - cpu_set(0, cpu_possible_map); + init_cpu_possible(cpumask_of(0)); __cpu_number_map[0] = 0; __cpu_logical_map[0] = 0; for (i = 1, num = 0; i < NR_CPUS; i++) { if (cfe_cpu_stop(i) == 0) { - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c index ed3b3d317358..cdb1417fba59 100644 --- a/arch/mips/sni/pcimt.c +++ b/arch/mips/sni/pcimt.c @@ -29,7 +29,7 @@ static void __init sni_pcimt_sc_init(void) scsiz = cacheconf & 7; if (scsiz == 0) { - printk("Second level cache is deactived.\n"); + printk("Second level cache is deactivated.\n"); return; } if (scsiz >= 6) { |
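Note: the recurring conversion throughout this series replaces open-coded manipulation of cpu_online_map / cpu_possible_map (cpu_clear(), cpus_clear(), cpu_set(), for_each_cpu_mask()) with the accessor-based cpumask API. The fragment below is only an illustrative sketch of that pattern, not code from the patch; the function name example_cpu_disable() is hypothetical, while set_cpu_online(), for_each_online_cpu() and smp_processor_id() are the helpers the diff actually switches to.

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical hotplug helper -- illustration only, not part of the patch. */
static int example_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int other;

	/*
	 * Old style, removed throughout this series:
	 *	cpu_clear(cpu, cpu_online_map);
	 * New style: the mask is no longer written directly,
	 * go through the accessor instead.
	 */
	set_cpu_online(cpu, false);

	/*
	 * Likewise, instead of copying cpu_online_map and clearing
	 * the local CPU out of the copy, iterate the shared mask and
	 * skip the local CPU inside the loop.
	 */
	for_each_online_cpu(other) {
		if (other == cpu)
			continue;
		/* per-CPU teardown for 'other' would go here */
	}

	return 0;
}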