Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 2
-rw-r--r--  arch/alpha/kernel/smp.c | 2
-rw-r--r--  arch/arc/kernel/smp.c | 4
-rw-r--r--  arch/arc/kernel/unwind.c | 4
-rw-r--r--  arch/arm/boot/compressed/head.S | 2
-rw-r--r--  arch/arm/include/asm/mach/flash.h | 2
-rw-r--r--  arch/arm/kernel/smp.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_twl.c | 2
-rw-r--r--  arch/arm/mm/cache-v7.S | 2
-rw-r--r--  arch/arm/mm/cache-v7m.S | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/lib/copy_template.S | 2
-rw-r--r--  arch/blackfin/mach-common/smp.c | 4
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 2
-rw-r--r--  arch/frv/mm/mmu-context.c | 2
-rw-r--r--  arch/hexagon/kernel/smp.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c | 2
-rw-r--r--  arch/m32r/kernel/setup.c | 2
-rw-r--r--  arch/m68k/ifpsp060/src/isp.S | 2
-rw-r--r--  arch/metag/kernel/smp.c | 4
-rw-r--r--  arch/mips/kernel/traps.c | 2
-rw-r--r--  arch/mn10300/kernel/smp.c | 2
-rw-r--r--  arch/openrisc/kernel/entry.S | 6
-rw-r--r--  arch/openrisc/kernel/head.S | 6
-rw-r--r--  arch/openrisc/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/parisc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/mpc8569mds.dts | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h | 2
-rw-r--r--  arch/powerpc/include/asm/fsl_hcalls.h | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r--  arch/powerpc/xmon/ppc-opc.c | 2
-rw-r--r--  arch/s390/kernel/processor.c | 2
-rw-r--r--  arch/s390/kernel/vtime.c | 2
-rw-r--r--  arch/score/kernel/traps.c | 2
-rw-r--r--  arch/sh/kernel/irq.c | 2
-rw-r--r--  arch/sh/kernel/smp.c | 4
-rw-r--r--  arch/sparc/include/asm/switch_to_32.h | 2
-rw-r--r--  arch/sparc/kernel/leon_smp.c | 2
-rw-r--r--  arch/sparc/kernel/smp_64.c | 2
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 2
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 2
-rw-r--r--  arch/sparc/kernel/traps_32.c | 2
-rw-r--r--  arch/sparc/kernel/traps_64.c | 2
-rw-r--r--  arch/sparc/kernel/visemul.c | 2
-rw-r--r--  arch/tile/kernel/smpboot.c | 2
-rw-r--r--  arch/x86/include/asm/desc_defs.h | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 4
-rw-r--r--  arch/x86/kernel/ftrace.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/xtensa/kernel/smp.c | 4
53 files changed, 65 insertions, 65 deletions
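The bulk of this series converts open-coded reference counting on struct mm_struct to the dedicated helpers: atomic_inc(&mm->mm_count) becomes mmgrab(mm) and atomic_inc(&mm->mm_users) becomes mmget(mm); the remaining hunks are spelling fixes in comments, strings and device trees. As a minimal sketch (assuming the helpers match their upstream definitions in the scheduler headers), the new calls are thin wrappers around the same atomics, so the conversion is behaviour-preserving and only makes the intent explicit at each call site:

	/* Sketch of the helpers assumed by this series (upstream: <linux/sched/mm.h>). */
	static inline void mmgrab(struct mm_struct *mm)
	{
		/* Pin the struct mm_struct itself (kernel threads, lazy TLB). */
		atomic_inc(&mm->mm_count);
	}

	static inline void mmget(struct mm_struct *mm)
	{
		/* Pin the address space (page tables, VMAs) as a real user. */
		atomic_inc(&mm->mm_users);
	}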
diff --git a/arch/Kconfig b/arch/Kconfig
index 740b8ebfe58c..d49a8e6be5c3 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -29,7 +29,7 @@ config OPROFILE_EVENT_MULTIPLEX
The number of hardware counters is limited. The multiplexing
feature enables OProfile to gather more events than counters
are provided by the hardware. This is realized by switching
- between events at an user specified time interval.
+ between events at a user specified time interval.
If unsure, say N.
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 46bf263c3153..acb4b146a607 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -144,7 +144,7 @@ smp_callin(void)
alpha_mv.smp_callin();
/* All kernel threads share the same mm context. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 2afbafadb6ab..b8e8d3944481 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -139,8 +139,8 @@ void start_kernel_secondary(void)
/* MMU, Caches, Vector Table, Interrupts etc */
setup_processor();
- atomic_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ mmget(mm);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 61fd1ce63c56..b6e4f7a7419b 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -1051,9 +1051,9 @@ int arc_unwind(struct unwind_frame_info *frame)
++ptr;
}
if (cie != NULL) {
- /* get code aligment factor */
+ /* get code alignment factor */
state.codeAlign = get_uleb128(&ptr, end);
- /* get data aligment factor */
+ /* get data alignment factor */
state.dataAlign = get_sleb128(&ptr, end);
if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
cie = NULL;
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fc6d541549a2..9150f9732785 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1196,7 +1196,7 @@ skip:
bgt loop1
finished:
ldmfd sp!, {r0-r7, r9-r11}
- mov r10, #0 @ swith back to cache level 0
+ mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
iflush:
mcr p15, 0, r10, c7, c10, 4 @ DSB
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index 4ca69fe2c850..bada3f845a97 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -22,7 +22,7 @@ struct mtd_info;
* set_vpp: method called to enable or disable VPP
* mmcontrol: method called to enable or disable Sync. Burst Read in OneNAND
* parts: optional array of mtd_partitions for static partitioning
- * nr_parts: number of mtd_partitions for static partitoning
+ * nr_parts: number of mtd_partitions for static partitioning
*/
struct flash_platform_data {
const char *map_name;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46377c40d056..5a07c5a4b894 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
* reference and switch to it.
*/
cpu = smp_processor_id();
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 6bf626700557..1346b3ab34a5 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -1,5 +1,5 @@
/**
- * OMAP and TWL PMIC specific intializations.
+ * OMAP and TWL PMIC specific initializations.
*
* Copyright (C) 2010 Texas Instruments Incorporated.
* Thara Gopinath
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a134d8a13d00..de78109d002d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -164,7 +164,7 @@ skip:
cmp r3, r10
bgt flush_levels
finished:
- mov r10, #0 @ swith back to cache level 0
+ mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
dsb st
isb
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index 816a7e44e6f1..788486e830d3 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -217,7 +217,7 @@ skip:
cmp r3, r10
bgt flush_levels
finished:
- mov r10, #0 @ swith back to cache level 0
+ mov r10, #0 @ switch back to cache level 0
write_csselr r10, r3 @ select current cache level in cssr
dsb st
isb
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a8ec5da530af..827d52d78b67 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
/*
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
index 410fbdb8163f..f5b9210f1c83 100644
--- a/arch/arm64/lib/copy_template.S
+++ b/arch/arm64/lib/copy_template.S
@@ -62,7 +62,7 @@ D_h .req x14
sub count, count, tmp2
/*
* Copy the leading memory data from src to dst in an increasing
- * address order.By this way,the risk of overwritting the source
+ * address order.By this way,the risk of overwriting the source
* memory data is eliminated when the distance between src and
* dst is less than 16. The memory accesses here are alignment.
*/
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 23c4ef5f8bdc..a2e6db2ce811 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -307,8 +307,8 @@ void secondary_start_kernel(void)
local_irq_disable();
/* Attach the new idle task to the global mm. */
- atomic_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ mmget(mm);
+ mmgrab(mm);
current->active_mm = mm;
preempt_disable();
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ae6903d7fdbe..14970f11bbf2 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
dma_in_cfg.en = regk_dma_no;
REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
- /* Disble the cryptocop. */
+ /* Disable the cryptocop. */
rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
rw_cfg.en = 0;
REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index 81757d55a5b5..3473bde77f56 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
task_lock(tsk);
if (tsk->mm) {
mm = tsk->mm;
- atomic_inc(&mm->mm_users);
+ mmget(mm);
ret = 0;
}
task_unlock(tsk);
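The hunk above is the mm_users case: cxn_pin_by_pid() takes a reference on another task's address space, not just on the struct. For context, the two counters pair with different release helpers, mmput() for mm_users and mmdrop() for mm_count; a small usage sketch under that assumption (the example_* functions are hypothetical, the helpers are the standard ones):

	/* Assumes <linux/sched.h> (or <linux/sched/mm.h> on newer trees). */

	/* Borrow another task's address space: mm_users keeps page tables and VMAs alive. */
	static void example_use_task_mm(struct task_struct *tsk)
	{
		struct mm_struct *mm = get_task_mm(tsk);	/* mmget() under task_lock(), or NULL */

		if (!mm)
			return;
		/* ... walk mm->mmap, access user pages, etc. ... */
		mmput(mm);					/* release the mm_users reference */
	}

	/* A kernel thread in lazy-TLB mode only needs the struct to stay allocated. */
	static void example_adopt_active_mm(struct mm_struct *mm)
	{
		mmgrab(mm);		/* mm_count reference; the address space itself may still be torn down */
		current->active_mm = mm;
		/* ... the matching mmdrop(mm) happens when the CPU finally switches away ... */
	}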
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 983bae7d2665..c02a6455839e 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -162,7 +162,7 @@ void start_secondary(void)
);
/* Set the memory struct */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
cpu = smp_processor_id();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c483ece3eb84..d68322966f33 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -994,7 +994,7 @@ cpu_init (void)
*/
ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 4c3b84d8406a..52704f199dd6 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -525,7 +525,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
/* both ends local to this partition */
seq_puts(s, " local");
else if (SN_HWPERF_FOREIGN(p))
- /* both ends of the link in foreign partiton */
+ /* both ends of the link in foreign partition */
seq_puts(s, " foreign");
else
/* link straddles a partition */
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 136c69f1fb8a..b18bc0bd6544 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -403,7 +403,7 @@ void __init cpu_init (void)
printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
/* Set up and load the per-CPU TSS and LDT */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
if (current->mm)
BUG();
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
index 6dccda766e22..b865c1a052ba 100644
--- a/arch/m68k/ifpsp060/src/isp.S
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -3814,7 +3814,7 @@ CAS2W2_FILLER:
# (3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set #
# SFC/DFC according to whether exception occurred in user or #
# supervisor mode. #
-# (4) Use "plpaw" instruction to pre-load ATC with efective #
+# (4) Use "plpaw" instruction to pre-load ATC with effective #
# address page(s). THIS SHOULD NOT FAULT!!! The relevant #
# page(s) should have been made resident prior to entering #
# this routine. #
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index bad13232de51..c622293254e4 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -344,8 +344,8 @@ asmlinkage void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ mmget(mm);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cb479be31a50..49c6df20672a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2232,7 +2232,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 426173c4b0b9..e65b5cc2fa67 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -589,7 +589,7 @@ static void __init smp_cpu_init(void)
}
printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index bc6500860f4d..1b7160c79646 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -319,7 +319,7 @@ EXCEPTION_ENTRY(_timer_handler)
l.j _ret_from_intr
l.nop
-/* ---[ 0x600: Aligment exception ]-------------------------------------- */
+/* ---[ 0x600: Alignment exception ]-------------------------------------- */
EXCEPTION_ENTRY(_alignment_handler)
CLEAR_LWA_FLAG(r3)
@@ -331,8 +331,8 @@ EXCEPTION_ENTRY(_alignment_handler)
l.nop
#if 0
-EXCEPTION_ENTRY(_aligment_handler)
-// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the efective addres */
+EXCEPTION_ENTRY(_alignment_handler)
+// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */
l.addi r2,r4,0
// l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */
l.lwz r5,PT_PC(r1)
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 175358e40474..1b7973da4928 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -325,7 +325,7 @@ _dispatch_do_ipage_fault:
.org 0x500
EXCEPTION_HANDLE(_timer_handler)
-/* ---[ 0x600: Aligment exception ]-------------------------------------- */
+/* ---[ 0x600: Alignment exception ]-------------------------------------- */
.org 0x600
EXCEPTION_HANDLE(_alignment_handler)
@@ -640,8 +640,8 @@ _flush_tlb:
/* ========================================[ cache ]=== */
- /* aligment here so we don't change memory offsets with
- * memory controler defined
+ /* alignment here so we don't change memory offsets with
+ * memory controller defined
*/
.align 0x2000
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 552544616b9d..00ddb7804be4 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -19,8 +19,8 @@
/* TODO
* - clean up __offset & stuff
- * - change all 8192 aligment to PAGE !!!
- * - recheck if all aligments are really needed
+ * - change all 8192 alignment to PAGE !!!
+ * - recheck if all alignments are really needed
*/
# define LOAD_OFFSET PAGE_OFFSET
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 75dab2871346..67b452b41ff6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -279,7 +279,7 @@ smp_cpu_init(int cpunum)
set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
index 8e94448f296c..76b2bd6f7742 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
@@ -55,7 +55,7 @@
label = "kernel";
reg = <0x01c00000 0x002e0000>;
};
- partiton@1ee0000 {
+ partition@1ee0000 {
label = "dtb";
reg = <0x01ee0000 0x00020000>;
};
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index d73e9dfa5237..1145dc8e726d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -30,7 +30,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
#ifndef __ASSEMBLY__
/*
- * ISA 3.0 partiton and process table entry format
+ * ISA 3.0 partition and process table entry format
*/
struct prtb_entry {
__be64 prtb0;
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
index 3abb58394da4..b889d13547fd 100644
--- a/arch/powerpc/include/asm/fsl_hcalls.h
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -109,7 +109,7 @@ static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
#define FH_DTPROP_MAX_PROPLEN 32768
/**
- * fh_partiton_get_dtprop - get a property from a guest device tree.
+ * fh_partition_get_dtprop - get a property from a guest device tree.
* @handle: handle of partition whose device tree is to be accessed
* @dtpath_addr: physical address of device tree path to access
* @propname_addr: physical address of name of property
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 893bd7f79be6..573fb3a461b5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -707,7 +707,7 @@ void start_secondary(void *unused)
unsigned int cpu = smp_processor_id();
int i, base;
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index ddfa069c5e7e..39e77e554cd7 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3039,7 +3039,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
/*
* This function is supposed to be called on basis of PE from top
* to bottom style. So the the I/O or MMIO segment assigned to
- * parent PE could be overrided by its child PEs if necessary.
+ * parent PE could be overridden by its child PEs if necessary.
*/
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0024e451bb36..4d757eaa46bf 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1020,7 +1020,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
/* check largest block * page size > max memory hotplug addr */
max_addr = memory_hotplug_max();
if (query.largest_available_block < (max_addr >> page_shift)) {
- dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
+ dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
goto out_failed;
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index 6845e91ba04a..954dbf8222d7 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -1587,7 +1587,7 @@ extract_tbr (unsigned long insn,
#define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7))
#define CTX_MASK CTX(0x3f, 0x7)
-/* An User Context form instruction. */
+/* A User Context form instruction. */
#define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
#define UCTX_MASK UCTX(0x3f, 0x1f)
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 8733b07b5691..24397b9b3779 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -73,7 +73,7 @@ void cpu_init(void)
get_cpu_id(id);
if (machine_has_cpu_mhz)
update_cpu_mhz(NULL);
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 0a9e5d67547d..87a2eac503fe 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -350,7 +350,7 @@ static void __add_vtimer(struct vtimer_list *timer, int periodic)
}
/*
- * add_virt_timer - add an oneshot virtual CPU timer
+ * add_virt_timer - add a oneshot virtual CPU timer
*/
void add_virt_timer(struct vtimer_list *timer)
{
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 2b22bcf02c27..569ac02f68df 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -336,7 +336,7 @@ void __init trap_init(void)
set_except_vector(18, handle_dbe);
flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
cpu_cache_init();
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index bc3591125df7..04487e8fc9b1 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -99,7 +99,7 @@ static inline void handle_one_irq(unsigned int irq)
"mov %0, r4 \n"
"mov r15, r8 \n"
"jsr @%1 \n"
- /* swith to the irq stack */
+ /* switch to the irq stack */
" mov %2, r15 \n"
/* restore the stack (ring zero) */
"mov r8, r15 \n"
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 38e7860845db..edc4769b047e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -178,8 +178,8 @@ asmlinkage void start_secondary(void)
struct mm_struct *mm = &init_mm;
enable_mmu();
- atomic_inc(&mm->mm_count);
- atomic_inc(&mm->mm_users);
+ mmgrab(mm);
+ mmget(mm);
current->active_mm = mm;
#ifdef CONFIG_MMU
enter_lazy_tlb(mm, current);
diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h
index 16f10374feb3..475dd4158ae4 100644
--- a/arch/sparc/include/asm/switch_to_32.h
+++ b/arch/sparc/include/asm/switch_to_32.h
@@ -9,7 +9,7 @@ extern struct thread_info *current_set[NR_CPUS];
* Flush windows so that the VM switch which follows
* would not pull the stack from under us.
*
- * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
+ * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
* XXX WTF is the above comment? Found in late teen 2.4.x.
*/
#ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 71e16f2241c2..b99d33797e1d 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg)
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 0ce347f8e4cc..dcb12d9002e9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -122,7 +122,7 @@ void smp_callin(void)
current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 9d98e5002a09..7b55c50eabe5 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg)
show_leds(cpuid);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
local_ops->cache_all();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 278c40abce82..633c4cf6fdb0 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg)
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 4f21df7d4f13..ecddac5a4c96 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -448,7 +448,7 @@ void trap_init(void)
thread_info_offsets_are_bolixed_pete();
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
/* NOTE: Other cpus have this done as they are started
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index dfc97a47c9a0..e022d7b00390 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2837,6 +2837,6 @@ void __init trap_init(void)
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
}
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index c4ac58e483a4..8f35eea2103a 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -30,7 +30,7 @@
/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF 0x04b
-/* 000110001 - 8-by-16-bit partitoned product */
+/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF 0x031
/* 000110011 - 8-by-16-bit upper alpha partitioned product */
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 6c0abaacec33..53ce940a5016 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -160,7 +160,7 @@ static void start_secondary(void)
__this_cpu_write(current_asid, min_asid);
/* Set up this thread as another owner of the init_mm */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
if (current->mm)
BUG();
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index eb5deb42484d..49265345d4d2 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -15,7 +15,7 @@
* FIXME: Accessing the desc_struct through its fields is more elegant,
* and should be the one valid thing to do. However, a lot of open code
* still touches the a and b accessors, and doing this allow us to do it
- * incrementally. We keep the signature as a struct, rather than an union,
+ * incrementally. We keep the signature as a struct, rather than a union,
* so we can get rid of it transparently in the future -- glommer
*/
/* 8 byte segment descriptor */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f74e84ea8557..c896ed99109e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1506,7 +1506,7 @@ void cpu_init(void)
for (i = 0; i <= IO_BITMAP_LONGS; i++)
t->io_bitmap[i] = ~0UL;
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
me->active_mm = &init_mm;
BUG_ON(me->mm);
enter_lazy_tlb(&init_mm, me);
@@ -1557,7 +1557,7 @@ void cpu_init(void)
/*
* Set up and load the per-CPU TSS and LDT
*/
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
curr->active_mm = &init_mm;
BUG_ON(curr->mm);
enter_lazy_tlb(&init_mm, curr);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2ae058..8f3d9cf26ff9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -535,7 +535,7 @@ static void run_sync(void)
{
int enable_irqs = irqs_disabled();
- /* We may be called with interrupts disbled (on bootup). */
+ /* We may be called with interrupts disabled (on bootup). */
if (enable_irqs)
local_irq_enable();
on_each_cpu(do_sync_core, NULL, 1);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 64821ca3a7c3..46bbbfeaa04d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4090,7 +4090,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* as a SMAP violation if all of the following
* conditions are ture:
* - X86_CR4_SMAP is set in CR4
- * - An user page is accessed
+ * - A user page is accessed
* - Page fault in kernel mode
* - if CPL = 3 or X86_EFLAGS_AC is clear
*
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index fc4ad21a5ed4..fcea72019df7 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -135,8 +135,8 @@ void secondary_start_kernel(void)
/* All kernel threads share the same mm context. */
- atomic_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ mmget(mm);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);