author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-23 17:55:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-23 17:55:40 -0800
commit     d2980d8d826554fa6981d621e569a453787472f8 (patch)
tree       d75ddea276ae8bf42ecf528f9862714a8bccf8f4 /arch
parent     3822a7c40997dc86b1458766a3f146d62393f084 (diff)
parent     817013880a6883f7ab08030d1f8cfef5f07ba467 (diff)
Merge tag 'mm-nonmm-stable-2023-02-20-15-29' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:
 "There is no particular theme here - mainly quick hits all over the
  tree. Most notable is a set of zlib changes from Mikhail Zaslonko
  which enhances and fixes zlib's use of S390 hardware support:

    'lib/zlib: Set of s390 DFLTCC related patches for kernel zlib'"

* tag 'mm-nonmm-stable-2023-02-20-15-29' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (55 commits)
  Update CREDITS file entry for Jesper Juhl
  sparc: allow PM configs for sparc32 COMPILE_TEST
  hung_task: print message when hung_task_warnings gets down to zero.
  arch/Kconfig: fix indentation
  scripts/tags.sh: fix the Kconfig tags generation when using latest ctags
  nilfs2: prevent WARNING in nilfs_dat_commit_end()
  lib/zlib: remove redundation assignement of avail_in dfltcc_gdht()
  lib/Kconfig.debug: do not enable DEBUG_PREEMPT by default
  lib/zlib: DFLTCC always switch to software inflate for Z_PACKET_FLUSH option
  lib/zlib: DFLTCC support inflate with small window
  lib/zlib: Split deflate and inflate states for DFLTCC
  lib/zlib: DFLTCC not writing header bits when avail_out == 0
  lib/zlib: fix DFLTCC ignoring flush modes when avail_in == 0
  lib/zlib: fix DFLTCC not flushing EOBS when creating raw streams
  lib/zlib: implement switching between DFLTCC and software
  lib/zlib: adjust offset calculation for dfltcc_state
  nilfs2: replace WARN_ONs for invalid DAT metadata block requests
  scripts/spelling.txt: add "exsits" pattern and fix typo instances
  fs: gracefully handle ->get_block not mapping bh in __mpage_writepage
  cramfs: Kconfig: fix spelling & punctuation
  ...
Diffstat (limited to 'arch')
-rw-r--r--   arch/Kconfig                  128
-rw-r--r--   arch/alpha/kernel/process.c     2
-rw-r--r--   arch/alpha/kernel/smp.c         4
-rw-r--r--   arch/sparc/Kconfig              2
-rw-r--r--   arch/x86/kvm/emulate.c          8
5 files changed, 72 insertions, 72 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 12e3ddabac9d..e3511afbb7f2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -35,7 +35,7 @@ config HOTPLUG_SMT
bool
config GENERIC_ENTRY
- bool
+ bool
config KPROBES
bool "Kprobes"
@@ -55,26 +55,26 @@ config JUMP_LABEL
depends on HAVE_ARCH_JUMP_LABEL
select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
- This option enables a transparent branch optimization that
- makes certain almost-always-true or almost-always-false branch
- conditions even cheaper to execute within the kernel.
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+ conditions even cheaper to execute within the kernel.
- Certain performance-sensitive kernel code, such as trace points,
- scheduler functionality, networking code and KVM have such
- branches and include support for this optimization technique.
+ Certain performance-sensitive kernel code, such as trace points,
+ scheduler functionality, networking code and KVM have such
+ branches and include support for this optimization technique.
- If it is detected that the compiler has support for "asm goto",
- the kernel will compile such branches with just a nop
- instruction. When the condition flag is toggled to true, the
- nop will be converted to a jump instruction to execute the
- conditional block of instructions.
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile such branches with just a nop
+ instruction. When the condition flag is toggled to true, the
+ nop will be converted to a jump instruction to execute the
+ conditional block of instructions.
- This technique lowers overhead and stress on the branch prediction
- of the processor and generally makes the kernel faster. The update
- of the condition is slower, but those are always very rare.
+ This technique lowers overhead and stress on the branch prediction
+ of the processor and generally makes the kernel faster. The update
+ of the condition is slower, but those are always very rare.
- ( On 32-bit x86, the necessary options added to the compiler
- flags may increase the size of the kernel slightly. )
+ ( On 32-bit x86, the necessary options added to the compiler
+ flags may increase the size of the kernel slightly. )
config STATIC_KEYS_SELFTEST
bool "Static key selftest"
@@ -98,9 +98,9 @@ config KPROBES_ON_FTRACE
depends on KPROBES && HAVE_KPROBES_ON_FTRACE
depends on DYNAMIC_FTRACE_WITH_REGS
help
- If function tracer is enabled and the arch supports full
- passing of pt_regs to function tracing, then kprobes can
- optimize on top of function tracing.
+ If function tracer is enabled and the arch supports full
+ passing of pt_regs to function tracing, then kprobes can
+ optimize on top of function tracing.
config UPROBES
def_bool n
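[Editor's note: for the KPROBES_ON_FTRACE text above, a hedged sketch of registering a kprobe from a module. The probed symbol is chosen purely for illustration; when the target's entry point is ftrace-patchable, the optimization onto function tracing happens transparently.]

```c
#include <linux/module.h>
#include <linux/kprobes.h>

/* Probe target chosen only for illustration. */
static struct kprobe kp = {
	.symbol_name = "kernel_clone",
};

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit: %s\n", p->symbol_name);
	return 0;
}

static int __init kp_example_init(void)
{
	kp.pre_handler = pre_handler;
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");
```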
@@ -154,21 +154,21 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
config ARCH_USE_BUILTIN_BSWAP
bool
help
- Modern versions of GCC (since 4.4) have builtin functions
- for handling byte-swapping. Using these, instead of the old
- inline assembler that the architecture code provides in the
- __arch_bswapXX() macros, allows the compiler to see what's
- happening and offers more opportunity for optimisation. In
- particular, the compiler will be able to combine the byteswap
- with a nearby load or store and use load-and-swap or
- store-and-swap instructions if the architecture has them. It
- should almost *never* result in code which is worse than the
- hand-coded assembler in <asm/swab.h>. But just in case it
- does, the use of the builtins is optional.
+ Modern versions of GCC (since 4.4) have builtin functions
+ for handling byte-swapping. Using these, instead of the old
+ inline assembler that the architecture code provides in the
+ __arch_bswapXX() macros, allows the compiler to see what's
+ happening and offers more opportunity for optimisation. In
+ particular, the compiler will be able to combine the byteswap
+ with a nearby load or store and use load-and-swap or
+ store-and-swap instructions if the architecture has them. It
+ should almost *never* result in code which is worse than the
+ hand-coded assembler in <asm/swab.h>. But just in case it
+ does, the use of the builtins is optional.
- Any architecture with load-and-swap or store-and-swap
- instructions should set this. And it shouldn't hurt to set it
- on architectures that don't have such instructions.
+ Any architecture with load-and-swap or store-and-swap
+ instructions should set this. And it shouldn't hurt to set it
+ on architectures that don't have such instructions.
config KRETPROBES
def_bool y
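[Editor's note: the ARCH_USE_BUILTIN_BSWAP text above refers to the compiler builtins. A small userspace-style sketch of what the builtin replaces, assuming GCC >= 4.4 or clang.]

```c
#include <stdint.h>

/* Open-coded swap of the kind the old __arch_bswapXX() macros resembled. */
static inline uint32_t bswap32_by_hand(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/*
 * The builtin form: the compiler can fuse this with a nearby load or store
 * into a load-and-swap / store-and-swap instruction where one exists.
 */
static inline uint32_t bswap32_builtin(uint32_t x)
{
	return __builtin_bswap32(x);
}
```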
@@ -720,13 +720,13 @@ config LTO_CLANG_FULL
depends on !COMPILE_TEST
select LTO_CLANG
help
- This option enables Clang's full Link Time Optimization (LTO), which
- allows the compiler to optimize the kernel globally. If you enable
- this option, the compiler generates LLVM bitcode instead of ELF
- object files, and the actual compilation from bitcode happens at
- the LTO link step, which may take several minutes depending on the
- kernel configuration. More information can be found from LLVM's
- documentation:
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
https://llvm.org/docs/LinkTimeOptimization.html
@@ -1330,9 +1330,9 @@ config ARCH_HAS_CC_PLATFORM
bool
config HAVE_SPARSE_SYSCALL_NR
- bool
- help
- An architecture should select this if its syscall numbering is sparse
+ bool
+ help
+ An architecture should select this if its syscall numbering is sparse
to save space. For example, MIPS architecture has a syscall array with
entries at 4000, 5000 and 6000 locations. This option turns on syscall
related optimizations for a given architecture.
@@ -1356,35 +1356,35 @@ config HAVE_PREEMPT_DYNAMIC_CALL
depends on HAVE_STATIC_CALL
select HAVE_PREEMPT_DYNAMIC
help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static calls.
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static calls.
- Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
- preemption function will be patched directly.
+ Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+ preemption function will be patched directly.
- Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
- call to a preemption function will go through a trampoline, and the
- trampoline will be patched.
+ Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+ call to a preemption function will go through a trampoline, and the
+ trampoline will be patched.
- It is strongly advised to support inline static call to avoid any
- overhead.
+ It is strongly advised to support inline static call to avoid any
+ overhead.
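[Editor's note: the HAVE_PREEMPT_DYNAMIC_CALL help above describes boot-time model selection via static calls. A minimal sketch of the static-call API it relies on; the hook names below are hypothetical.]

```c
#include <linux/static_call.h>

/* Hypothetical preemption-style hooks, for illustration only. */
static void hook_none(void) { }
static void hook_full(void) { /* work done under the full-preemption model */ }

DEFINE_STATIC_CALL(preempt_hook, hook_none);

void maybe_preempt(void)
{
	/*
	 * With HAVE_STATIC_CALL_INLINE this call site is patched directly;
	 * otherwise it goes through a patchable trampoline.
	 */
	static_call(preempt_hook)();
}

/* At boot, once the preemption model is known: */
void select_full_model(void)
{
	static_call_update(preempt_hook, hook_full);
}
```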
config HAVE_PREEMPT_DYNAMIC_KEY
bool
depends on HAVE_ARCH_JUMP_LABEL
select HAVE_PREEMPT_DYNAMIC
help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static keys.
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static keys.
- Each preemption function will be given an early return based on a
- static key. This should have slightly lower overhead than non-inline
- static calls, as this effectively inlines each trampoline into the
- start of its callee. This may avoid redundant work, and may
- integrate better with CFI schemes.
+ Each preemption function will be given an early return based on a
+ static key. This should have slightly lower overhead than non-inline
+ static calls, as this effectively inlines each trampoline into the
+ start of its callee. This may avoid redundant work, and may
+ integrate better with CFI schemes.
- This will have greater overhead than using inline static calls as
- the call to the preemption function cannot be entirely elided.
+ This will have greater overhead than using inline static calls as
+ the call to the preemption function cannot be entirely elided.
config ARCH_WANT_LD_ORPHAN_WARN
bool
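[Editor's note: the HAVE_PREEMPT_DYNAMIC_KEY text in the hunk above describes the early-return-on-a-static-key variant. A sketch of that pattern; the names are illustrative, not the exact kernel symbols.]

```c
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt);

static void do_preempt_work(void)
{
	/* stand-in for the real preemption body */
}

void dynamic_preempt_hook(void)
{
	/*
	 * The check is inlined at the start of the callee: when the key is
	 * disabled at boot, the function effectively becomes an immediate
	 * return, with no trampoline or indirect call left in the path.
	 */
	if (!static_branch_unlikely(&sk_dynamic_preempt))
		return;

	do_preempt_work();
}
```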
@@ -1407,8 +1407,8 @@ config ARCH_SUPPORTS_PAGE_TABLE_CHECK
config ARCH_SPLIT_ARG64
bool
help
- If a 32-bit architecture requires 64-bit arguments to be split into
- pairs of 32-bit arguments, select this option.
+ If a 32-bit architecture requires 64-bit arguments to be split into
+ pairs of 32-bit arguments, select this option.
config ARCH_HAS_ELFCORE_COMPAT
bool
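[Editor's note: the ARCH_SPLIT_ARG64 hunk above concerns 64-bit arguments passed as pairs of 32-bit values on 32-bit ABIs. A hedged sketch of the receiving side; the wrapper and helper names are hypothetical, and which half arrives first is ABI/endianness-dependent.]

```c
#include <linux/types.h>

long do_example_op(unsigned int fd, loff_t offset);	/* hypothetical helper */

/* A 64-bit 'offset' arrives as two 32-bit halves and is recombined here. */
long example_op_wrapper(unsigned int fd,
			unsigned int offset_lo, unsigned int offset_hi)
{
	loff_t offset = ((u64)offset_hi << 32) | offset_lo;

	return do_example_op(fd, offset);
}
```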
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index ce20c31828a0..0eddd22c6212 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -73,7 +73,7 @@ struct halt_info {
static void
common_shutdown_1(void *generic_ptr)
{
- struct halt_info *how = (struct halt_info *)generic_ptr;
+ struct halt_info *how = generic_ptr;
struct percpu_struct *cpup;
unsigned long *pflags, flags;
int cpuid = smp_processor_id();
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4e20f75438f..0ede4b044e86 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -628,7 +628,7 @@ flush_tlb_all(void)
static void
ipi_flush_tlb_mm(void *x)
{
- struct mm_struct *mm = (struct mm_struct *) x;
+ struct mm_struct *mm = x;
if (mm == current->active_mm && !asn_locked())
flush_tlb_current(mm);
else
@@ -670,7 +670,7 @@ struct flush_tlb_page_struct {
static void
ipi_flush_tlb_page(void *x)
{
- struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
+ struct flush_tlb_page_struct *data = x;
struct mm_struct * mm = data->mm;
if (mm == current->active_mm && !asn_locked())
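[Editor's note: the alpha hunks above simply drop casts on assignments from a void * parameter; in C the conversion is implicit, so the cast adds nothing. A self-contained illustration with a made-up struct:]

```c
#include <stddef.h>

struct payload {
	int value;
};

/*
 * void * converts implicitly to any object-pointer type in C, so
 * 'struct payload *p = (struct payload *)generic_ptr;' says no more
 * than the cast-free form below.
 */
static void handler(void *generic_ptr)
{
	struct payload *p = generic_ptr;

	(void)p->value;
}
```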
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 4d3d1af90d52..84437a4c6545 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -283,7 +283,7 @@ config ARCH_FORCE_MAX_ORDER
This config option is actually maximum order plus one. For example,
a value of 13 means that the largest free memory block is 2^12 pages.
-if SPARC64
+if SPARC64 || COMPILE_TEST
source "kernel/power/Kconfig"
endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..56e1cf7c339e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2615,8 +2615,8 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
return true;
}
-static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
- u16 port, u16 len)
+static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
+ u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
@@ -3961,7 +3961,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
+ if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -3970,7 +3970,7 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt)
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
+ if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;