Diffstat (limited to 'arch')
 arch/arm/boot/dts/armada-38x.dtsi | 3
 arch/arm/boot/dts/imx6qdl-icore.dtsi | 3
 arch/arm/boot/dts/imx6sx-sabreauto.dts | 2
 arch/arm/boot/dts/imx6sx-sdb.dtsi | 2
 arch/arm/boot/dts/keystone-k2g-evm.dts | 2
 arch/arm/boot/dts/sun4i-a10.dtsi | 2
 arch/arm/boot/dts/sun5i.dtsi | 2
 arch/arm/boot/dts/sun7i-a20.dtsi | 2
 arch/arm/include/asm/percpu.h | 2
 arch/arm/kernel/hw_breakpoint.c | 27
 arch/arm/kernel/vdso.c | 1
 arch/arm/mm/mmu.c | 2
 arch/arm64/Makefile | 2
 arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 1
 arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts | 5
 arch/arm64/include/asm/alternative.h | 4
 arch/arm64/include/asm/checksum.h | 5
 arch/arm64/include/asm/kvm_host.h | 11
 arch/arm64/include/asm/smp.h | 1
 arch/arm64/kernel/vdso32/Makefile | 2
 arch/arm64/kvm/mmu.c | 11
 arch/parisc/include/asm/atomic.h | 2
 arch/parisc/include/asm/cmpxchg.h | 2
 arch/parisc/lib/bitops.c | 12
 arch/powerpc/kernel/exceptions-64s.S | 14
 arch/powerpc/mm/book3s64/hash_utils.c | 25
 arch/powerpc/perf/core-book3s.c | 6
 arch/riscv/mm/init.c | 66
 arch/riscv/mm/kasan_init.c | 4
 arch/s390/kernel/perf_cpum_cf_events.c | 4
 arch/sh/include/asm/pgalloc.h | 10
 arch/sh/kernel/entry-common.S | 6
 arch/x86/include/asm/iosf_mbi.h | 1
 arch/x86/kernel/dumpstack.c | 27
 arch/x86/kernel/fpu/xstate.c | 2
 arch/x86/kernel/i8259.c | 2
 arch/x86/kernel/stacktrace.c | 5
 arch/x86/kernel/unwind_orc.c | 8
 arch/x86/kernel/vmlinux.lds.S | 1
 arch/x86/kvm/lapic.c | 2
 arch/x86/kvm/svm/svm.c | 9
 arch/x86/kvm/vmx/nested.c | 16
 arch/x86/kvm/vmx/nested.h | 5
 arch/xtensa/include/asm/checksum.h | 2
 44 files changed, 223 insertions(+), 100 deletions(-)
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 348116501aa2..9b1a24cc5e91 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -342,7 +342,8 @@
comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
- reg = <0x18300 0x100>;
+ reg-names = "comphy", "conf";
+ reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
index 756f3a9f1b4f..12997dae35d9 100644
--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
@@ -397,7 +397,7 @@
pinctrl_usbotg: usbotggrp {
fsl,pins = <
- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};
@@ -409,6 +409,7 @@
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 825924448ab4..14fd1de52a68 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -99,7 +99,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 3e5fb72f21fc..c99aa273c296 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -213,7 +213,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index db640bab8c1d..8b3d64c913d8 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -402,7 +402,7 @@
&gbe0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-id";
+ phy-mode = "rgmii-rxid";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index bf531efc0610..0f95a6ef8543 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -198,7 +198,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index e6b036734a64..c2b4fbf552a3 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -117,7 +117,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index ffe1d10a1a84..6d6a37940db2 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -181,7 +181,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
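The three sunxi changes above apply the same fix: the old alloc-ranges window (base 0x4a000000, size 0x6000000) was exactly as large as the 96 MiB pool it had to hold, leaving the CMA allocator a single candidate base that can fail its alignment requirement; the new window spans the first 256 MiB of DRAM. A quick arithmetic sketch (plain C, illustrative only; the macro names are invented for this sketch, only the hex values come from the diffs):

#include <stdio.h>

#define OLD_BASE 0x4a000000UL
#define OLD_SIZE 0x06000000UL   /* 96 MiB window == pool size: one legal base */
#define NEW_BASE 0x40000000UL
#define NEW_SIZE 0x10000000UL   /* 256 MiB window: room for an aligned pool */

int main(void)
{
        printf("old window: %#lx-%#lx (%lu MiB)\n",
               OLD_BASE, OLD_BASE + OLD_SIZE, OLD_SIZE >> 20);
        printf("new window: %#lx-%#lx (%lu MiB)\n",
               NEW_BASE, NEW_BASE + NEW_SIZE, NEW_SIZE >> 20);
        return 0;
}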
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448537f2..1a3eedbac4a2 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/thread_info.h>
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 02ca7adf5375..7fff88e61252 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+{
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ goto step;
+
perf_bp_event(wp, regs);
/*
- * If no overflow handler is present, insert a temporary
- * mismatch breakpoint so we can single-step over the
- * watchpoint trigger.
+ * Defer stepping to the overflow handler if one is installed.
+ * Otherwise, insert a temporary mismatch breakpoint so that
+ * we can single-step over the watchpoint trigger.
*/
- if (is_default_overflow_handler(wp))
- enable_single_step(wp, instruction_pointer(regs));
+ if (!is_default_overflow_handler(wp))
+ goto unlock;
+step:
+ enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}
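For context, a minimal scenario that exercises the new path (an illustrative userspace sketch; the fix matters when a hardware watchpoint set by a debugger covers memory that the kernel itself touches through a uaccess routine):

#include <unistd.h>

int main(void)
{
        char buf[4];                       /* a debugger sets a watchpoint here */
        ssize_t n = read(0, buf, sizeof(buf)); /* kernel uaccess writes the watched bytes */
        (void)n;
        /* Before the fix, the in-kernel hit was reported to userspace, which
         * cannot step over a kernel instruction; now the kernel steps itself. */
        return 0;
}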
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 6bfdca4769a7..fddd08a6e063 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 628028bfbb92..bcd82614c25d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
pud_t *pud;
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
- if (!WARN_ON(!p4d))
+ if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index a0d94d063fa8..70f5905954dd 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -137,7 +137,7 @@ export TEXT_OFFSET
core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
-core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
boot := arch/arm64/boot
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 78b1361dfbb9..9ce78a7b117d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -161,6 +161,7 @@
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
+ iommus = <&iommu 3>;
};
gpu: gpu@1800000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index c8243da71041..eb01cc96ba7a 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -454,10 +454,7 @@
status = "okay";
phy-mode = "2500base-x";
phys = <&cp1_comphy5 2>;
- fixed-link {
- speed = <2500>;
- full-duplex;
- };
+ managed = "in-band-status";
};
&cp1_spi1 {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 12f0eb56a1cc..619db9b4c9d5 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
- ".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
- ".org . - (662b-661b) + (664b-663b)\n" \
+ ".org . - (662b-661b) + (664b-663b)\n\t" \
+ ".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index b6f7bc6da5fb..93a161b3bf3f 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
+ int n = ihl; /* we want it signed */
tmp = *(const __uint128_t *)iph;
iph += 16;
- ihl -= 4;
+ n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
- } while (--ihl);
+ } while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
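The signed counter matters because a malformed packet can claim ihl == 4, making n = ihl - 4 start at zero; with the old unsigned counter, the --ihl test wraps around and the loop reads far past the header. A standalone sketch of the hazard (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int u = 0;     /* old code: unsigned counter, starts at ihl - 4 == 0 */
        int s = 0;              /* new code: signed counter */
        int old_iters = 0, new_iters = 0;

        do {
                old_iters++;
        } while (--u && old_iters < 8); /* --u wraps to UINT_MAX: runaway (capped here) */

        do {
                new_iters++;
        } while (--s > 0);              /* -1 > 0 is false: loop stays bounded */

        printf("old: %d+ iterations, new: %d iteration\n", old_iters, new_iters);
        return 0;
}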
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..e21d4a01372f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
-#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
- system_supports_generic_auth()) && \
- ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define vcpu_has_ptrauth(vcpu) \
+ ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
+ cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
+ (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+#else
+#define vcpu_has_ptrauth(vcpu) false
+#endif
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index ea268d88b6f7..a0c8a0b65259 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -30,7 +30,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#include <asm/pointer_auth.h>
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index d88148bef6b0..5139a5f19256 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
-CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
+CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035cab6b6..31058e6e7c2a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;
@@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;
if (pudp)
- return kvm_s2pud_exec(pudp);
+ return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
- return kvm_s2pmd_exec(pmdp);
+ return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
- return kvm_s2pte_exec(ptep);
+ return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
- (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+ (fault_status == FSC_PERM &&
+ stage2_is_exec(kvm, fault_ipa, vma_pagesize));
if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 118953d41763..6dd4171c9530 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
+#define atomic64_set_release(v, i) atomic64_set((v), (i))
+
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index ab5c215cf46c..068958575871 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
+ case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 70ffbcf889b8..2e4d1f05a926 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
+
+u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
+{
+ unsigned long flags;
+ u8 prev;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return prev;
+}
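With the one-byte case wired into __cmpxchg(), byte-granular cmpxchg() users now work on parisc. A hedged usage sketch (kernel context; 'state' and 'try_advance' are invented names):

#include <linux/atomic.h>
#include <linux/types.h>

/* cmpxchg() dispatches on sizeof(*ptr), so a 1-byte operand now reaches
 * __cmpxchg_u8() on parisc instead of the bad-pointer fallback. */
static u8 state;

static bool try_advance(u8 from, u8 to)
{
        /* Atomically move 'state' from 'from' to 'to'; true on success. */
        return cmpxchg(&state, from, to) == from;
}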
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0fc8bad878b2..446e54c3f71e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
+
+ /*
+ * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+ * don't call hash_page, just fail the fault. This is required to
+ * prevent re-entrancy problems in the hash code, namely perf
+ * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+ * hash fault. See the comment in hash_preload().
+ */
ld r11, PACA_THREAD_INFO(r13)
- lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
- andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
- bne 77f /* then don't call hash_page now */
+ lwz r0,TI_PREEMPT(r11)
+ andis. r0,r0,NMI_MASK@h
+ bne 77f
/*
* r3 contains the trap number
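In C terms, the retained assembly test reads the preempt count from thread_info and bails out of hash_page when any NMI bits are set; roughly (a sketch, not the kernel's actual code):

#include <linux/preempt.h>
#include <linux/types.h>

/* Rough C equivalent of the assembly check above: fail the fault instead
 * of calling hash_page when the interrupt is being treated as an NMI. */
static bool hash_page_allowed(void)
{
        return !(preempt_count() & NMI_MASK);
}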
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..9b9f92ad0e7a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+ unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */
+ /*
+ * __hash_page_* must run with interrupts off, as it sets the
+ * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+ * time and may take a hash fault reading the user stack, see
+ * read_user_stack_slow() in the powerpc/perf code.
+ *
+ * If that takes a hash fault on the same page as we lock here, it
+ * will bail out when seeing H_PAGE_BUSY set, and retry the access
+ * leading to an infinite loop.
+ *
+ * Disabling interrupts here does not prevent perf interrupts, but it
+ * will prevent them taking hash faults (see the NMI test in
+ * do_hash_page), then read_user_stack's copy_from_user_nofault will
+ * fail and perf will fall back to read_user_stack_slow(), which
+ * walks the Linux page tables.
+ *
+ * Interrupts must also be off for the duration of the
+ * mm_is_thread_local test and update, to prevent preempt running the
+ * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+ */
+ local_irq_save(flags);
+
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
+
+ local_irq_restore(flags);
}
/*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index cd6a742ac6ef..01d70280d287 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
+ /*
+ * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+ * we consider them as NMIs. This is required to prevent hash faults on
+ * user addresses when reading callchains. See the NMI test in
+ * do_hash_page.
+ */
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f4adb3684f3d..79e9d55bdf1a 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -95,19 +95,40 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
+ phys_addr_t start;
unsigned long size;
- if (initrd_start >= initrd_end) {
- pr_info("initrd not found or empty");
+ /* Ignore the virtual address computed during device tree parsing */
+ initrd_start = initrd_end = 0;
+
+ if (!phys_initrd_size)
+ return;
+ /*
+ * Round the memory region to page boundaries as per free_initrd_mem()
+ * This allows us to detect whether the pages overlapping the initrd
+ * are in use, but more importantly, reserves the entire set of pages
+ * as we don't want these pages allocated for other purposes.
+ */
+ start = round_down(phys_initrd_start, PAGE_SIZE);
+ size = phys_initrd_size + (phys_initrd_start - start);
+ size = round_up(size, PAGE_SIZE);
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
+ (u64)start, size);
goto disable;
}
- if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
- pr_err("initrd extends beyond end of memory");
+
+ if (memblock_is_region_reserved(start, size)) {
+ pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
+ (u64)start, size);
goto disable;
}
- size = initrd_end - initrd_start;
- memblock_reserve(__pa_symbol(initrd_start), size);
+ memblock_reserve(start, size);
+ /* Now convert initrd to virtual addresses */
+ initrd_start = (unsigned long)__va(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -126,33 +147,36 @@ void __init setup_bootmem(void)
{
struct memblock_region *reg;
phys_addr_t mem_size = 0;
+ phys_addr_t total_mem = 0;
+ phys_addr_t mem_start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
/* Find the memory region containing the kernel */
for_each_memblock(memory, reg) {
- phys_addr_t end = reg->base + reg->size;
-
- if (reg->base <= vmlinux_start && vmlinux_end <= end) {
- mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
-
- /*
- * Remove memblock from the end of usable area to the
- * end of region
- */
- if (reg->base + mem_size < end)
- memblock_remove(reg->base + mem_size,
- end - reg->base - mem_size);
- }
+ end = reg->base + reg->size;
+ if (!total_mem)
+ mem_start = reg->base;
+ if (reg->base <= vmlinux_start && vmlinux_end <= end)
+ BUG_ON(reg->size == 0);
+ total_mem = total_mem + reg->size;
}
- BUG_ON(mem_size == 0);
+
+ /*
+ * Remove memblock from the end of usable area to the
+ * end of region
+ */
+ mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
+ if (mem_start + mem_size < end)
+ memblock_remove(mem_start + mem_size,
+ end - mem_start - mem_size);
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- set_max_mapnr(PFN_DOWN(mem_size));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = max_pfn;
+ set_max_mapnr(max_low_pfn);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 4a8b61806633..87b4ab3d3c77 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
__pgprot(_PAGE_TABLE)));
- flush_tlb_all();
+ local_flush_tlb_all();
}
static void __init populate(void *start, void *end)
@@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
__pgprot(_PAGE_TABLE)));
- flush_tlb_all();
+ local_flush_tlb_all();
memset(start, 0, end - start);
}
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 1e3df52b2b65..37265f551a11 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
-CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
@@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
- CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
+ CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 22d968bfe9bb..d770da3f8b6f 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmdp, addr) \
-do { \
- struct page *page = virt_to_page(pmdp); \
- pgtable_pmd_page_dtor(page); \
- tlb_remove_page((tlb), page); \
-} while (0);
-#endif
-
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 956a7a03b0c8..9bac5bbb67f3 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
- mov.l 2f, r10 ! Number of syscalls
+ mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
- mov.l 2f, r8 ! Number of syscalls
+ mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
-2: .long NR_syscalls
+6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index 5270ff39b9af..a1911fea8739 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -39,6 +39,7 @@
#define BT_MBI_UNIT_PMC 0x04
#define BT_MBI_UNIT_GFX 0x06
#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_CCK 0x14
#define BT_MBI_UNIT_USB 0x43
#define BT_MBI_UNIT_SATA 0xA3
#define BT_MBI_UNIT_PCIE 0xA6
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b037cfa7c0c5..7401cc12c3cc 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
+static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
+ unsigned int nbytes)
+{
+ if (!user_mode(regs))
+ return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
+
+ /*
+ * Make sure userspace isn't trying to trick us into dumping kernel
+ * memory by pointing the userspace instruction pointer at it.
+ */
+ if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+ return -EINVAL;
+
+ return copy_from_user_nmi(buf, (void __user *)src, nbytes);
+}
+
/*
* There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
*
@@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
- bool bad_ip;
-
- /*
- * Make sure userspace isn't trying to trick us into dumping kernel
- * memory by pointing the userspace instruction pointer at it.
- */
- bad_ip = user_mode(regs) &&
- __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
- if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
- OPCODE_BUFSIZE)) {
+ if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
printk("%sCode: Bad RIP value.\n", loglvl);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index bda2e5eaca0e..ad3a2b37927d 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
copy_part(offsetof(struct fxregs_state, st_space), 128,
&xsave->i387.st_space, &kbuf, &offset_start, &count);
if (header.xfeatures & XFEATURE_MASK_SSE)
- copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
+ copy_part(xstate_offsets[XFEATURE_SSE], 256,
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index f3c76252247d..282b4ee1339f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -207,7 +207,7 @@ spurious_8259A_irq:
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
- printk(KERN_DEBUG
+ printk_deferred(KERN_DEBUG
"spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 6ad43fc44556..2fd698e28e4d 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
* or a page fault), which can make frame pointers
* unreliable.
*/
-
if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
}
@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (unwind_error(&state))
return -EINVAL;
- /* Success path for non-user tasks, i.e. kthreads and idle tasks */
- if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
- return -EINVAL;
-
return 0;
}
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 7f969b2d240f..ec88bbe08a32 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* Find the orc_entry associated with the text address.
*
- * Decrement call return addresses by one so they work for sibling
- * calls and calls to noreturn functions.
+ * For a call frame (as opposed to a signal frame), state->ip points to
+ * the instruction after the call. That instruction's stack layout
+ * could be different from the call instruction's layout, for example
+ * if the call was to a noreturn function. So get the ORC data for the
+ * call instruction itself.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc) {
@@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
state->sp = task->thread.sp;
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+ state->signal = (void *)state->ip == ret_from_fork;
}
if (get_stack_info((unsigned long *)state->sp, state->task,
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 3bfc8dd8a43d..9a03e5b23135 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -358,6 +358,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
+ . = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5bf72fc86a8e..4ce2ddd26c0b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
+ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd78ac5..5bbf76189afa 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
- if (pause_filter_count) {
+ if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;
@@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel = (svm_get_cpl(vcpu) == 0);
- if (pause_filter_thresh)
+ if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu);
kvm_vcpu_on_spin(vcpu, in_kernel);
@@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
- if (pause_filter_thresh)
+ if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
}
@@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
static int svm_vm_init(struct kvm *kvm)
{
+ if (!pause_filter_count || !pause_filter_thresh)
+ kvm->arch.pause_in_guest = true;
+
if (avic) {
int ret = avic_vm_init(kvm);
if (ret)
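The pause_filter_count / pause_filter_thresh checks above are replaced by kvm_pause_in_guest(), which consults the per-VM flag that svm_vm_init() now sets; for reference, a sketch of that helper (assumed to match the existing x86 KVM helper of the same name):

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
        /* Set once at VM init; true means PAUSE exits stay disabled. */
        return kvm->arch.pause_in_guest;
}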
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d4a4cec034d0..11e4df560018 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
+ if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
+ return -EINVAL;
+
/*
* SMM temporarily disables VMX, so we cannot be in guest mode,
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
@@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;
- /* Empty 'VMXON' state is permitted */
- if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
- return 0;
+ /* Empty 'VMXON' state is permitted if no VMCS loaded */
+ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
+ /* See vmx_has_valid_vmcs12. */
+ if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
+ (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
+ (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
+ return -EINVAL;
+ else
+ return 0;
+ }
if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 758bccc26cf9..197148d76b8f 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
+/*
+ * Note: the same condition is checked against the state provided by userspace
+ * in vmx_set_nested_state; if it is satisfied, the nested state must include
+ * the VMCS12.
+ */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index d8292cc9ebdf..243a5fe79d3c 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
- if (access_ok(dst, len))
+ if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)