From e20119f7eaaaf6aad5b44f35155ce500429e17f6 Mon Sep 17 00:00:00 2001 From: Takeshi Kihara Date: Thu, 21 Feb 2019 13:59:38 +0100 Subject: arm64: dts: renesas: r8a77990: Fix SCIF5 DMA channels According to the R-Car Gen3 Hardware Manual Errata for Rev 1.50 of Feb 12, 2019, the DMA channels for SCIF5 are corrected from 16..47 to 0..15 on R-Car E3. Signed-off-by: Takeshi Kihara Fixes: a5ebe5e49a862e21 ("arm64: dts: renesas: r8a77990: Add SCIF-{0,1,3,4,5} device nodes") Signed-off-by: Geert Uytterhoeven Reviewed-by: Fabrizio Castro Signed-off-by: Simon Horman --- arch/arm64/boot/dts/renesas/r8a77990.dtsi | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index 786178cf1ffd..36f409091cfc 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for the R-Car E3 (R8A77990) SoC * - * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018-2019 Renesas Electronics Corp. */ #include @@ -1042,9 +1042,8 @@ <&cpg CPG_CORE R8A77990_CLK_S3D1C>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; - dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, - <&dmac2 0x5b>, <&dmac2 0x5a>; - dma-names = "tx", "rx", "tx", "rx"; + dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; + dma-names = "tx", "rx"; power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; resets = <&cpg 202>; status = "disabled"; -- cgit v1.2.3 From c21cd4ae82e169b394139243684aaacf31bfcdf8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 21 Feb 2019 15:04:28 +0100 Subject: arm64: dts: renesas: r8a774c0: Fix SCIF5 DMA channels Correct the DMA channels for SCIF5 from 16..47 to 0..15, as was done for R-Car E3. Signed-off-by: Takeshi Kihara Fixes: 2660a6af690ebbb4 ("arm64: dts: renesas: r8a774c0: Add SCIF and HSCIF nodes") Signed-off-by: Geert Uytterhoeven Reviewed-by: Fabrizio Castro Signed-off-by: Simon Horman --- arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index f2e390f7f1d5..6590b63268b0 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for the RZ/G2E (R8A774C0) SoC * - * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018-2019 Renesas Electronics Corp. */ #include @@ -990,9 +990,8 @@ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; - dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, - <&dmac2 0x5b>, <&dmac2 0x5a>; - dma-names = "tx", "rx", "tx", "rx"; + dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; + dma-names = "tx", "rx"; power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; resets = <&cpg 202>; status = "disabled"; -- cgit v1.2.3 From 544e784188f1dd7c797c70b213385e67d92005b6 Mon Sep 17 00:00:00 2001 From: Helen Koike Date: Mon, 4 Mar 2019 18:48:37 -0300 Subject: ARM: dts: bcm283x: Fix hdmi hpd gpio pull Raspberry Pi board model B revision 2 has the hot plug detector GPIO active high (and not low as it was in the dts).
Signed-off-by: Helen Koike Fixes: 49ac67e0c39c ("ARM: bcm2835: Add VC4 to the device tree.") Reviewed-by: Eric Anholt Signed-off-by: Eric Anholt --- arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts index 5641d162dfdb..28e7513ce617 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts @@ -93,7 +93,7 @@ }; &hdmi { - hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; + hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; }; &pwm { -- cgit v1.2.3 From cd479eccd2e057116d504852814402a1e68ead80 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 4 Mar 2019 12:33:28 +0100 Subject: s390: limit brk randomization to 32MB For a 64-bit process the randomization of the program break is quite large, at 1GB. That is as big as the randomization of the anonymous mapping base. For a test case started with '/lib/ld64.so.1 ', it can happen that the heap is placed after the stack. To avoid this, limit the program break randomization to 32MB for 64-bit and keep 8MB for 31-bit. Reported-by: Stefan Liebler Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/elf.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 7d22a474a040..f74639a05f0f 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -252,11 +252,14 @@ do { \ /* * Cache aliasing on the latest machines calls for a mapping granularity - * of 512KB. For 64-bit processes use a 512KB alignment and a randomization - * of up to 1GB. For 31-bit processes the virtual address space is limited, - * use no alignment and limit the randomization to 8MB. + * of 512KB for the anonymous mapping base. For 64-bit processes use a + * 512KB alignment and a randomization of up to 1GB. For 31-bit processes + * the virtual address space is limited, use no alignment and limit the + * randomization to 8MB. + * For the additional randomization of the program break use 32MB for + * 64-bit and 8MB for 31-bit. */ -#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) +#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) #define STACK_RND_MASK MMAP_RND_MASK -- cgit v1.2.3 From 152e9b8676c6e788c6bff095c1eaae7b86df5003 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 6 Mar 2019 13:31:21 +0200 Subject: s390/vtime: steal time exponential moving average To be able to judge the current overcommitment ratio for a CPU, add a lowcore field with the exponential moving average of the steal time. The average is updated every tick.
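The update rule used in the patch below -- avg_steal = avg_steal / 2 + steal on every tick -- is an unnormalised exponential moving average in which a sample taken n ticks ago is weighted by 2^-n. A minimal userspace model of the decay, with hypothetical steal values:

#include <stdio.h>

int main(void)
{
	/* avg <- avg/2 + steal, the per-tick update done in vtime_flush() */
	unsigned long long avg_steal = 0;
	unsigned long long steal[] = { 800, 800, 800, 0, 0, 0 }; /* hypothetical */
	int i;

	for (i = 0; i < 6; i++) {
		avg_steal = avg_steal / 2 + steal[i];
		printf("tick %d: avg_steal = %llu\n", i, avg_steal);
	}
	return 0;
}

Because the weights are not normalised, the steady-state value converges to twice the per-tick steal time, and old samples die off geometrically once stealing stops.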
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 61 +++++++++++++++++++++-------------------- arch/s390/kernel/smp.c | 3 +- arch/s390/kernel/vtime.c | 19 ++++++++----- 3 files changed, 45 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index cc0947e08b6f..5b9f10b1e55d 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -91,52 +91,53 @@ struct lowcore { __u64 hardirq_timer; /* 0x02e8 */ __u64 softirq_timer; /* 0x02f0 */ __u64 steal_timer; /* 0x02f8 */ - __u64 last_update_timer; /* 0x0300 */ - __u64 last_update_clock; /* 0x0308 */ - __u64 int_clock; /* 0x0310 */ - __u64 mcck_clock; /* 0x0318 */ - __u64 clock_comparator; /* 0x0320 */ - __u64 boot_clock[2]; /* 0x0328 */ + __u64 avg_steal_timer; /* 0x0300 */ + __u64 last_update_timer; /* 0x0308 */ + __u64 last_update_clock; /* 0x0310 */ + __u64 int_clock; /* 0x0318*/ + __u64 mcck_clock; /* 0x0320 */ + __u64 clock_comparator; /* 0x0328 */ + __u64 boot_clock[2]; /* 0x0330 */ /* Current process. */ - __u64 current_task; /* 0x0338 */ - __u64 kernel_stack; /* 0x0340 */ + __u64 current_task; /* 0x0340 */ + __u64 kernel_stack; /* 0x0348 */ /* Interrupt, DAT-off and restartstack. */ - __u64 async_stack; /* 0x0348 */ - __u64 nodat_stack; /* 0x0350 */ - __u64 restart_stack; /* 0x0358 */ + __u64 async_stack; /* 0x0350 */ + __u64 nodat_stack; /* 0x0358 */ + __u64 restart_stack; /* 0x0360 */ /* Restart function and parameter. */ - __u64 restart_fn; /* 0x0360 */ - __u64 restart_data; /* 0x0368 */ - __u64 restart_source; /* 0x0370 */ + __u64 restart_fn; /* 0x0368 */ + __u64 restart_data; /* 0x0370 */ + __u64 restart_source; /* 0x0378 */ /* Address space pointer. */ - __u64 kernel_asce; /* 0x0378 */ - __u64 user_asce; /* 0x0380 */ - __u64 vdso_asce; /* 0x0388 */ + __u64 kernel_asce; /* 0x0380 */ + __u64 user_asce; /* 0x0388 */ + __u64 vdso_asce; /* 0x0390 */ /* * The lpp and current_pid fields form a * 64-bit value that is set as program * parameter with the LPP instruction. 
*/ - __u32 lpp; /* 0x0390 */ - __u32 current_pid; /* 0x0394 */ + __u32 lpp; /* 0x0398 */ + __u32 current_pid; /* 0x039c */ /* SMP info area */ - __u32 cpu_nr; /* 0x0398 */ - __u32 softirq_pending; /* 0x039c */ - __u32 preempt_count; /* 0x03a0 */ - __u32 spinlock_lockval; /* 0x03a4 */ - __u32 spinlock_index; /* 0x03a8 */ - __u32 fpu_flags; /* 0x03ac */ - __u64 percpu_offset; /* 0x03b0 */ - __u64 vdso_per_cpu_data; /* 0x03b8 */ - __u64 machine_flags; /* 0x03c0 */ - __u64 gmap; /* 0x03c8 */ - __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ + __u32 cpu_nr; /* 0x03a0 */ + __u32 softirq_pending; /* 0x03a4 */ + __u32 preempt_count; /* 0x03a8 */ + __u32 spinlock_lockval; /* 0x03ac */ + __u32 spinlock_index; /* 0x03b0 */ + __u32 fpu_flags; /* 0x03b4 */ + __u64 percpu_offset; /* 0x03b8 */ + __u64 vdso_per_cpu_data; /* 0x03c0 */ + __u64 machine_flags; /* 0x03c8 */ + __u64 gmap; /* 0x03d0 */ + __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ /* br %r1 trampoline */ __u16 br_r1_trampoline; /* 0x0400 */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b198ece2aad6..b8eb99685546 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) lc->percpu_offset = __per_cpu_offset[cpu]; lc->kernel_asce = S390_lowcore.kernel_asce; lc->machine_flags = S390_lowcore.machine_flags; - lc->user_timer = lc->system_timer = lc->steal_timer = 0; + lc->user_timer = lc->system_timer = + lc->steal_timer = lc->avg_steal_timer = 0; __ctl_store(lc->cregs_save_area, 0, 15); save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 98f850e00008..a69a0911ed0e 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime, */ static int do_account_vtime(struct task_struct *tsk) { - u64 timer, clock, user, guest, system, hardirq, softirq, steal; + u64 timer, clock, user, guest, system, hardirq, softirq; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk) if (softirq) account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); - steal = S390_lowcore.steal_timer; - if ((s64) steal > 0) { - S390_lowcore.steal_timer = 0; - account_steal_time(cputime_to_nsecs(steal)); - } - return virt_timer_forward(user + guest + system + hardirq + softirq); } @@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev) */ void vtime_flush(struct task_struct *tsk) { + u64 steal, avg_steal; + if (do_account_vtime(tsk)) virt_timer_expire(); + + steal = S390_lowcore.steal_timer; + avg_steal = S390_lowcore.avg_steal_timer / 2; + if ((s64) steal > 0) { + S390_lowcore.steal_timer = 0; + account_steal_time(steal); + avg_steal += steal; + } + S390_lowcore.avg_steal_timer = avg_steal; } /* -- cgit v1.2.3 From 0d9c038feff6f834ad9e5d88b66715235ab23ff3 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Mon, 18 Feb 2019 12:01:35 -0500 Subject: zcrypt: handle AP Info notification from CHSC SEI command The current AP bus implementation periodically polls the AP configuration to detect changes. When the AP configuration is dynamically changed via the SE or an SCLP instruction, the changes will not be reflected to sysfs until the next time the AP configuration is polled. 
The CHSC architecture provides a Store Event Information (SEI) command to make notification of an AP configuration change. This patch introduces a handler to process notification from the CHSC SEI command by immediately kicking off an AP bus scan-after-event. Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Sebastian Ott Reviewed-by: Harald Freudenberger Reviewed-by: Cornelia Huck Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ap.h | 11 +++++++++++ drivers/s390/cio/chsc.c | 13 +++++++++++++ drivers/s390/crypto/ap_bus.c | 10 ++++++++++ 3 files changed, 34 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 1a6a7092d942..e94a0a28b5eb 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid, return reg1; } +/* + * Interface to tell the AP bus code that a configuration + * change has happened. The bus code should at least do + * an ap bus resource rescan. + */ +#if IS_ENABLED(CONFIG_ZCRYPT) +void ap_bus_cfg_chg(void); +#else +static inline void ap_bus_cfg_chg(void){}; +#endif + #endif /* _ASM_S390_AP_H_ */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index a0baee25134c..eaf4699be2c5 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "css.h" #include "cio.h" @@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) " failed (rc=%d).\n", ret); } +static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) +{ + CIO_CRW_EVENT(3, "chsc: ap config changed\n"); + if (sei_area->rs != 5) + return; + + ap_bus_cfg_chg(); +} + static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { switch (sei_area->cc) { @@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) case 2: /* i/o resource accessibility */ chsc_process_sei_res_acc(sei_area); break; + case 3: /* ap config changed */ + chsc_process_sei_ap_cfg_chg(sei_area); + break; case 7: /* channel-path-availability information */ chsc_process_sei_chp_avail(sei_area); break; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 033a1acabf48..1546389d71db 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -867,6 +867,16 @@ void ap_bus_force_rescan(void) } EXPORT_SYMBOL(ap_bus_force_rescan); +/* +* A config change has happened, force an ap bus rescan. +*/ +void ap_bus_cfg_chg(void) +{ + AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__); + + ap_bus_force_rescan(); +} + /* * hex2bitmap() - parse hex mask string and set bitmap. * Valid strings are "0x012345678" with at least one valid hex number. -- cgit v1.2.3 From 7a9b6be9fe58194d9a349159176e8cc0d8f10ef8 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 8 Mar 2019 13:02:16 -0800 Subject: arm64: bcm2835: Add missing dependency on MFD_CORE. When adding the MFD dependency for power domains and WDT in bcm2835, I added it only on the arm32 side and missed it for arm64. 
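The dependency matters because bcm2835-pm now probes as an MFD parent that registers its watchdog and power-domain functions as sub-devices. A rough sketch of that pattern -- the cell names are assumptions for illustration, not taken verbatim from the driver:

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* Sub-device names are assumed for illustration. */
static const struct mfd_cell bcm2835_pm_cells[] = {
	{ .name = "bcm2835-wdt" },
	{ .name = "bcm2835-power" },
};

static int bcm2835_pm_probe_sketch(struct platform_device *pdev)
{
	/* devm_mfd_add_devices() is provided by the MFD core, hence the
	 * "select MFD_CORE" added below. */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
				    bcm2835_pm_cells,
				    ARRAY_SIZE(bcm2835_pm_cells),
				    NULL, 0, NULL);
}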
Fixes: 5e6acc3e678e ("bcm2835-pm: Move bcm2835-watchdog's DT probe to an MFD.") Signed-off-by: Eric Anholt Reported-by: Stefan Wahren Acked-by: Stefan Wahren --- arch/arm64/Kconfig.platforms | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 251ecf34cb02..50f9fb562059 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -27,6 +27,7 @@ config ARCH_BCM2835 bool "Broadcom BCM2835 family" select TIMER_OF select GPIOLIB + select MFD_CORE select PINCTRL select PINCTRL_BCM2835 select ARM_AMBA -- cgit v1.2.3 From 86be36f6502c52ddb4b85938145324fd07332da1 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 15 Mar 2019 20:21:19 +0530 Subject: powerpc: bpf: Fix generation of load/store DW instructions Yauheni Kaliuta pointed out that PTR_TO_STACK store/load verifier test was failing on powerpc64 BE, and rightfully indicated that the PPC_LD() macro is not masking away the last two bits of the offset per the ISA, resulting in the generation of 'lwa' instruction instead of the intended 'ld' instruction. Segher also pointed out that we can't simply mask away the last two bits as that will result in loading/storing from/to a memory location that was not intended. This patch addresses this by using ldx/stdx if the offset is not word-aligned. We load the offset into a temporary register (TMP_REG_2) and use that as the index register in a subsequent ldx/stdx. We fix PPC_LD() macro to mask off the last two bits, but enhance PPC_BPF_LL() and PPC_BPF_STL() to factor in the offset value and generate the proper instruction sequence. We also convert all existing users of PPC_LD() and PPC_STD() to use these macros. All existing uses of these macros have been audited to ensure that TMP_REG_2 can be clobbered. Fixes: 156d0e290e96 ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF") Cc: stable@vger.kernel.org # v4.9+ Reported-by: Yauheni Kaliuta Signed-off-by: Naveen N. 
Rao Signed-off-by: Daniel Borkmann --- arch/powerpc/include/asm/ppc-opcode.h | 2 ++ arch/powerpc/net/bpf_jit.h | 17 +++++------------ arch/powerpc/net/bpf_jit32.h | 4 ++++ arch/powerpc/net/bpf_jit64.h | 20 ++++++++++++++++++++ arch/powerpc/net/bpf_jit_comp64.c | 12 ++++++------ 5 files changed, 37 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index f9513ad38fa6..e6c9ad088f45 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -302,6 +302,7 @@ /* Misc instructions for BPF compiler */ #define PPC_INST_LBZ 0x88000000 #define PPC_INST_LD 0xe8000000 +#define PPC_INST_LDX 0x7c00002a #define PPC_INST_LHZ 0xa0000000 #define PPC_INST_LWZ 0x80000000 #define PPC_INST_LHBRX 0x7c00062c @@ -309,6 +310,7 @@ #define PPC_INST_STB 0x98000000 #define PPC_INST_STH 0xb0000000 #define PPC_INST_STD 0xf8000000 +#define PPC_INST_STDX 0x7c00012a #define PPC_INST_STDU 0xf8000001 #define PPC_INST_STW 0x90000000 #define PPC_INST_STWU 0x94000000 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 549e9490ff2a..dcac37745b05 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -51,6 +51,8 @@ #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ + ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ @@ -65,7 +67,9 @@ #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) + ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ + ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ @@ -85,17 +89,6 @@ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) - -#ifdef CONFIG_PPC64 -#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) -#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) -#else -#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) -#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) -#endif - #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 6f4daacad296..ade04547703f 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) #endif +#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) +#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) +#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) + #define SEEN_DATAREF 0x10000 /* might call external 
helpers */ #define SEEN_XREG 0x20000 /* X reg is used */ #define SEEN_MEM 0x40000 /* SEEN_MEM+(1< MAX_TAIL_CALL_CNT) * goto out; */ - PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); + PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); PPC_BCC(COND_GT, out); @@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 /* prog = array->ptrs[index]; */ PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); /* * if (prog == NULL) @@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 PPC_BCC(COND_EQ, out); /* goto *(prog->bpf_func + prologue_size); */ - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); #ifdef PPC64_ELF_ABI_v1 /* skip past the function descriptor */ PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -606,7 +606,7 @@ bpf_alu32_trunc: * the instructions generated will remain the * same across all passes */ - PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx)); + PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); break; @@ -662,7 +662,7 @@ emit_clear: PPC_LI32(b2p[TMP_REG_1], imm); src_reg = b2p[TMP_REG_1]; } - PPC_STD(src_reg, dst_reg, off); + PPC_BPF_STL(src_reg, dst_reg, off); break; /* @@ -709,7 +709,7 @@ emit_clear: break; /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_DW: - PPC_LD(dst_reg, src_reg, off); + PPC_BPF_LL(dst_reg, src_reg, off); break; /* -- cgit v1.2.3 From 924726888f660b2a86382a5dd051ec9ca1b18190 Mon Sep 17 00:00:00 2001 From: "Leonidas P. Papadakos" Date: Fri, 1 Mar 2019 00:29:23 +0200 Subject: arm64: dts: rockchip: fix rk3328-roc-cc gmac2io tx/rx_delay The rk3328-roc-cc board exhibits tx stability issues with large packets, as does the rock64 board, which was fixed with this patch https://patchwork.kernel.org/patch/10178969/ A similar patch was merged for the rk3328-roc-cc here https://patchwork.kernel.org/patch/10804863/ but it doesn't include the tx/rx_delay tweaks, and I find that they help with an issue where large transfers would bring the ethernet link down, causing a link reset regularly. Signed-off-by: Leonidas P. Papadakos Signed-off-by: Heiko Stuebner --- arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 33c44e857247..0e34354b2092 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -108,8 +108,8 @@ snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - tx_delay = <0x25>; - rx_delay = <0x11>; + tx_delay = <0x24>; + rx_delay = <0x18>; status = "okay"; }; -- cgit v1.2.3 From eb523a4960b6d61bfda1229907ea58841f0340ae Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Mon, 18 Feb 2019 15:59:26 -0300 Subject: arm64: dts: rockchip: add DDC bus on Rock Pi 4 A DDC I2C bus specifier is required for DDC EDID probing to work properly. 
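A simplified sketch of how a display driver typically resolves such a property for EDID reads -- the common pattern, not the exact dw_hdmi code:

#include <linux/i2c.h>
#include <linux/of.h>

static struct i2c_adapter *get_ddc_adapter(struct device_node *np)
{
	struct device_node *ddc_node;
	struct i2c_adapter *ddc;

	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
	if (!ddc_node)
		return NULL;	/* no dedicated DDC bus described */

	ddc = of_find_i2c_adapter_by_node(ddc_node);
	of_node_put(ddc_node);
	return ddc;		/* later handed to drm_get_edid() */
}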
Fixes: 1b5715c602fda ("arm64: dts: rockchip: add ROCK Pi 4 DTS support") Signed-off-by: Ezequiel Garcia Signed-off-by: Heiko Stuebner --- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts index 4a543f2117d4..844eac939a97 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts @@ -158,6 +158,7 @@ }; &hdmi { + ddc-i2c-bus = <&i2c3>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_cec>; status = "okay"; }; -- cgit v1.2.3 From 6b2fde3dbfab6ebc45b0cd605e17ca5057ff9a3b Mon Sep 17 00:00:00 2001 From: Jonas Karlman Date: Sun, 24 Feb 2019 21:51:22 +0000 Subject: ARM: dts: rockchip: fix rk3288 cpu opp node reference The following error can be seen during boot: of: /cpus/cpu@501: Couldn't find opp node Change cpu nodes to use operating-points-v2 in order to fix this. Fixes: ce76de984649 ("ARM: dts: rockchip: convert rk3288 to operating-points-v2") Cc: stable@vger.kernel.org Signed-off-by: Jonas Karlman Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk3288.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index ca7d52daa8fb..09868dcee34b 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -70,7 +70,7 @@ compatible = "arm,cortex-a12"; reg = <0x501>; resets = <&cru SRST_CORE1>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; @@ -80,7 +80,7 @@ compatible = "arm,cortex-a12"; reg = <0x502>; resets = <&cru SRST_CORE2>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; @@ -90,7 +90,7 @@ compatible = "arm,cortex-a12"; reg = <0x503>; resets = <&cru SRST_CORE3>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; -- cgit v1.2.3 From a8772e5d826d0f61f8aa9c284b3ab49035d5273d Mon Sep 17 00:00:00 2001 From: Tomohiro Mayama Date: Sun, 10 Mar 2019 01:10:12 +0900 Subject: arm64: dts: rockchip: Fix vcc_host1_5v GPIO polarity on rk3328-rock64 This patch makes the USB ports function again.
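With the polarity encoded in the GPIO flags, gpiod consumers work purely in logical levels, which is why dropping enable-active-high and marking the line GPIO_ACTIVE_LOW is the right fix. A generic consumer sketch (not the regulator-fixed code itself):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int assert_enable_line(struct device *dev)
{
	/* Wire-level polarity comes from the DT flags, e.g. GPIO_ACTIVE_LOW */
	struct gpio_desc *en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);

	if (IS_ERR(en))
		return PTR_ERR(en);

	/* Logical 1 means "asserted": this drives an active-low pin low */
	gpiod_set_value(en, 1);
	return 0;
}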
Fixes: 955bebde057e ("arm64: dts: rockchip: add rk3328-rock64 board") Cc: stable@vger.kernel.org Suggested-by: Robin Murphy Signed-off-by: Tomohiro Mayama Tested-by: Katsuhiro Suzuki Signed-off-by: Heiko Stuebner --- arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 2157a528276b..79b4d1d4b5d6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -46,8 +46,7 @@ vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>; + gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&usb20_host_drv>; regulator-name = "vcc_host1_5v"; -- cgit v1.2.3 From 8dbc4d5ddb59f49cb3e85bccf42a4720b27a6576 Mon Sep 17 00:00:00 2001 From: David Summers Date: Sat, 9 Mar 2019 15:39:21 +0000 Subject: ARM: dts: rockchip: Fix SD card detection on rk3288-tinker The Problem: On ASUS Tinker Board S, when booting from the eMMC with a card in the SD slot, there are constant errors. Also, after a warm reboot, U-Boot cannot access the SD slot. Cause: Identified by Robin Murphy @ ARM. The Card Detect on rk3288 devices is pulled up by vccio-sd; so when the regulator powers this off, card detect gives spurious errors. A second problem is that during power down, vccio-sd appears to be powered down. This causes a problem when warm rebooting from the SD card. This was identified by Jonas Karlman. History: A common fault on these rk3288 boards, which implement the reference design. When this arose before: http://lists.infradead.org/pipermail/linux-arm-kernel/2014-August/281153.html And Ulf and Jaehoon clearly said this was a broken card detect design, which should be solved via polling. Solution: Hence broken-cd is set as a property. This cures the errors. The powering down of vccio-sd during reboot is cured by adding regulator-boot-on. This solution has been fairly widely reviewed and tested.
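For context, the core behaviour the broken-cd solution relies on: mmc_of_parse() maps the "broken-cd" property to MMC_CAP_NEEDS_POLL, so the MMC core polls for card presence instead of trusting the card-detect line. A simplified sketch of that logic:

#include <linux/mmc/host.h>
#include <linux/property.h>

/* Simplified model of the mmc_of_parse() handling relied on here. */
static void parse_cd_sketch(struct mmc_host *host, struct device *dev)
{
	if (device_property_read_bool(dev, "broken-cd"))
		host->caps |= MMC_CAP_NEEDS_POLL;	/* poll instead of using the CD pin */
}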
Fixes: e58c5e739d6f ("ARM: dts: rockchip: move shared tinker-board nodes to a common dtsi") Cc: stable@vger.kernel.org [Heiko: slightly inaccurate fixes but tinker is a sbc (aka like a Pi) where we can hopefully expect people not to rely on overly old stable kernels] Signed-off-by: David Summers Reviewed-by: Jonas Karlman Tested-by: Jonas Karlman Reviewed-by: Robin Murphy Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk3288-tinker.dtsi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi index aa107ee41b8b..ef653c3209bc 100644 --- a/arch/arm/boot/dts/rk3288-tinker.dtsi +++ b/arch/arm/boot/dts/rk3288-tinker.dtsi @@ -254,6 +254,7 @@ }; vccio_sd: LDO_REG5 { + regulator-boot-on; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-name = "vccio_sd"; @@ -430,7 +431,7 @@ bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; - card-detect-delay = <200>; + broken-cd; disable-wp; /* wp not hooked up */ pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; -- cgit v1.2.3 From 6fd8b9780ec1a49ac46e0aaf8775247205e66231 Mon Sep 17 00:00:00 2001 From: Peter Geis Date: Wed, 13 Mar 2019 18:45:36 +0000 Subject: arm64: dts: rockchip: fix rk3328 rgmii high tx error rate Several rk3328 based boards experience high rgmii tx error rates. This is due to several pins in the rk3328.dtsi rgmii pinmux that are missing a defined pull strength setting. This causes the pinmux driver to default to 2ma (bit mask 00). These pins are only defined in the rk3328.dtsi, and are not listed in the rk3328 specification. The TRM only lists them as "Reserved" (RK3328 TRM V1.1, 3.3.3 Detail Register Description, GRF_GPIO0B_IOMUX, GRF_GPIO0C_IOMUX, GRF_GPIO0D_IOMUX). However, removal of these pins from the rgmii pinmux definition causes the interface to fail to transmit. Also, the rgmii tx and rx pins defined in the dtsi are not consistent with the rk3328 specification, with tx pins currently set to 12ma and rx pins set to 2ma. Fix this by setting tx pins to 8ma and the rx pins to 4ma, consistent with the specification. Defining the drive strength for the undefined pins eliminated the high tx packet error rate observed under heavy data transfers. Aligning the drive strength to the TRM values eliminated the occasional packet retry errors under iperf3 testing. This allows much higher data rates with no recorded tx errors. Tested on the rk3328-roc-cc board. 
Fixes: 52e02d377a72 ("arm64: dts: rockchip: add core dtsi file for RK3328 SoCs") Cc: stable@vger.kernel.org Signed-off-by: Peter Geis Signed-off-by: Heiko Stuebner --- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 44 ++++++++++++++++---------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 84f14b132e8f..c55a3f1a87ff 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -1642,50 +1642,50 @@ rgmiim1_pins: rgmiim1-pins { rockchip,pins = /* mac_txclk */ - <1 RK_PB4 2 &pcfg_pull_none_12ma>, + <1 RK_PB4 2 &pcfg_pull_none_8ma>, /* mac_rxclk */ - <1 RK_PB5 2 &pcfg_pull_none_2ma>, + <1 RK_PB5 2 &pcfg_pull_none_4ma>, /* mac_mdio */ - <1 RK_PC3 2 &pcfg_pull_none_2ma>, + <1 RK_PC3 2 &pcfg_pull_none_4ma>, /* mac_txen */ - <1 RK_PD1 2 &pcfg_pull_none_12ma>, + <1 RK_PD1 2 &pcfg_pull_none_8ma>, /* mac_clk */ - <1 RK_PC5 2 &pcfg_pull_none_2ma>, + <1 RK_PC5 2 &pcfg_pull_none_4ma>, /* mac_rxdv */ - <1 RK_PC6 2 &pcfg_pull_none_2ma>, + <1 RK_PC6 2 &pcfg_pull_none_4ma>, /* mac_mdc */ - <1 RK_PC7 2 &pcfg_pull_none_2ma>, + <1 RK_PC7 2 &pcfg_pull_none_4ma>, /* mac_rxd1 */ - <1 RK_PB2 2 &pcfg_pull_none_2ma>, + <1 RK_PB2 2 &pcfg_pull_none_4ma>, /* mac_rxd0 */ - <1 RK_PB3 2 &pcfg_pull_none_2ma>, + <1 RK_PB3 2 &pcfg_pull_none_4ma>, /* mac_txd1 */ - <1 RK_PB0 2 &pcfg_pull_none_12ma>, + <1 RK_PB0 2 &pcfg_pull_none_8ma>, /* mac_txd0 */ - <1 RK_PB1 2 &pcfg_pull_none_12ma>, + <1 RK_PB1 2 &pcfg_pull_none_8ma>, /* mac_rxd3 */ - <1 RK_PB6 2 &pcfg_pull_none_2ma>, + <1 RK_PB6 2 &pcfg_pull_none_4ma>, /* mac_rxd2 */ - <1 RK_PB7 2 &pcfg_pull_none_2ma>, + <1 RK_PB7 2 &pcfg_pull_none_4ma>, /* mac_txd3 */ - <1 RK_PC0 2 &pcfg_pull_none_12ma>, + <1 RK_PC0 2 &pcfg_pull_none_8ma>, /* mac_txd2 */ - <1 RK_PC1 2 &pcfg_pull_none_12ma>, + <1 RK_PC1 2 &pcfg_pull_none_8ma>, /* mac_txclk */ - <0 RK_PB0 1 &pcfg_pull_none>, + <0 RK_PB0 1 &pcfg_pull_none_8ma>, /* mac_txen */ - <0 RK_PB4 1 &pcfg_pull_none>, + <0 RK_PB4 1 &pcfg_pull_none_8ma>, /* mac_clk */ - <0 RK_PD0 1 &pcfg_pull_none>, + <0 RK_PD0 1 &pcfg_pull_none_4ma>, /* mac_txd1 */ - <0 RK_PC0 1 &pcfg_pull_none>, + <0 RK_PC0 1 &pcfg_pull_none_8ma>, /* mac_txd0 */ - <0 RK_PC1 1 &pcfg_pull_none>, + <0 RK_PC1 1 &pcfg_pull_none_8ma>, /* mac_txd3 */ - <0 RK_PC7 1 &pcfg_pull_none>, + <0 RK_PC7 1 &pcfg_pull_none_8ma>, /* mac_txd2 */ - <0 RK_PC6 1 &pcfg_pull_none>; + <0 RK_PC6 1 &pcfg_pull_none_8ma>; }; rmiim1_pins: rmiim1-pins { -- cgit v1.2.3 From 09f91381fa5de1d44bc323d8bf345f5d57b3d9b5 Mon Sep 17 00:00:00 2001 From: Peter Geis Date: Wed, 13 Mar 2019 19:02:30 +0000 Subject: arm64: dts: rockchip: fix rk3328 sdmmc0 write errors Various rk3328 based boards experience occasional sdmmc0 write errors. This is due to the rk3328.dtsi tx drive levels being set to 4ma, vs 8ma per the rk3328 datasheet default settings. Fix this by setting the tx signal pins to 8ma. Inspiration from tonymac32's patch, https://github.com/ayufan-rock64/linux-kernel/commit/dc1212b347e0da17c5460bcc0a56b07d02bac3f8 Fixes issues on the rk3328-roc-cc and the rk3328-rock64 (as per the above commit message). Tested on the rk3328-roc-cc board. 
Fixes: 52e02d377a72 ("arm64: dts: rockchip: add core dtsi file for RK3328 SoCs") Cc: stable@vger.kernel.org Signed-off-by: Peter Geis Signed-off-by: Heiko Stuebner --- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index c55a3f1a87ff..dabef1a21649 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -1445,11 +1445,11 @@ sdmmc0 { sdmmc0_clk: sdmmc0-clk { - rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>; + rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>; }; sdmmc0_cmd: sdmmc0-cmd { - rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>; }; sdmmc0_dectn: sdmmc0-dectn { @@ -1461,14 +1461,14 @@ }; sdmmc0_bus1: sdmmc0-bus1 { - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>; }; sdmmc0_bus4: sdmmc0-bus4 { - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>, - <1 RK_PA1 1 &pcfg_pull_up_4ma>, - <1 RK_PA2 1 &pcfg_pull_up_4ma>, - <1 RK_PA3 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>, + <1 RK_PA1 1 &pcfg_pull_up_8ma>, + <1 RK_PA2 1 &pcfg_pull_up_8ma>, + <1 RK_PA3 1 &pcfg_pull_up_8ma>; }; sdmmc0_gpio: sdmmc0-gpio { -- cgit v1.2.3 From 1a7ee0efb26d6e25433c6d4428028ac614f55ff1 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Fri, 1 Mar 2019 08:26:42 +0100 Subject: ARM: dts: imx6dl-yapp4: Use rgmii-id phy mode on the cpu port MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use rgmii-id phy mode for the CPU port (MAC0) of the QCA8334 switch to add delays to both Tx and Rx clock. It worked with the rgmii mode before because the qca8k driver (incorrectly) enabled delays in that mode and rgmii-id was not implemented at all. Commit 5ecdd77c61c8 ("net: dsa: qca8k: disable delay for RGMII mode") removed the delays from the RGMII mode and hence broke the networking. To fix the problem, commit a968b5e9d587 ("net: dsa: qca8k: Enable delay for RGMII_ID mode") was introduced. Now the correct phy mode is available so use it. Signed-off-by: Michal Vokáč Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi index b715ab0fa1ff..091d829f6b05 100644 --- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi +++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi @@ -125,7 +125,7 @@ ethphy0: port@0 { reg = <0>; label = "cpu"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; ethernet = <&fec>; fixed-link { -- cgit v1.2.3 From 0c17e83fe423467e3ccf0a02f99bd050a73bbeb4 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Fri, 1 Mar 2019 16:56:46 +0800 Subject: ARM: imx51: fix a leaked reference by adding missing of_node_put The call to of_get_next_child returns a node pointer with refcount incremented thus it must be explicitly decremented after the last usage. Detected by coccinelle with the following warnings: ./arch/arm/mach-imx/mach-imx51.c:64:2-8: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 57, but without a corresponding object release within this function. 
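The general rule the fix follows: every node returned by the of_find_*/of_get_* lookup helpers carries a reference that the caller must drop. A minimal sketch of the corrected pattern, with a hypothetical compatible string:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

static void __init map_regs_sketch(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-device");
	if (!np)
		return;

	base = of_iomap(np, 0);
	of_node_put(np);	/* the mapping survives; only the lookup reference is dropped */
	if (!base)
		pr_err("Unable to map registers\n");
}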
Signed-off-by: Wen Yang Cc: Russell King Cc: Shawn Guo Cc: Sascha Hauer Cc: Pengutronix Kernel Team Cc: Fabio Estevam Cc: NXP Linux Team Cc: Lucas Stach Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Shawn Guo --- arch/arm/mach-imx/mach-imx51.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c index c7169c2f94c4..08c7892866c2 100644 --- a/arch/arm/mach-imx/mach-imx51.c +++ b/arch/arm/mach-imx/mach-imx51.c @@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void) return; m4if_base = of_iomap(np, 0); + of_node_put(np); if (!m4if_base) { pr_err("Unable to map M4IF registers\n"); return; } -- cgit v1.2.3 From 91740fc8242b4f260cfa4d4536d8551804777fae Mon Sep 17 00:00:00 2001 From: Kohji Okuno Date: Tue, 26 Feb 2019 11:34:13 +0900 Subject: ARM: imx6q: cpuidle: fix bug that CPU might not wake up at expected time In the current cpuidle implementation for i.MX6q, the CPU that sets 'WAIT_UNCLOCKED' and the CPU that returns to 'WAIT_CLOCKED' are always the same. While the CPU that sets 'WAIT_UNCLOCKED' is in the IDLE state of "WAIT", if the other CPU wakes up and enters the IDLE state of "WFI" instead of "WAIT", this CPU cannot wake up at the expected time. Because, in the case of "WFI", the CPU must be woken up by the local timer interrupt. But, while 'WAIT_UNCLOCKED' is set, the local timer is stopped when all CPUs execute the "wfi" instruction. As a result, the local timer interrupt is not fired. In this situation, this CPU will be woken up by an IRQ different from the local timer (e.g. the broadcast timer). So, this fix changes the CPU to return to 'WAIT_CLOCKED'. Signed-off-by: Kohji Okuno Fixes: e5f9dec8ff5f ("ARM: imx6q: support WAIT mode using cpuidle") Cc: Signed-off-by: Shawn Guo --- arch/arm/mach-imx/cpuidle-imx6q.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index bfeb25aaf9a2..326e870d7123 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -16,30 +16,23 @@ #include "cpuidle.h" #include "hardware.h" -static atomic_t master = ATOMIC_INIT(0); -static DEFINE_SPINLOCK(master_lock); +static int num_idle_cpus = 0; +static DEFINE_SPINLOCK(cpuidle_lock); static int imx6q_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - if (atomic_inc_return(&master) == num_online_cpus()) { - /* - * With this lock, we prevent other cpu to exit and enter - * this function again and become the master. - */ - if (!spin_trylock(&master_lock)) - goto idle; + spin_lock(&cpuidle_lock); + if (++num_idle_cpus == num_online_cpus()) imx6_set_lpm(WAIT_UNCLOCKED); - cpu_do_idle(); - imx6_set_lpm(WAIT_CLOCKED); - spin_unlock(&master_lock); - goto done; - } + spin_unlock(&cpuidle_lock); -idle: cpu_do_idle(); -done: - atomic_dec(&master); + + spin_lock(&cpuidle_lock); + if (num_idle_cpus-- == num_online_cpus()) + imx6_set_lpm(WAIT_CLOCKED); + spin_unlock(&cpuidle_lock); return index; } -- cgit v1.2.3 From ebff0b0e3d3c862c16c487959db5e0d879632559 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Mar 2019 17:37:44 +0000 Subject: KVM: arm64: Reset the PMU in preemptible context We've become very cautious to now always reset the vcpu when nothing is loaded on the physical CPU.
To do so, we now disable preemption and do a kvm_arch_vcpu_put() to make sure we have all the state in memory (and that it won't be loaded behind our back). This now causes issues with resetting the PMU, which calls into perf. Perf itself uses mutexes, which clashes with the lack of preemption. It is worth realizing that the PMU is fully emulated, and that no PMU state is ever loaded on the physical CPU. This means we can perfectly reset the PMU outside of the non-preemptible section. Fixes: e761a927bc9a ("KVM: arm/arm64: Reset the VCPU without preemption and vcpu state loaded") Reported-by: Julien Grall Tested-by: Julien Grall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/reset.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f16a5f8ff2b4..e2a0500cd7a2 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) int ret = -EINVAL; bool loaded; + /* Reset PMU outside of the non-preemptible section */ + kvm_pmu_vcpu_reset(vcpu); + preempt_disable(); loaded = (vcpu->cpu != -1); if (loaded) @@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.reset_state.reset = false; } - /* Reset PMU */ - kvm_pmu_vcpu_reset(vcpu); - /* Default workaround setup is enabled (if supported) */ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; -- cgit v1.2.3 From a6ecfb11bf37743c1ac49b266595582b107b61d4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 19 Mar 2019 12:47:11 +0000 Subject: KVM: arm/arm64: vgic-its: Take the srcu lock when writing to guest memory When halting a guest, QEMU flushes the virtual ITS caches, which amounts to writing to the various tables that the guest has allocated. When doing this, we fail to take the srcu lock, and the kernel shouts loudly if running a lockdep kernel: [ 69.680416] ============================= [ 69.680819] WARNING: suspicious RCU usage [ 69.681526] 5.1.0-rc1-00008-g600025238f51-dirty #18 Not tainted [ 69.682096] ----------------------------- [ 69.682501] ./include/linux/kvm_host.h:605 suspicious rcu_dereference_check() usage!
[ 69.683225] [ 69.683225] other info that might help us debug this: [ 69.683225] [ 69.683975] [ 69.683975] rcu_scheduler_active = 2, debug_locks = 1 [ 69.684598] 6 locks held by qemu-system-aar/4097: [ 69.685059] #0: 0000000034196013 (&kvm->lock){+.+.}, at: vgic_its_set_attr+0x244/0x3a0 [ 69.686087] #1: 00000000f2ed935e (&its->its_lock){+.+.}, at: vgic_its_set_attr+0x250/0x3a0 [ 69.686919] #2: 000000005e71ea54 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.687698] #3: 00000000c17e548d (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.688475] #4: 00000000ba386017 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.689978] #5: 00000000c2c3c335 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.690729] [ 69.690729] stack backtrace: [ 69.691151] CPU: 2 PID: 4097 Comm: qemu-system-aar Not tainted 5.1.0-rc1-00008-g600025238f51-dirty #18 [ 69.691984] Hardware name: rockchip evb_rk3399/evb_rk3399, BIOS 2019.04-rc3-00124-g2feec69fb1 03/15/2019 [ 69.692831] Call trace: [ 69.694072] lockdep_rcu_suspicious+0xcc/0x110 [ 69.694490] gfn_to_memslot+0x174/0x190 [ 69.694853] kvm_write_guest+0x50/0xb0 [ 69.695209] vgic_its_save_tables_v0+0x248/0x330 [ 69.695639] vgic_its_set_attr+0x298/0x3a0 [ 69.696024] kvm_device_ioctl_attr+0x9c/0xd8 [ 69.696424] kvm_device_ioctl+0x8c/0xf8 [ 69.696788] do_vfs_ioctl+0xc8/0x960 [ 69.697128] ksys_ioctl+0x8c/0xa0 [ 69.697445] __arm64_sys_ioctl+0x28/0x38 [ 69.697817] el0_svc_common+0xd8/0x138 [ 69.698173] el0_svc_handler+0x38/0x78 [ 69.698528] el0_svc+0x8/0xc The fix is to obviously take the srcu lock, just like we do on the read side of things since bf308242ab98. One wonders why this wasn't fixed at the same time, but hey... Fixes: bf308242ab98 ("KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock") Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 11 +++++++++++ arch/arm64/include/asm/kvm_mmu.h | 11 +++++++++++ virt/kvm/arm/vgic/vgic-its.c | 8 ++++---- virt/kvm/arm/vgic/vgic-v3.c | 4 ++-- 4 files changed, 28 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 2de96a180166..31de4ab93005 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, return ret; } +static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_write_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + static inline void *kvm_get_hyp_vector(void) { switch(read_cpuid_part()) { diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b0742a16c6c9..ebeefcf835e8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, return ret; } +static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_write_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + #ifdef CONFIG_KVM_INDIRECT_VECTORS /* * EL2 vectors can be mapped and rerouted in a number of ways, diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index ab3f47745d9c..c41e11fd841c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -1919,7 +1919,7 @@ 
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | ite->collection->collection_id; val = cpu_to_le64(val); - return kvm_write_guest(kvm, gpa, &val, ite_esz); + return kvm_write_guest_lock(kvm, gpa, &val, ite_esz); } /** @@ -2066,7 +2066,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | (dev->num_eventid_bits - 1)); val = cpu_to_le64(val); - return kvm_write_guest(kvm, ptr, &val, dte_esz); + return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); } /** @@ -2246,7 +2246,7 @@ static int vgic_its_save_cte(struct vgic_its *its, ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | collection->collection_id); val = cpu_to_le64(val); - return kvm_write_guest(its->dev->kvm, gpa, &val, esz); + return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); } static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) @@ -2317,7 +2317,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) */ val = 0; BUG_ON(cte_esz > sizeof(val)); - ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz); + ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); return ret; } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 408a78eb6a97..9f87e58dbd4a 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -358,7 +358,7 @@ retry: if (status) { /* clear consumed data */ val &= ~(1 << bit_nr); - ret = kvm_write_guest(kvm, ptr, &val, 1); + ret = kvm_write_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; } @@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) else val &= ~(1 << bit_nr); - ret = kvm_write_guest(kvm, ptr, &val, 1); + ret = kvm_write_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; } -- cgit v1.2.3 From 032f85c9360fb1a08385c584c2c4ed114b33c260 Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Mon, 4 Mar 2019 11:49:40 +0100 Subject: ARM: dts: pfla02: increase phy reset duration Increase the reset duration to ensure correct phy functionality. The reset duration is taken from barebox commit 52fdd510de ("ARM: dts: pfla02: use long enough reset for ethernet phy"): Use a longer reset time for ethernet phy Micrel KSZ9031RNX. Otherwise a small percentage of modules have 'transmission timeouts' errors like barebox@Phytec phyFLEX-i.MX6 Quad Carrier-Board:/ ifup eth0 warning: No MAC address set. 
Using random address 7e:94:4d:02:f8:f3 eth0: 1000Mbps full duplex link detected eth0: transmission timeout T eth0: transmission timeout T eth0: transmission timeout T eth0: transmission timeout T eth0: transmission timeout Cc: Stefan Christ Cc: Christian Hemp Signed-off-by: Marco Felsch Fixes: 3180f956668e ("ARM: dts: Phytec imx6q pfla02 and pbab01 support") Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 433bf09a1954..027df06c5dc7 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -91,6 +91,7 @@ pinctrl-0 = <&pinctrl_enet>; phy-handle = <&ethphy>; phy-mode = "rgmii"; + phy-reset-duration = <10>; /* in msecs */ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; phy-supply = <&vdd_eth_io_reg>; status = "disabled"; -- cgit v1.2.3 From 3c3736cd32bf5197aed1410ae826d2d254a5b277 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 20 Mar 2019 14:57:19 +0000 Subject: KVM: arm/arm64: Fix handling of stage2 huge mappings We rely on the mmu_notifier callbacks to handle the split/merge of huge pages and thus we are guaranteed that, while creating a block mapping, either the entire block is unmapped at stage2 or it is missing permission. However, we miss a case where the block mapping is split for the dirty logging case and then could later be made a block mapping again, if we cancel the dirty logging. This not only creates inconsistent TLB entries for the pages in the block, but also leaks the table pages for PMD level. Handle this corner case for the huge mappings at stage2 by unmapping the non-huge mapping for the block. This could potentially release the upper level table. So we need to restart the table walk once we unmap the range. Fixes: ad361f093c1e31d ("KVM: ARM: Support hugetlbfs backed huge pages") Reported-by: Zheng Xiang Cc: Zheng Xiang Cc: Zenghui Yu Cc: Christoffer Dall Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/stage2_pgtable.h | 2 ++ virt/kvm/arm/mmu.c | 59 +++++++++++++++++++++++++---------- 2 files changed, 45 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index de2089501b8b..9e11dce55e06 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) #define S2_PMD_MASK PMD_MASK #define S2_PMD_SIZE PMD_SIZE +#define S2_PUD_MASK PUD_MASK +#define S2_PUD_SIZE PUD_SIZE static inline bool kvm_stage2_has_pmd(struct kvm *kvm) { diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index bcdf978c0d1d..f9da2fad9bd6 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1067,25 +1067,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache { pmd_t *pmd, old_pmd; +retry: pmd = stage2_get_pmd(kvm, cache, addr); VM_BUG_ON(!pmd); old_pmd = *pmd; + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged.
+ */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + if (pmd_present(old_pmd)) { /* - * Multiple vcpus faulting on the same PMD entry, can - * lead to them sequentially updating the PMD with the - * same value. Following the break-before-make - * (pmd_clear() followed by tlb_flush()) process can - * hinder forward progress due to refaults generated - * on missing translations. + * If we already have PTE level mapping for this block, + * we must unmap it to avoid inconsistent TLB state and + * leaking the table page. We could end up in this situation + * if the memory slot was marked for dirty logging and was + * reverted, leaving PTE level mappings for the pages accessed + * during the period. So, unmap the PTE level mapping for this + * block and retry, as we could have released the upper level + * table in the process. * - * Skip updating the page table if the entry is - * unchanged. + * Normal THP split/merge follows mmu_notifier callbacks and do + * get handled accordingly. */ - if (pmd_val(old_pmd) == pmd_val(*new_pmd)) - return 0; - + if (!pmd_thp_or_huge(old_pmd)) { + unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE); + goto retry; + } /* * Mapping in huge pages should only happen through a * fault. If a page is merged into a transparent huge @@ -1097,8 +1115,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache * should become splitting first, unmapped, merged, * and mapped back in on-demand. */ - VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); - + WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); pmd_clear(pmd); kvm_tlb_flush_vmid_ipa(kvm, addr); } else { @@ -1114,6 +1131,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac { pud_t *pudp, old_pud; +retry: pudp = stage2_get_pud(kvm, cache, addr); VM_BUG_ON(!pudp); @@ -1121,14 +1139,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac /* * A large number of vcpus faulting on the same stage 2 entry, - * can lead to a refault due to the - * stage2_pud_clear()/tlb_flush(). Skip updating the page - * tables if there is no change. + * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). + * Skip updating the page tables if there is no change. */ if (pud_val(old_pud) == pud_val(*new_pudp)) return 0; if (stage2_pud_present(kvm, old_pud)) { + /* + * If we already have table level mapping for this block, unmap + * the range for this block and retry. + */ + if (!stage2_pud_huge(kvm, old_pud)) { + unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE); + goto retry; + } + + WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); stage2_pud_clear(kvm, pudp); kvm_tlb_flush_vmid_ipa(kvm, addr); } else { -- cgit v1.2.3 From 3123be11683ed8c2f26f787df81966b538ca9f72 Mon Sep 17 00:00:00 2001 From: Nishad Kamdar Date: Mon, 11 Mar 2019 19:57:04 +0530 Subject: ARM: dts: imx6ull: Use the correct style for SPDX License Identifier This patch corrects the SPDX License Identifier style in imx6ull-pinfunc-snvs.h. Changes made by using a script provided by Joe Perches here: https://lkml.org/lkml/2019/2/7/46 and making some manual changes. 
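The distinction being corrected: the kernel's license-rules documentation uses C99 // comments for SPDX tags in .c sources, but C-style block comments in headers, which may also be pulled into non-C contexts such as assembly. A minimal illustration of the two forms:

/* In a .c source file: */
// SPDX-License-Identifier: GPL-2.0

/* In a header, as restored by this patch: */
/* SPDX-License-Identifier: GPL-2.0 */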
Suggested-by: Joe Perches Signed-off-by: Nishad Kamdar Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6ull-pinfunc-snvs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h index f6fb6783c193..54cfe72295aa 100644 --- a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h +++ b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. * Copyright (C) 2017 NXP -- cgit v1.2.3 From 8efd6365417a044db03009724ecc1a9521524913 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Wed, 13 Mar 2019 17:28:37 -0500 Subject: arm64: dts: stratix10: add the sysmgr-syscon property from the gmac's The gmac ethernet driver uses the "altr,sysmgr-syscon" property to configure phy settings for the gmac controller. Add the "altr,sysmgr-syscon" property to all gmac nodes. This patch fixes: [ 0.917530] socfpga-dwmac ff800000.ethernet: No sysmgr-syscon node found [ 0.924209] socfpga-dwmac ff800000.ethernet: Unable to parse OF data Cc: stable@vger.kernel.org Reported-by: Ley Foon Tan Signed-off-by: Dinh Nguyen --- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 7c649f6b14cb..cd7c76e58b09 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -162,6 +162,7 @@ rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; iommus = <&smmu 1>; + altr,sysmgr-syscon = <&sysmgr 0x44 0>; status = "disabled"; }; @@ -179,6 +180,7 @@ rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; iommus = <&smmu 2>; + altr,sysmgr-syscon = <&sysmgr 0x48 0>; status = "disabled"; }; @@ -196,6 +198,7 @@ rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; iommus = <&smmu 3>; + altr,sysmgr-syscon = <&sysmgr 0x4c 0>; status = "disabled"; }; -- cgit v1.2.3 From 41b37f4c0fa67185691bcbd30201cad566f2f0d1 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 19 Mar 2019 01:30:09 +0900 Subject: ARM: dts: imx6qdl: Fix typo in imx6qdl-icore-rqs.dtsi This patch fixes a spelling typo. 
Signed-off-by: Masanari Iida Fixes: cc42603de320 ("ARM: dts: imx6q-icore-rqs: Add Engicam IMX6 Q7 initial support") Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi index 1d1b4bd0670f..a4217f564a53 100644 --- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi @@ -264,7 +264,7 @@ pinctrl-2 = <&pinctrl_usdhc3_200mhz>; vmcc-supply = <&reg_sd3_vmmc>; cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; - bus-witdh = <4>; + bus-width = <4>; no-1-8-v; status = "okay"; }; @@ -275,7 +275,7 @@ pinctrl-1 = <&pinctrl_usdhc4_100mhz>; pinctrl-2 = <&pinctrl_usdhc4_200mhz>; vmcc-supply = <&reg_sd4_vmmc>; - bus-witdh = <8>; + bus-width = <8>; no-1-8-v; non-removable; status = "okay"; -- cgit v1.2.3 From 15b43e497ffd80ca44c00d781869a0e71f223ea5 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Wed, 20 Mar 2019 12:09:05 +0100 Subject: ARM: dts: imx6dl-yapp4: Use correct pseudo PHY address for the switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The switch is accessible through a pseudo PHY, which is located at 0x10. Signed-off-by: Michal Vokáč Fixes: 87489ec3a77f ("ARM: dts: imx: Add Y Soft IOTA Draco, Hydra and Ursa boards") Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi index 091d829f6b05..e8d800fec637 100644 --- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi +++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi @@ -114,9 +114,9 @@ reg = <2>; }; - switch@0 { + switch@10 { compatible = "qca,qca8334"; - reg = <0>; + reg = <10>; switch_ports: ports { #address-cells = <1>; -- cgit v1.2.3 From 728e096dd70889c2e80dd4153feee91afb1daf72 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 10 Jan 2019 21:19:33 +0100 Subject: ARM: imx_v6_v7_defconfig: continue compiling the pwm driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After the pwm-imx driver was split into two drivers and the Kconfig symbol changed accordingly, use the new name to continue being able to use the PWM hardware. Signed-off-by: Uwe Kleine-König Signed-off-by: Shawn Guo --- arch/arm/configs/imx_v6_v7_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 5586a5074a96..50fb01d70b10 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -398,7 +398,7 @@ CONFIG_MAG3110=y CONFIG_MPL3115=y CONFIG_PWM=y CONFIG_PWM_FSL_FTM=y -CONFIG_PWM_IMX=y +CONFIG_PWM_IMX27=y CONFIG_NVMEM_IMX_OCOTP=y CONFIG_NVMEM_VF610_OCOTP=y CONFIG_TEE=y -- cgit v1.2.3 From 507aaeeef80d70c46bdf07cda49234b36c2bbdcb Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 10 Jan 2019 21:19:34 +0100 Subject: ARM: imx_v4_v5_defconfig: enable PWM driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While there is no mainline board that makes use of the PWM, still enable the driver for it to increase compile test coverage.
Signed-off-by: Uwe Kleine-König Signed-off-by: Shawn Guo --- arch/arm/configs/imx_v4_v5_defconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index 8661dd9b064a..b37f8e675e40 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_IIO=y CONFIG_FSL_MX25_ADC=y +CONFIG_PWM=y +CONFIG_PWM_IMX1=y +CONFIG_PWM_IMX27=y CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_VFAT_FS=y -- cgit v1.2.3 From 3e2cf62efec52fb49daed437cc486c3cb9a0afa2 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Tue, 19 Mar 2019 21:19:52 +0100 Subject: ARM: OMAP1: ams-delta: Fix broken GPIO ID allocation In order to request dynamic allocation of GPIO IDs, a negative number should be passed as a base GPIO ID via platform data. Unfortunately, commit 771e53c4d1a1 ("ARM: OMAP1: ams-delta: Drop board specific global GPIO numbers") didn't follow that rule while switching to dynamically allocated GPIO IDs for Amstrad Delta latches, making their IDs overlap with those already assigned to OMAP GPIO devices. Fix it. Fixes: 771e53c4d1a1 ("ARM: OMAP1: ams-delta: Drop board specific global GPIO numbers") Signed-off-by: Janusz Krzysztofik Cc: stable@vger.kernel.org Acked-by: Aaro Koskinen Signed-off-by: Tony Lindgren --- arch/arm/mach-omap1/board-ams-delta.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index be30c3c061b4..1b15d593837e 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -182,6 +182,7 @@ static struct resource latch1_resources[] = { static struct bgpio_pdata latch1_pdata = { .label = LATCH1_LABEL, + .base = -1, .ngpio = LATCH1_NGPIO, }; @@ -219,6 +220,7 @@ static struct resource latch2_resources[] = { static struct bgpio_pdata latch2_pdata = { .label = LATCH2_LABEL, + .base = -1, .ngpio = LATCH2_NGPIO, }; -- cgit v1.2.3 From 30645307e5d2c8a4caf978558c66121ac91ad17e Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 23 Feb 2019 14:20:42 +0100 Subject: ARM: OMAP2+: add missing of_node_put after of_device_is_available Add an of_node_put when a tested device node is not available. The semantic patch that fixes this problem is as follows (http://coccinelle.lip6.fr): // @@ identifier f; local idexpression e; expression x; @@ e = f(...); ... when != of_node_put(e) when != x = e when != e = x when any if (<+...of_device_is_available(e)...+>) { ...
when != of_node_put(e) ( return e; | + of_node_put(e); return ...; ) } // Fixes: e0c827aca0730 ("drm/omap: Populate DSS children in omapdss driver") Signed-off-by: Julia Lawall Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/display.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 1444b4b4bd9f..439e143cad7b 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -250,8 +250,10 @@ static int __init omapdss_init_of(void) if (!node) return 0; - if (!of_device_is_available(node)) + if (!of_device_is_available(node)) { + of_node_put(node); return 0; + } pdev = of_find_device_by_node(node); -- cgit v1.2.3 From 4f96dc0a3e79ec257a2b082dab3ee694ff88c317 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 15 Mar 2019 12:59:09 +0200 Subject: ARM: dts: am335x-evm: Correct the regulators for the audio codec Correctly map the regulators used by tlv320aic3106. Both 1.8V and 3.3V for the codec are derived from VBAT via fixed regulators. Cc: # v4.14+ Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-evm.dts | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index dce5be5df97b..edcff79879e7 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -57,6 +57,24 @@ enable-active-high; }; + /* TPS79501 */ + v1_8d_reg: fixedregulator-v1_8d { + compatible = "regulator-fixed"; + regulator-name = "v1_8d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + /* TPS79501 */ + v3_3d_reg: fixedregulator-v3_3d { + compatible = "regulator-fixed"; + regulator-name = "v3_3d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + matrix_keypad: matrix_keypad0 { compatible = "gpio-matrix-keypad"; debounce-delay-ms = <5>; @@ -499,10 +517,10 @@ status = "okay"; /* Regulators */ - AVDD-supply = <&vaux2_reg>; - IOVDD-supply = <&vaux2_reg>; - DRVDD-supply = <&vaux2_reg>; - DVDD-supply = <&vbat>; + AVDD-supply = <&v3_3d_reg>; + IOVDD-supply = <&v3_3d_reg>; + DRVDD-supply = <&v3_3d_reg>; + DVDD-supply = <&v1_8d_reg>; }; }; -- cgit v1.2.3 From 6691370646e844be98bb6558c024269791d20bd7 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 15 Mar 2019 12:59:17 +0200 Subject: ARM: dts: am335x-evmsk: Correct the regulators for the audio codec Correctly map the regulators used by tlv320aic3106. Both 1.8V and 3.3V for the codec are derived from VBAT via fixed regulators.
Cc: # v4.14+ Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-evmsk.dts | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index b128998097ce..2c2d8b5b8cf5 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -73,6 +73,24 @@ enable-active-high; }; + /* TPS79518 */ + v1_8d_reg: fixedregulator-v1_8d { + compatible = "regulator-fixed"; + regulator-name = "v1_8d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + /* TPS78633 */ + v3_3d_reg: fixedregulator-v3_3d { + compatible = "regulator-fixed"; + regulator-name = "v3_3d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + leds { pinctrl-names = "default"; pinctrl-0 = <&user_leds_s0>; @@ -501,10 +519,10 @@ status = "okay"; /* Regulators */ - AVDD-supply = <&vaux2_reg>; - IOVDD-supply = <&vaux2_reg>; - DRVDD-supply = <&vaux2_reg>; - DVDD-supply = <&vbat>; + AVDD-supply = <&v3_3d_reg>; + IOVDD-supply = <&v3_3d_reg>; + DRVDD-supply = <&v3_3d_reg>; + DVDD-supply = <&v1_8d_reg>; }; }; -- cgit v1.2.3 From 7f2daa96759b0700ad28579133aa91bc663632a7 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Sun, 10 Mar 2019 01:29:44 +0800 Subject: x86/resctrl: Remove unused variable Variable "struct rdt_resource *r" is set but not used. So remove it. Signed-off-by: Peng Hao Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/1552152584-26087-1-git-send-email-peng.hao2@zte.com.cn --- arch/x86/kernel/cpu/resctrl/monitor.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index f33f11f69078..1573a0a6b525 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -501,11 +501,8 @@ out_unlock: void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) { unsigned long delay = msecs_to_jiffies(delay_ms); - struct rdt_resource *r; int cpu; - r = &rdt_resources_all[RDT_RESOURCE_L3]; - cpu = cpumask_any(&dom->cpu_mask); dom->cqm_work_cpu = cpu; -- cgit v1.2.3 From d040e4e8deeaa8257d6aa260e29ad69832b5d630 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 20 Mar 2019 13:14:00 -0700 Subject: ARM: dts: rockchip: Fix gpu opp node names for rk3288 The device tree compiler yells like this: Warning (unit_address_vs_reg): /gpu-opp-table/opp@100000000: node has a unit name, but no reg property Let's match the cpu opp node names and use a dash. 
Signed-off-by: Douglas Anderson Reviewed-by: Matthias Kaehlcke Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk3288.dtsi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 09868dcee34b..df0c5456c94f 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -1282,27 +1282,27 @@ gpu_opp_table: gpu-opp-table { compatible = "operating-points-v2"; - opp@100000000 { + opp-100000000 { opp-hz = /bits/ 64 <100000000>; opp-microvolt = <950000>; }; - opp@200000000 { + opp-200000000 { opp-hz = /bits/ 64 <200000000>; opp-microvolt = <950000>; }; - opp@300000000 { + opp-300000000 { opp-hz = /bits/ 64 <300000000>; opp-microvolt = <1000000>; }; - opp@400000000 { + opp-400000000 { opp-hz = /bits/ 64 <400000000>; opp-microvolt = <1100000>; }; - opp@500000000 { + opp-500000000 { opp-hz = /bits/ 64 <500000000>; opp-microvolt = <1200000>; }; - opp@600000000 { + opp-600000000 { opp-hz = /bits/ 64 <600000000>; opp-microvolt = <1250000>; }; -- cgit v1.2.3 From 282e2e078ba5338c72150477b743794bc7523917 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 20 Mar 2019 13:14:01 -0700 Subject: ARM: dts: rockchip: Remove #address/#size-cells from rk3288 mipi_dsi They are pointless. As dtc points out: Warning (avoid_unnecessary_addr_size): /mipi@ff960000: unnecessary #address-cells/#size-cells without "ranges" or child "reg" property Let's remove them. Signed-off-by: Douglas Anderson Reviewed-by: Matthias Kaehlcke Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk3288.dtsi | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index df0c5456c94f..a024d1e7e74c 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -1119,8 +1119,6 @@ clock-names = "ref", "pclk"; power-domains = <&power RK3288_PD_VIO>; rockchip,grf = <&grf>; - #address-cells = <1>; - #size-cells = <0>; status = "disabled"; ports { -- cgit v1.2.3 From 1a96665143c355b1019ed13b927266185d2a1e4f Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 20 Mar 2019 13:14:02 -0700 Subject: ARM: dts: rockchip: Remove #address/#size-cells from rk3288-veyron gpio-keys They are pointless. As dtc points out: Warning (avoid_unnecessary_addr_size): /gpio-keys: unnecessary #address-cells/#size-cells without "ranges" or child "reg" property Let's remove them. 
Signed-off-by: Douglas Anderson Reviewed-by: Matthias Kaehlcke Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk3288-veyron.dtsi | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 0bc2409f6903..192dbc089ade 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -25,8 +25,6 @@ gpio_keys: gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; pinctrl-names = "default"; pinctrl-0 = <&pwr_key_l>; -- cgit v1.2.3 From d9470757398a700d9450a43508000bcfd010c7a4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 22 Mar 2019 23:37:24 +1100 Subject: powerpc/64: Fix memcmp reading past the end of src/dest Chandan reported that fstests' generic/026 test hit a crash: BUG: Unable to handle kernel data access at 0xc00000062ac40000 Faulting instruction address: 0xc000000000092240 Oops: Kernel access of bad area, sig: 11 [#1] LE SMP NR_CPUS=2048 DEBUG_PAGEALLOC NUMA pSeries CPU: 0 PID: 27828 Comm: chacl Not tainted 5.0.0-rc2-next-20190115-00001-g6de6dba64dda #1 NIP: c000000000092240 LR: c00000000066a55c CTR: 0000000000000000 REGS: c00000062c0c3430 TRAP: 0300 Not tainted (5.0.0-rc2-next-20190115-00001-g6de6dba64dda) MSR: 8000000002009033 CR: 44000842 XER: 20000000 CFAR: 00007fff7f3108ac DAR: c00000062ac40000 DSISR: 40000000 IRQMASK: 0 GPR00: 0000000000000000 c00000062c0c36c0 c0000000017f4c00 c00000000121a660 GPR04: c00000062ac3fff9 0000000000000004 0000000000000020 00000000275b19c4 GPR08: 000000000000000c 46494c4500000000 5347495f41434c5f c0000000026073a0 GPR12: 0000000000000000 c0000000027a0000 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: c00000062ea70020 c00000062c0c38d0 0000000000000002 0000000000000002 GPR24: c00000062ac3ffe8 00000000275b19c4 0000000000000001 c00000062ac30000 GPR28: c00000062c0c38d0 c00000062ac30050 c00000062ac30058 0000000000000000 NIP memcmp+0x120/0x690 LR xfs_attr3_leaf_lookup_int+0x53c/0x5b0 Call Trace: xfs_attr3_leaf_lookup_int+0x78/0x5b0 (unreliable) xfs_da3_node_lookup_int+0x32c/0x5a0 xfs_attr_node_addname+0x170/0x6b0 xfs_attr_set+0x2ac/0x340 __xfs_set_acl+0xf0/0x230 xfs_set_acl+0xd0/0x160 set_posix_acl+0xc0/0x130 posix_acl_xattr_set+0x68/0x110 __vfs_setxattr+0xa4/0x110 __vfs_setxattr_noperm+0xac/0x240 vfs_setxattr+0x128/0x130 setxattr+0x248/0x600 path_setxattr+0x108/0x120 sys_setxattr+0x28/0x40 system_call+0x5c/0x70 Instruction dump: 7d201c28 7d402428 7c295040 38630008 38840008 408201f0 4200ffe8 2c050000 4182ff6c 20c50008 54c61838 7d201c28 <7d402428> 7d293436 7d4a3436 7c295040 The instruction dump decodes as: subfic r6,r5,8 rlwinm r6,r6,3,0,28 ldbrx r9,0,r3 ldbrx r10,0,r4 <- Which shows us doing an 8 byte load from c00000062ac3fff9, which crosses the page boundary at c00000062ac40000 and faults. It's not OK for memcmp to read past the end of the source or destination buffers if that would cross a page boundary, because we don't know that the next page is mapped. As pointed out by Segher, we can read past the end of the source or destination as long as we don't cross a 4K boundary, because that's our minimum page size on all platforms. The bug is in the code at the .Lcmp_rest_lt8bytes label. When we get there we know that s1 is 8-byte aligned and we have at least 1 byte to read, so a single 8-byte load won't read past the end of s1 and cross a page boundary. But we have to be more careful with s2. 
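In C terms, the guard being introduced can be sketched like this (an illustration of the logic, not the kernel code):

	/*
	 * An unaligned 8-byte load from s2 touches bytes s2 .. s2 + 7, so it
	 * can spill into the next page whenever s2 sits in the last seven
	 * bytes of a 4K page (the conservative minimum page size). This
	 * mirrors the clrldi/cmpdi/bgt sequence added in the patch below.
	 */
	static inline int load8_may_cross_4k_page(const void *s2)
	{
		return ((unsigned long)s2 & 0xfff) > 0xff8;
	}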
So check if it's within 8 bytes of a 4K boundary and if so go to the byte-by-byte loop. Fixes: 2d9ee327adce ("powerpc/64: Align bytes before fall back to .Lshort in powerpc64 memcmp()") Cc: stable@vger.kernel.org # v4.19+ Reported-by: Chandan Rajendra Signed-off-by: Michael Ellerman Reviewed-by: Segher Boessenkool Tested-by: Chandan Rajendra Signed-off-by: Michael Ellerman --- arch/powerpc/lib/memcmp_64.S | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S index 844d8e774492..b7f6f6e0b6e8 100644 --- a/arch/powerpc/lib/memcmp_64.S +++ b/arch/powerpc/lib/memcmp_64.S @@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp) beq .Lzero .Lcmp_rest_lt8bytes: - /* Here we have only less than 8 bytes to compare with. at least s1 - * Address is aligned with 8 bytes. - * The next double words are load and shift right with appropriate - * bits. + /* + * Here we have less than 8 bytes to compare. At least s1 is aligned to + * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a + * page boundary, otherwise we might read past the end of the buffer and + * trigger a page fault. We use 4K as the conservative minimum page + * size. If we detect that case we go to the byte-by-byte loop. + * + * Otherwise the next double word is loaded from s1 and s2, and shifted + * right to compare the appropriate bits. */ + clrldi r6,r4,(64-12) // r6 = r4 & 0xfff + cmpdi r6,0xff8 + bgt .Lshort + subfic r6,r5,8 slwi r6,r6,3 LD rA,0,r3 -- cgit v1.2.3 From 2dbed152e2d4c3fe2442284918d14797898b1e8a Mon Sep 17 00:00:00 2001 From: Sekhar Nori Date: Wed, 20 Feb 2019 16:36:52 +0530 Subject: ARM: davinci: fix build failure with allnoconfig allnoconfig build with just ARCH_DAVINCI enabled fails because drivers/clk/davinci/* depends on REGMAP being enabled. Fix it by selecting REGMAP_MMIO when building in DaVinci support. Signed-off-by: Sekhar Nori Reviewed-by: David Lechner Signed-off-by: Arnd Bergmann --- arch/arm/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 054ead960f98..850b4805e2d1 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -596,6 +596,7 @@ config ARCH_DAVINCI select HAVE_IDE select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF + select REGMAP_MMIO select RESET_CONTROLLER select SPARSE_IRQ select USE_OF -- cgit v1.2.3 From fa9463564e77067df81b0b8dec91adbbbc47bfb4 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 18 Mar 2019 16:31:22 +0100 Subject: ARM: dts: nomadik: Fix polarity of SPI CS The SPI DT bindings are, for historical reasons, a pitfall: the ability to flag a GPIO line as active high/low with the second cell flags was introduced later, so the SPI subsystem will only accept the bool flag spi-cs-high to indicate that the line is active high. It worked by mistake, but the mistake was corrected in another commit. The comment in the DTS file was also misleading: this CS is indeed active high.
Fixes: cffbb02dafa3 ("ARM: dts: nomadik: Augment NHK15 panel setting") Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann --- arch/arm/boot/dts/ste-nomadik-nhk15.dts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index 04066f9cb8a3..f2f6558a00f1 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -213,12 +213,13 @@ gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>; gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>; /* - * It's not actually active high, but the frameworks assume - * the polarity of the passed-in GPIO is "normal" (active - * high) then actively drives the line low to select the - * chip. + * This chipselect is active high. Just setting the flags + * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings, + * it will be ignored, only the special "spi-cs-high" flag + * really counts. */ cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + spi-cs-high; num-chipselects = <1>; /* -- cgit v1.2.3 From 93958742192e7956d05989836ada9071f9ffe42e Mon Sep 17 00:00:00 2001 From: Jonathan Hunter Date: Mon, 25 Mar 2019 12:28:07 +0100 Subject: arm64: tegra: Disable CQE Support for SDMMC4 on Tegra186 Enabling CQE support on Tegra186 Jetson TX2 has introduced a regression that is causing accesses to the file-system on the eMMC to fail. Errors such as the following have been observed ... mmc2: running CQE recovery mmc2: mmc_select_hs400 failed, error -110 print_req_error: I/O error, dev mmcblk2, sector 8 flags 80700 mmc2: cqhci: CQE failed to exit halt state For now disable CQE support for Tegra186 until this issue is resolved. Fixes: dfd3cb6feb73 ("arm64: tegra: Add CQE Support for SDMMC4") Signed-off-by: Jonathan Hunter Signed-off-by: Thierry Reding Signed-off-by: Arnd Bergmann --- arch/arm64/boot/dts/nvidia/tegra186.dtsi | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index bb2045be8814..97aeb946ed5e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -321,7 +321,6 @@ nvidia,default-trim = <0x9>; nvidia,dqs-trim = <63>; mmc-hs400-1_8v; - supports-cqe; status = "disabled"; }; -- cgit v1.2.3 From 7d56bedb2730dc2ea8abf0fd7240ee99ecfee3c9 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 26 Mar 2019 10:32:23 -0700 Subject: ARM: dts: Fix dcan clkctrl clock for am3 We must not use legacy clock defines for dts clkctrl clocks as the offsets will be wrong.
Fixes: 87fc89ced3a7 ("ARM: dts: am335x: Move l4 child devices to probe them with ti-sysc") Cc: Tero Kristo Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am33xx-l4.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index f459ec316a22..ca6d9f02a800 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -1762,7 +1762,7 @@ reg = <0xcc000 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>; + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; @@ -1785,7 +1785,7 @@ reg = <0xd0000 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>; + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; -- cgit v1.2.3 From ce9afe08e71e3f7d64f337a6e932e50849230fc2 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Fri, 8 Mar 2019 21:03:24 +0530 Subject: powerpc/pseries/energy: Use OF accessor functions to read ibm,drc-indexes In cpu_to_drc_index() in the case when FW_FEATURE_DRC_INFO is absent, we currently use of_get_property() to obtain the pointer to the array corresponding to the property "ibm,drc-indexes". The elements of this array are of type __be32, but are accessed without any conversion to the OS-endianness, which is buggy on a Little Endian OS. Fix this by using the of_property_read_u32_index() accessor function to safely read the elements of the array. Fixes: e83636ac3334 ("pseries/drc-info: Search DRC properties for CPU indexes") Cc: stable@vger.kernel.org # v4.16+ Reported-by: Pavithra R. Prakash Signed-off-by: Gautham R. Shenoy Reviewed-by: Vaidyanathan Srinivasan [mpe: Make the WARN_ON a WARN_ON_ONCE so it's not retriggerable] Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/pseries_energy.c | 27 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index 6ed22127391b..921f12182f3e 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu) ret = drc.drc_index_start + (thread_index * drc.sequential_inc); } else { - const __be32 *indexes; - - indexes = of_get_property(dn, "ibm,drc-indexes", NULL); - if (indexes == NULL) - goto err_of_node_put; + u32 nr_drc_indexes, thread_drc_index; /* - * The first element indexes[0] is the number of drc_indexes - * returned in the list. Hence thread_index+1 will get the - * drc_index corresponding to core number thread_index. + * The first element of ibm,drc-indexes array is the + * number of drc_indexes returned in the list. Hence + * thread_index+1 will get the drc_index corresponding + * to core number thread_index.
*/ - ret = indexes[thread_index + 1]; + rc = of_property_read_u32_index(dn, "ibm,drc-indexes", + 0, &nr_drc_indexes); + if (rc) + goto err_of_node_put; + + WARN_ON_ONCE(thread_index > nr_drc_indexes); + rc = of_property_read_u32_index(dn, "ibm,drc-indexes", + thread_index + 1, + &thread_drc_index); + if (rc) + goto err_of_node_put; + + ret = thread_drc_index; } rc = 0; -- cgit v1.2.3 From dbee9c9c45846f003ec2f819710c2f4835630a6a Mon Sep 17 00:00:00 2001 From: Alan Kao Date: Fri, 22 Mar 2019 14:37:04 +0800 Subject: riscv: fix accessing 8-byte variable from RV32 A memory save operation to an 8-byte variable in RV32 is divided into two sw instructions in the put_user macro. The current fixup returns execution flow to the second sw instead of the one after it. This patch fixes this fixup code according to the load access part. Signed-off-by: Alan Kao Cc: Greentime Hu Cc: Vincent Chen Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index a00168b980d2..fb53a8089e76 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -300,7 +300,7 @@ do { \ " .balign 4\n" \ "4:\n" \ " li %0, %6\n" \ - " jump 2b, %1\n" \ + " jump 3b, %1\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .balign " RISCV_SZPTR "\n" \ -- cgit v1.2.3 From 387181dcdb6c1ee254efab4744846a7a53d4c4cb Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 26 Mar 2019 08:03:47 +0000 Subject: RISC-V: Always compile mm/init.c with cmodel=medany and notrace The Linux RISC-V 32bit kernel is broken after we moved setup_vm() from kernel/setup.c to mm/init.c because the Linux RISC-V 32bit kernel by default uses cmodel=medlow which results in a non-position-independent setup_vm(). This patch fixes Linux RISC-V 32bit kernel booting by: 1. Forcing cmodel=medany for mm/init.c 2. Moving remaining MM-related stuff va_pa_offset, pfn_base and empty_zero_page from kernel/setup.c to mm/init.c Further, setup_vm() cannot handle GCC instrumentation for FTRACE so we disable it for mm/init.c by not using "-pg" compiler flag.
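To illustrate the code-model point (an illustration, not part of the patch): with -mcmodel=medlow the compiler may address kernel symbols absolutely, which is only valid at the linked virtual address, while -mcmodel=medany emits PC-relative sequences that also work at the physical address setup_vm() runs from with the MMU off:

	/*
	 * medlow, absolute (breaks before relocation):
	 *         lui   a0, %hi(sym)
	 *         addi  a0, a0, %lo(sym)
	 *
	 * medany, PC-relative (position independent):
	 *     1:  auipc a0, %pcrel_hi(sym)
	 *         addi  a0, a0, %pcrel_lo(1b)
	 */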
Fixes: 6f1e9e946f0b ("RISC-V: Move setup_vm() to mm/init.c") Suggested-by: Christoph Hellwig Suggested-by: Mike Rapoport Signed-off-by: Anup Patel Reviewed-by: Mike Rapoport Reviewed-by: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/Makefile | 3 --- arch/riscv/kernel/setup.c | 8 -------- arch/riscv/mm/Makefile | 6 ++++++ arch/riscv/mm/init.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 34 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index f13f7f276639..598568168d35 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -4,7 +4,6 @@ ifdef CONFIG_FTRACE CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_setup.o = -pg endif extra-y += head.o @@ -29,8 +28,6 @@ obj-y += vdso.o obj-y += cacheinfo.o obj-y += vdso/ -CFLAGS_setup.o := -mcmodel=medany - obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index ecb654f6a79e..540a331d1376 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -48,14 +48,6 @@ struct screen_info screen_info = { }; #endif -unsigned long va_pa_offset; -EXPORT_SYMBOL(va_pa_offset); -unsigned long pfn_base; -EXPORT_SYMBOL(pfn_base); - -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; -EXPORT_SYMBOL(empty_zero_page); - /* The lucky hart to first increment this variable will boot the other cores */ atomic_t hart_lottery; unsigned long boot_cpu_hartid; diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index eb22ab49b3e0..b68aac701803 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -1,3 +1,9 @@ + +CFLAGS_init.o := -mcmodel=medany +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_init.o = -pg +endif + obj-y += init.o obj-y += fault.o obj-y += extable.o diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index b379a75ac6a6..5fd8c922e1c2 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -25,6 +25,10 @@ #include #include +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] + __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); + static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; @@ -143,6 +147,11 @@ void __init setup_bootmem(void) } } +unsigned long va_pa_offset; +EXPORT_SYMBOL(va_pa_offset); +unsigned long pfn_base; +EXPORT_SYMBOL(pfn_base); + pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); @@ -172,6 +181,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) } } +/* + * setup_vm() is called from head.S with MMU-off. + * + * Following requirements should be honoured for setup_vm() to work + * correctly: + * 1) It should use PC-relative addressing for accessing kernel symbols. + * To achieve this we always use GCC cmodel=medany. + * 2) The compiler instrumentation for FTRACE will not work for setup_vm() + * so disable compiler instrumentation when FTRACE is enabled. + * + * Currently, the above requirements are honoured by using custom CFLAGS + * for init.o in mm/Makefile. + */ + +#ifndef __riscv_cmodel_medany +#error "setup_vm() is called from head.S before relocate so it should " + "not use absolute addressing." 
+#endif + asmlinkage void __init setup_vm(void) { extern char _start; -- cgit v1.2.3 From 0f02daed4e089c7a380a0ffdc9d93a5989043cf4 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 4 Mar 2019 13:55:46 +0800 Subject: x86/boot: Fix incorrect ifdeffery scope The declarations related to immovable memory handling are out of the BOOT_COMPRESSED_MISC_H #ifdef scope; wrap them inside. Signed-off-by: Baoquan He Signed-off-by: Borislav Petkov Cc: Chao Fan Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Juergen Gross Cc: "Kirill A. Shutemov" Cc: Thomas Gleixner Cc: Tom Lendacky Cc: x86-ml Link: https://lkml.kernel.org/r/20190304055546.18566-1-bhe@redhat.com --- arch/x86/boot/compressed/misc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index fd13655e0f9b..d2f184165934 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -120,8 +120,6 @@ static inline void console_init(void) void set_sev_encryption_mask(void); -#endif - /* acpi.c */ #ifdef CONFIG_ACPI acpi_physical_address get_rsdp_addr(void); @@ -135,3 +133,5 @@ int count_immovable_mem_regions(void); #else static inline int count_immovable_mem_regions(void) { return 0; } #endif + +#endif /* BOOT_COMPRESSED_MISC_H */ -- cgit v1.2.3 From b929a500d68479163c48739d809cbf4c1335db6f Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Tue, 26 Mar 2019 21:30:46 +0100 Subject: x86/realmode: Don't leak the trampoline kernel address Since commit ad67b74d2469 ("printk: hash addresses printed with %p"), at boot "____ptrval____" is printed instead of the trampoline addresses: Base memory trampoline at [(____ptrval____)] 99000 size 24576 Remove the print as we don't want to leak kernel addresses and this statement is not needed anymore. Fixes: ad67b74d2469d9b8 ("printk: hash addresses printed with %p") Signed-off-by: Matteo Croce Signed-off-by: Borislav Petkov Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190326203046.20787-1-mcroce@redhat.com --- arch/x86/realmode/init.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index d10105825d57..47d097946872 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -20,8 +20,6 @@ void __init set_real_mode_mem(phys_addr_t mem, size_t size) void *base = __va(mem); real_mode_header = (struct real_mode_header *) base; - printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", - base, (unsigned long long)mem, size); } void __init reserve_real_mode(void) -- cgit v1.2.3 From 9e0a17db517d83d568bb7fa983b54759d4e34f1f Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Wed, 27 Mar 2019 21:51:16 +0800 Subject: arm64: replace memblock_alloc_low with memblock_alloc If we use "crashkernel=Y[@X]" and the start address is above 4G, the arm64 kdump capture kernel may hit a memblock_alloc_low() failure in request_standard_resources(). Replace memblock_alloc_low() with memblock_alloc().
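In outline (a simplification; the exact low limit is arch-specific, typically the generic 4 GiB ARCH_LOW_ADDRESS_LIMIT), the difference between the two allocators explains the failure with the memory layout shown in the boot log below:

	/*
	 * memblock_alloc_low() restricts the candidate range to
	 * [0, ARCH_LOW_ADDRESS_LIMIT] and so returns NULL when all usable
	 * memory sits above 4G, as in the capture kernel layout below;
	 * memblock_alloc() may place the allocation anywhere accessible.
	 */
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);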
[ 0.000000] MEMBLOCK configuration: [ 0.000000] memory size = 0x0000000040650000 reserved size = 0x0000000004db7f39 [ 0.000000] memory.cnt = 0x6 [ 0.000000] memory[0x0] [0x00000000395f0000-0x000000003968ffff], 0x00000000000a0000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x1] [0x0000000039730000-0x000000003973ffff], 0x0000000000010000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x2] [0x0000000039780000-0x000000003986ffff], 0x00000000000f0000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x3] [0x0000000039890000-0x0000000039d0ffff], 0x0000000000480000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x4] [0x000000003ed00000-0x000000003ed2ffff], 0x0000000000030000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x5] [0x0000002040000000-0x000000207fffffff], 0x0000000040000000 bytes on node 0 flags: 0x0 [ 0.000000] reserved.cnt = 0x7 [ 0.000000] reserved[0x0] [0x0000002040080000-0x0000002041c4dfff], 0x0000000001bce000 bytes flags: 0x0 [ 0.000000] reserved[0x1] [0x0000002041c53000-0x0000002042c203f8], 0x0000000000fcd3f9 bytes flags: 0x0 [ 0.000000] reserved[0x2] [0x000000207da00000-0x000000207dbfffff], 0x0000000000200000 bytes flags: 0x0 [ 0.000000] reserved[0x3] [0x000000207ddef000-0x000000207fbfffff], 0x0000000001e11000 bytes flags: 0x0 [ 0.000000] reserved[0x4] [0x000000207fdf2b00-0x000000207fdfc03f], 0x0000000000009540 bytes flags: 0x0 [ 0.000000] reserved[0x5] [0x000000207fdfd000-0x000000207ffff3ff], 0x0000000000202400 bytes flags: 0x0 [ 0.000000] reserved[0x6] [0x000000207ffffe00-0x000000207fffffff], 0x0000000000000200 bytes flags: 0x0 [ 0.000000] Kernel panic - not syncing: request_standard_resources: Failed to allocate 384 bytes [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 5.1.0-next-20190321+ #4 [ 0.000000] Call trace: [ 0.000000] dump_backtrace+0x0/0x188 [ 0.000000] show_stack+0x24/0x30 [ 0.000000] dump_stack+0xa8/0xcc [ 0.000000] panic+0x14c/0x31c [ 0.000000] setup_arch+0x2b0/0x5e0 [ 0.000000] start_kernel+0x90/0x52c [ 0.000000] ---[ end Kernel panic - not syncing: request_standard_resources: Failed to allocate 384 bytes ]--- Link: https://www.spinics.net/lists/arm-kernel/msg715293.html Signed-off-by: Chen Zhou Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f8482fe5a190..413d566405d1 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -217,7 +217,7 @@ static void __init request_standard_resources(void) num_standard_resources = memblock.memory.cnt; res_size = num_standard_resources * sizeof(*standard_resources); - standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); if (!standard_resources) panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); -- cgit v1.2.3 From b6ffdf27f3d4f1e9af56effe6f86989170d71e95 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 18 Mar 2019 15:50:27 +0100 Subject: s390/cpumf: Fix warning from check_processor_id Function __hw_perf_event_init() used a CPU variable without ensuring CPU preemption has been disabled. 
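The pattern at issue is generic; a minimal sketch (illustrative only, not the driver code, reusing the driver's cpu_cf_events per-CPU variable):

	static u64 read_auth_ctl(void)
	{
		struct cpu_cf_events *cpuhw;
		u64 auth;

		/*
		 * Using this_cpu_ptr(&cpu_cf_events) here, in preemptible
		 * context, is what triggers the DEBUG_PREEMPT warning below.
		 */
		cpuhw = &get_cpu_var(cpu_cf_events);	/* disables preemption */
		auth = cpuhw->info.auth_ctl;
		put_cpu_var(cpu_cf_events);		/* re-enables it */
		return auth;
	}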
This caused the following warning in the kernel log: [ 7.277085] BUG: using smp_processor_id() in preemptible [00000000] code: cf-csdiag/1892 [ 7.277111] caller is cf_diag_event_init+0x13a/0x338 [ 7.277122] CPU: 10 PID: 1892 Comm: cf-csdiag Not tainted 5.0.0-20190318.rc0.git0.9e1a11e0f602.300.fc29.s390x+debug #1 [ 7.277131] Hardware name: IBM 2964 NC9 712 (LPAR) [ 7.277139] Call Trace: [ 7.277150] ([<000000000011385a>] show_stack+0x82/0xd0) [ 7.277161] [<0000000000b7a71a>] dump_stack+0x92/0xd0 [ 7.277174] [<00000000007b7e9c>] check_preemption_disabled+0xe4/0x100 [ 7.277183] [<00000000001228aa>] cf_diag_event_init+0x13a/0x338 [ 7.277195] [<00000000002cf3aa>] perf_try_init_event+0x72/0xf0 [ 7.277204] [<00000000002d0bba>] perf_event_alloc+0x6fa/0xce0 [ 7.277214] [<00000000002dc4a8>] __s390x_sys_perf_event_open+0x398/0xd50 [ 7.277224] [<0000000000b9e8f0>] system_call+0xdc/0x2d8 [ 7.277233] 2 locks held by cf-csdiag/1892: [ 7.277241] #0: 00000000976f5510 (&sig->cred_guard_mutex){+.+.}, at: __s390x_sys_perf_event_open+0xd2e/0xd50 [ 7.277257] #1: 00000000363b11bd (&pmus_srcu){....}, at: perf_event_alloc+0x52e/0xce0 The variable is now accessed in proper context. Use get_cpu_var()/put_cpu_var() pair to disable preemption during access. As the hardware authorization settings apply to all CPUs, it does not matter which CPU is used to check the authorization setting. Remove the event->count assignment. It is not needed as function perf_event_alloc() allocates memory for the event with kzalloc() and thus count is already set to zero. Fixes: fe5908bccc56 ("s390/cpum_cf_diag: Add support for s390 counter facility diagnostic trace") Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/perf_cpum_cf_diag.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index c6fad208c2fa..b6854812d2ed 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event) */ static int __hw_perf_event_init(struct perf_event *event) { - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct perf_event_attr *attr = &event->attr; + struct cpu_cf_events *cpuhw; enum cpumf_ctr_set i; int err = 0; - debug_sprintf_event(cf_diag_dbg, 5, - "%s event %p cpu %d authorized %#x\n", __func__, - event, event->cpu, cpuhw->info.auth_ctl); + debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__, + event, event->cpu); event->hw.config = attr->config; event->hw.config_base = 0; - local64_set(&event->count, 0); - /* Add all authorized counter sets to config_base */ + /* Add all authorized counter sets to config_base. The + * the hardware init function is either called per-cpu or just once + * for all CPUS (event->cpu == -1). This depends on the whether + * counting is started for all CPUs or on a per workload base where + * the perf event moves from one CPU to another CPU. + * Checking the authorization on any CPU is fine as the hardware + * applies the same authorization settings to all CPUs. 
+ */ + cpuhw = &get_cpu_var(cpu_cf_events); for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) event->hw.config_base |= cpumf_ctr_ctl[i]; + put_cpu_var(cpu_cf_events); /* No authorized counter sets, nothing to count/sample */ if (!event->hw.config_base) { -- cgit v1.2.3 From e7dfb6d04e4715be1f3eb2c60d97b753fd2e4516 Mon Sep 17 00:00:00 2001 From: David Engraf Date: Mon, 11 Mar 2019 08:57:42 +0100 Subject: ARM: dts: at91: Fix typo in ISC_D0 on PC9 The function argument for the ISC_D0 on PC9 was incorrect. According to the documentation it should be 'C' aka 3. Signed-off-by: David Engraf Reviewed-by: Nicolas Ferre Signed-off-by: Ludovic Desroches Fixes: 7f16cb676c00 ("ARM: at91/dt: add sama5d2 pinmux") Cc: # v4.4+ --- arch/arm/boot/dts/sama5d2-pinfunc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h index 1c01a6f843d8..28a2e45752fe 100644 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h @@ -518,7 +518,7 @@ #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0) #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3) #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1) -#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1) +#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1) #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2) #define PIN_PC10 74 #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0) -- cgit v1.2.3 From bebd024e4815b1a170fcd21ead9c2222b23ce9e6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 26 Mar 2019 17:36:06 +0100 Subject: x86/smp: Enforce CONFIG_HOTPLUG_CPU when SMP=y The SMT disable 'nosmt' command line argument is not working properly when CONFIG_HOTPLUG_CPU is disabled. The teardown of the sibling CPUs which are required to be brought up due to the MCE issues, cannot work. The CPUs are then kept in a half dead state. As the 'nosmt' functionality has become popular due to the speculative hardware vulnerabilities, the half torn down state is not a proper solution to the problem. Enforce CONFIG_HOTPLUG_CPU=y when SMP is enabled so the full operation is possible. Reported-by: Tianyu Lan Signed-off-by: Thomas Gleixner Acked-by: Greg Kroah-Hartman Cc: Konrad Wilk Cc: Josh Poimboeuf Cc: Mukesh Ojha Cc: Peter Zijlstra Cc: Jiri Kosina Cc: Rik van Riel Cc: Andy Lutomirski Cc: Micheal Kelley Cc: "K. Y. Srinivasan" Cc: Linus Torvalds Cc: Borislav Petkov Cc: K. Y. Srinivasan Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20190326163811.598166056@linutronix.de --- arch/x86/Kconfig | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1f9b3cf437c..5ad92419be19 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING If unsure, leave at the default value. config HOTPLUG_CPU - bool "Support for hot-pluggable CPUs" + def_bool y depends on SMP - ---help--- - Say Y here to allow turning CPUs off and on. CPUs can be - controlled through /sys/devices/system/cpu. - ( Note: power management support will enable this option - automatically on SMP systems. ) - Say N if you want to disable CPU hotplug. 
config BOOTPARAM_HOTPLUG_CPU0 bool "Set default setting of cpu0_hotpluggable" -- cgit v1.2.3 From a9d57ef15cbe327fe54416dd194ee0ea66ae53a4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Mar 2019 14:56:20 +0100 Subject: x86/retpolines: Disable switch jump tables when retpolines are enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit ce02ef06fcf7 ("x86, retpolines: Raise limit for generating indirect calls from switch-case") raised the limit under retpolines to 20 switch cases where gcc would only then start to emit jump tables, and therefore effectively disabling the emission of slow indirect calls in this area. After this has been brought to attention to gcc folks [0], Martin Liska has then fixed gcc to align with clang by avoiding to generate switch jump tables entirely under retpolines. This is taking effect in gcc starting from stable version 8.4.0. Given kernel supports compilation with older versions of gcc where the fix is not being available or backported anymore, we need to keep the extra KBUILD_CFLAGS around for some time and generally set the -fno-jump-tables to align with what more recent gcc is doing automatically today. More than 20 switch cases are not expected to be fast-path critical, but it would still be good to align with gcc behavior for versions < 8.4.0 in order to have consistency across supported gcc versions. vmlinux size is slightly growing by 0.27% for older gcc. This flag is only set to work around affected gcc, no change for clang. [0] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86952 Suggested-by: Martin Liska Signed-off-by: Daniel Borkmann Signed-off-by: Thomas Gleixner Cc: David Woodhouse Cc: Linus Torvalds Cc: Jesper Dangaard Brouer Cc: Björn Töpel Cc: Magnus Karlsson Cc: Alexei Starovoitov Cc: H.J. Lu Cc: Alexei Starovoitov Cc: David S. Miller Link: https://lkml.kernel.org/r/20190325135620.14882-1-daniel@iogearbox.net --- arch/x86/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 2d8b9d8ca4f8..a587805c6687 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE # Additionally, avoid generating expensive indirect jumps which # are subject to retpolines for small number of switch cases. # clang turns off jump table generation by default when under - # retpoline builds, however, gcc does not for x86. - KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20) + # retpoline builds, however, gcc does not for x86. This has + # only been fixed starting from gcc stable version 8.4.0 and + # onwards, but not for older ones. See gcc bug #86952. + ifndef CONFIG_CC_IS_CLANG + KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables) + endif endif archscripts: scripts_basic -- cgit v1.2.3 From 92c77f7c4d5dfaaf45b2ce19360e69977c264766 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Mon, 25 Mar 2019 17:18:17 -0700 Subject: x86/mm: Don't exceed the valid physical address space valid_phys_addr_range() is used to sanity check the physical address range of an operation, e.g., access to /dev/mem. It uses __pa(high_memory) internally. 
If memory is populated at the end of the physical address space, then __pa(high_memory) is outside of the physical address space because: high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; For the comparison in valid_phys_addr_range() this is not an issue, but if CONFIG_DEBUG_VIRTUAL is enabled, __pa() maps to __phys_addr(), which verifies that the resulting physical address is within the valid physical address space of the CPU. So in the case that memory is populated at the end of the physical address space, this is not true and triggers a VIRTUAL_BUG_ON(). Use __pa(high_memory - 1) to prevent the conversion from going beyond the end of valid physical addresses. Fixes: be62a3204406 ("x86/mm: Limit mmap() of /dev/mem to valid physical addresses") Signed-off-by: Ralph Campbell Signed-off-by: Thomas Gleixner Cc: Craig Bergstrom Cc: Linus Torvalds Cc: Boris Ostrovsky Cc: Fengguang Wu Cc: Greg Kroah-Hartman Cc: Hans Verkuil Cc: Mauro Carvalho Chehab Cc: Peter Zijlstra Cc: Sander Eikelenboom Cc: Sean Young Link: https://lkml.kernel.org/r/20190326001817.15413-2-rcampbell@nvidia.com --- arch/x86/mm/mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index db3165714521..dc726e07d8ba 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len) /* Can we access it for direct reading/writing? Must be RAM: */ int valid_phys_addr_range(phys_addr_t addr, size_t count) { - return addr + count <= __pa(high_memory); + return addr + count - 1 <= __pa(high_memory - 1); } /* Can we access it through mmap? Must be a valid physical address: */ -- cgit v1.2.3 From f6027c81099e87516d27d123867f10abd04b2d38 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Thu, 28 Mar 2019 16:49:48 +0100 Subject: x86/cpufeature: Fix __percpu annotation in this_cpu_has() &cpu_info.x86_capability is __percpu, and the second argument of x86_this_cpu_test_bit() is expected to be __percpu. Don't cast the __percpu away and then implicitly add it again. This gets rid of 106 lines of sparse warnings with the kernel config I'm using. Signed-off-by: Jann Horn Signed-off-by: Borislav Petkov Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Masahiro Yamada Cc: Nadav Amit Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190328154948.152273-1-jannh@google.com --- arch/x86/include/asm/cpufeature.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index ce95b8cbd229..0e56ff7e4848 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; test_cpu_cap(c, bit)) #define this_cpu_has(bit) \ - (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ - x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ + x86_this_cpu_test_bit(bit, \ + (unsigned long __percpu *)&cpu_info.x86_capability)) /* * This macro is for detection of features which need kernel -- cgit v1.2.3 From 552c69b1dc714854a5f4e27d37a43c6d797adf7d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 7 Mar 2019 15:27:43 -0800 Subject: KVM: nVMX: Do not inherit quadrant and invalid for the root shadow EPT Explicitly zero out quadrant and invalid instead of inheriting them from the root_mmu. 
Functionally, this patch is a nop as we (should) never set quadrant for a direct mapped (EPT) root_mmu and nested EPT is only allowed if EPT is used for L1, and the root_mmu will never be invalid at this point. Explicitly setting flags sets the stage for repurposing the legacy paging bits in role, e.g. nxe, cr0_wp, and sm{a,e}p_andnot_wp, at which point 'smm' would be the only flag to be inherited from root_mmu. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7837ab001d80..01bb090aaa5c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4918,11 +4918,15 @@ static union kvm_mmu_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly) { - union kvm_mmu_role role; + union kvm_mmu_role role = {0}; + union kvm_mmu_page_role root_base = vcpu->arch.root_mmu.mmu_role.base; - /* Base role is inherited from root_mmu */ - role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; - role.ext = kvm_calc_mmu_role_ext(vcpu); + /* Legacy paging and SMM flags are inherited from root_mmu */ + role.base.smm = root_base.smm; + role.base.nxe = root_base.nxe; + role.base.cr0_wp = root_base.cr0_wp; + role.base.smep_andnot_wp = root_base.smep_andnot_wp; + role.base.smap_andnot_wp = root_base.smap_andnot_wp; role.base.level = PT64_ROOT_4LEVEL; role.base.direct = false; @@ -4930,6 +4934,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, role.base.guest_mode = true; role.base.access = ACC_ALL; + role.ext = kvm_calc_mmu_role_ext(vcpu); role.ext.execonly = execonly; return role; -- cgit v1.2.3 From 47c42e6b4192a2ac8b6c9858ebcf400a9eff7a10 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 7 Mar 2019 15:27:44 -0800 Subject: KVM: x86: fix handling of role.cr4_pae and rename it to 'gpte_size' The cr4_pae flag is a bit of a misnomer, its purpose is really to track whether the guest PTE that is being shadowed is a 4-byte entry or an 8-byte entry. Prior to supporting nested EPT, the size of the gpte was reflected purely by CR4.PAE. KVM fudged things a bit for direct sptes, but it was mostly harmless since the size of the gpte never mattered. Now that a spte may be tracking an indirect EPT entry, relying on CR4.PAE is wrong and ill-named. For direct shadow pages, force the gpte_size to '1' as they are always 8-byte entries; EPT entries can only be 8-bytes and KVM always uses 8-byte entries for NPT and its identity map (when running with EPT but not unrestricted guest). Likewise, nested EPT entries are always 8-bytes. Nested EPT presents a unique scenario as the size of the entries are not dictated by CR4.PAE, but neither is the shadow page a direct map. To handle this scenario, set cr0_wp=1 and smap_andnot_wp=1, an otherwise impossible combination, to denote a nested EPT shadow page. Use the information to avoid incorrectly zapping an unsync'd indirect page in __kvm_sync_page(). Providing a consistent and accurate gpte_size fixes a bug reported by Vitaly where fast_cr3_switch() always fails when switching from L2 to L1 as kvm_mmu_get_page() would force role.cr4_pae=0 for direct pages, whereas kvm_calc_mmu_role_common() would set it according to CR4.PAE. 
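A compact summary of the resulting rules (my paraphrase of the commit message, not code from the patch):

	/*
	 * role.gpte_is_8_bytes = 1: PAE, 64-bit and nested/EPT gptes, and
	 *                           forced to 1 for direct pages, which
	 *                           have no gptes at all;
	 * role.gpte_is_8_bytes = 0: only legacy 32-bit non-PAE gptes.
	 *
	 * Nested EPT shadow pages are additionally marked with the
	 * otherwise impossible combination cr0_wp=1 && smap_andnot_wp=1.
	 */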
Fixes: 7dcd575520082 ("x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed") Reported-by: Vitaly Kuznetsov Tested-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/mmu.txt | 11 +++++++---- arch/x86/include/asm/kvm_host.h | 4 ++-- arch/x86/kvm/mmu.c | 38 ++++++++++++++++++++++++-------------- arch/x86/kvm/mmutrace.h | 4 ++-- 4 files changed, 35 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index f365102c80f5..2efe0efc516e 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -142,7 +142,7 @@ Shadow pages contain the following information: If clear, this page corresponds to a guest page table denoted by the gfn field. role.quadrant: - When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit + When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit sptes. That means a guest page table contains more ptes than the host, so multiple shadow pages are needed to shadow one guest page. For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the @@ -158,9 +158,9 @@ Shadow pages contain the following information: The page is invalid and should not be used. It is a root page that is currently pinned (by a cpu hardware register pointing to it); once it is unpinned it will be destroyed. - role.cr4_pae: - Contains the value of cr4.pae for which the page is valid (e.g. whether - 32-bit or 64-bit gptes are in use). + role.gpte_is_8_bytes: + Reflects the size of the guest PTE for which the page is valid, i.e. '1' + if 64-bit gptes are in use, '0' if 32-bit gptes are in use. role.nxe: Contains the value of efer.nxe for which the page is valid. role.cr0_wp: @@ -173,6 +173,9 @@ Shadow pages contain the following information: Contains the value of cr4.smap && !cr0.wp for which the page is valid (pages for which this is true are different from other pages; see the treatment of cr0.wp=0 below). + role.ept_sp: + This is a virtual flag to denote a shadowed nested EPT page. ept_sp + is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination. role.smm: Is 1 if the page is valid in system management mode. This field determines which of the kvm_memslots array was used to build this diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a5db4475e72d..88f5192ce05e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache { * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used * by indirect shadow page can not be more than 15 bits. * - * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, + * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access, * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. 
*/ union kvm_mmu_page_role { u32 word; struct { unsigned level:4; - unsigned cr4_pae:1; + unsigned gpte_is_8_bytes:1; unsigned quadrant:2; unsigned direct:1; unsigned access:3; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 01bb090aaa5c..776a58b00682 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator { static const union kvm_mmu_page_role mmu_base_role_mask = { .cr0_wp = 1, - .cr4_pae = 1, + .gpte_is_8_bytes = 1, .nxe = 1, .smep_andnot_wp = 1, .smap_andnot_wp = 1, @@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list); + #define for_each_valid_sp(_kvm, _sp, _gfn) \ hlist_for_each_entry(_sp, \ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ @@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, for_each_valid_sp(_kvm, _sp, _gfn) \ if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else +static inline bool is_ept_sp(struct kvm_mmu_page *sp) +{ + return sp->role.cr0_wp && sp->role.smap_andnot_wp; +} + /* @sp->gfn should be write-protected at the call site */ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { - if (sp->role.cr4_pae != !!is_pae(vcpu) - || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { + if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || + vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return false; } @@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role.level = level; role.direct = direct; if (role.direct) - role.cr4_pae = 0; + role.gpte_is_8_bytes = true; role.access = access; if (!vcpu->arch.mmu->direct_map && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { @@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, role.base.access = ACC_ALL; role.base.nxe = !!is_nx(vcpu); - role.base.cr4_pae = !!is_pae(vcpu); role.base.cr0_wp = is_write_protection(vcpu); role.base.smm = is_smm(vcpu); role.base.guest_mode = is_guest_mode(vcpu); @@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) role.base.ad_disabled = (shadow_accessed_mask == 0); role.base.level = kvm_x86_ops->get_tdp_level(vcpu); role.base.direct = true; + role.base.gpte_is_8_bytes = true; return role; } @@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) role.base.smap_andnot_wp = role.ext.cr4_smap && !is_write_protection(vcpu); role.base.direct = !is_paging(vcpu); + role.base.gpte_is_8_bytes = !!is_pae(vcpu); if (!is_long_mode(vcpu)) role.base.level = PT32E_ROOT_LEVEL; @@ -4919,21 +4926,24 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly) { union kvm_mmu_role role = {0}; - union kvm_mmu_page_role root_base = vcpu->arch.root_mmu.mmu_role.base; - /* Legacy paging and SMM flags are inherited from root_mmu */ - role.base.smm = root_base.smm; - role.base.nxe = root_base.nxe; - role.base.cr0_wp = root_base.cr0_wp; - role.base.smep_andnot_wp = root_base.smep_andnot_wp; - role.base.smap_andnot_wp = root_base.smap_andnot_wp; + /* SMM flag is inherited from root_mmu */ + role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; role.base.level = PT64_ROOT_4LEVEL; + role.base.gpte_is_8_bytes = true; role.base.direct = false; role.base.ad_disabled = 
!accessed_dirty; role.base.guest_mode = true; role.base.access = ACC_ALL; + /* + * WP=1 and NOT_WP=1 is an impossible combination, use WP and the + * SMAP variation to denote shadow EPT entries. + */ + role.base.cr0_wp = true; + role.base.smap_andnot_wp = true; + role.ext = kvm_calc_mmu_role_ext(vcpu); role.ext.execonly = execonly; @@ -5184,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, gpa, bytes, sp->role.word); offset = offset_in_page(gpa); - pte_size = sp->role.cr4_pae ? 8 : 4; + pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; /* * Sometimes, the OS only writes the last one bytes to update status @@ -5208,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) page_offset = offset_in_page(gpa); level = sp->role.level; *nspte = 1; - if (!sp->role.cr4_pae) { + if (!sp->role.gpte_is_8_bytes) { page_offset <<= 1; /* 32->64 */ /* * A 32-bit pde maps 4MB while the shadow pdes map diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 9f6c855a0043..dd30dccd2ad5 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -29,10 +29,10 @@ \ role.word = __entry->role; \ \ - trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s" \ + trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s" \ " %snxe %sad root %u %s%c", \ __entry->gfn, role.level, \ - role.cr4_pae ? " pae" : "", \ + role.gpte_is_8_bytes ? 8 : 4, \ role.quadrant, \ role.direct ? " direct" : "", \ access_str[role.access], \ -- cgit v1.2.3 From 05d5a48635259e621ea26d01e8316c6feeb34190 Mon Sep 17 00:00:00 2001 From: "Singh, Brijesh" Date: Fri, 15 Feb 2019 17:24:12 +0000 Subject: KVM: SVM: Workaround errata#1096 (insn_len may be zero on SMAP violation) Errata#1096: On a nested data page fault when CR.SMAP=1 and the guest data read generates a SMAP violation, the GuestInstrBytes field of the VMCB on a VMEXIT will incorrectly return 0h instead of the correct guest instruction bytes. Recommended workaround: To determine what instruction the guest was executing, the hypervisor will have to decode the instruction at the instruction pointer. The recommended workaround cannot be implemented for an SEV guest because guest memory is encrypted with the guest-specific key, and the instruction decoder will not be able to decode the instruction bytes. If we hit this erratum in an SEV guest, log the message and request a guest shutdown.
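In outline, the handling amounts to the following (a simplified sketch of the decision only; the actual implementation is svm_need_emulation_on_page_fault() in the diff below):

    /* On a page fault where the CPU supplied no instruction bytes
     * (the erratum case), fall back to decoding at the guest %rip for a
     * normal guest; an SEV guest's memory is encrypted, so the only safe
     * option is to stop the guest. */
    if (insn && !insn_len) {
        if (!sev_guest(vcpu->kvm))
            return true; /* emulate by decoding at the guest %rip */

        pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); /* shut down guest */
    }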
Reported-by: Venkatesh Srinivas Cc: Jim Mattson Cc: Tom Lendacky Cc: Borislav Petkov Cc: Joerg Roedel Cc: "Radim Krčmář" Cc: Paolo Bonzini Signed-off-by: Brijesh Singh Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/mmu.c | 8 +++++--- arch/x86/kvm/svm.c | 32 ++++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 6 ++++++ 4 files changed, 45 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 88f5192ce05e..5b03006c00be 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1192,6 +1192,8 @@ struct kvm_x86_ops { int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); + + bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu); }; struct kvm_arch_async_pf { diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 776a58b00682..f6d760dcdb75 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5408,10 +5408,12 @@ emulate: * This can happen if a guest gets a page-fault on data access but the HW * table walker is not able to read the instruction page (e.g instruction * page is not present in memory). In those cases we simply restart the - * guest. + * guest, with the exception of AMD Erratum 1096 which is unrecoverable. */ - if (unlikely(insn && !insn_len)) - return 1; + if (unlikely(insn && !insn_len)) { + if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) + return 1; + } er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b5b128a0a051..426039285fd1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -7098,6 +7098,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, return -ENODEV; } +static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) +{ + bool is_user, smap; + + is_user = svm_get_cpl(vcpu) == 3; + smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + + /* + * Detect and workaround Errata 1096 Fam_17h_00_0Fh + * + * In non SEV guest, hypervisor will be able to read the guest + * memory to decode the instruction pointer when insn_len is zero + * so we return true to indicate that decoding is possible. + * + * But in the SEV guest, the guest memory is encrypted with the + * guest specific key and hypervisor will not be able to decode the + * instruction pointer so we will not able to workaround it. Lets + * print the error and request to kill the guest. 
+ */ + if (is_user && smap) { + if (!sev_guest(vcpu->kvm)) + return true; + + pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n"); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + } + + return false; +} + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -7231,6 +7261,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .nested_enable_evmcs = nested_enable_evmcs, .nested_get_evmcs_version = nested_get_evmcs_version, + + .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c73375e01ab8..6aa84e09217b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7409,6 +7409,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) return 0; } +static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) +{ + return 0; +} + static __init int hardware_setup(void) { unsigned long host_bndcfgs; @@ -7711,6 +7716,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .set_nested_state = NULL, .get_vmcs12_pages = NULL, .nested_enable_evmcs = NULL, + .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, }; static void vmx_cleanup_l1d_flush(void) -- cgit v1.2.3 From 711eff3a8fa1d6193139a895524240912011b4dc Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Thu, 7 Feb 2019 14:05:30 -0500 Subject: kvm: nVMX: Add a vmentry check for HOST_SYSENTER_ESP and HOST_SYSENTER_EIP fields According to section "Checks on VMX Controls" in Intel SDM vol 3C, the following check is performed on vmentry of L2 guests: On processors that support Intel 64 architecture, the IA32_SYSENTER_ESP field and the IA32_SYSENTER_EIP field must each contain a canonical address. Signed-off-by: Krish Sadhukhan Reviewed-by: Mihai Carabas Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f24a2c225070..153e539c29c9 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2585,6 +2585,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu, !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || !nested_cr3_valid(vcpu, vmcs12->host_cr3)) return -EINVAL; + + if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) || + is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)) + return -EINVAL; + /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, -- cgit v1.2.3 From 4d66623cfba0949b2f0d669bd2ae732124c99ded Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 27 Sep 2018 08:31:26 +0800 Subject: KVM: x86: remove check on nr_mmu_pages in kvm_arch_commit_memory_region() * nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is zero. * nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages() never returns zero. For these two reasons, we can merge the two *if* clauses and use the return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies the code and also eliminates the possibility for the reader to believe nr_mmu_pages could be zero.
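The second bullet holds because the calculation clamps its result to a floor; roughly (a sketch of the helper's logic, not a verbatim quote of the kernel source):

    /* kvm_mmu_calculate_default_mmu_pages() sizes the shadow page pool as
     * a permille of the guest's memslot pages, but never below a fixed
     * minimum, so the old 'if (nr_mmu_pages)' guard could never be false. */
    nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
    nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);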
Signed-off-by: Wei Yang Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/x86.c | 8 ++------ 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5b03006c00be..679168931c40 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, gfn_t gfn_offset, unsigned long mask); void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); +unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f6d760dcdb75..5a9981465fbb 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -6028,7 +6028,7 @@ out: /* * Calculate mmu pages needed for kvm. */ -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) +unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) { unsigned int nr_mmu_pages; unsigned int nr_pages = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 65e4559eef2f..491e92383da8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9429,13 +9429,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - int nr_mmu_pages = 0; if (!kvm->arch.n_requested_mmu_pages) - nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); - - if (nr_mmu_pages) - kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); + kvm_mmu_change_mmu_pages(kvm, + kvm_mmu_calculate_default_mmu_pages(kvm)); /* * Dirty logging tracks sptes in 4k granularity, meaning that large -- cgit v1.2.3 From 3d9683cf3bfb6d4e4605a153958dfca7e18b52f2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 18 Mar 2019 18:08:12 +0900 Subject: KVM: export <linux/kvm_para.h> and <asm/kvm_para.h> iff KVM is supported I do not see any consistency about headers_install of <linux/kvm_para.h> and <asm/kvm_para.h>. According to my analysis of Linux 5.1-rc1, there are 3 groups: [1] Both <linux/kvm_para.h> and <asm/kvm_para.h> are exported: alpha, arm, hexagon, mips, powerpc, s390, sparc, x86 [2] <asm/kvm_para.h> is exported, but <linux/kvm_para.h> is not: arc, arm64, c6x, h8300, ia64, m68k, microblaze, nios2, openrisc, parisc, sh, unicore32, xtensa [3] Neither <linux/kvm_para.h> nor <asm/kvm_para.h> is exported: csky, nds32, riscv This does not match the actual KVM support. At least, [2] is half-baked. Nor do arch maintainers look like they care about this. For example, commit 0add53713b1c ("microblaze: Add missing kvm_para.h to Kbuild") exported <asm/kvm_para.h> to user-space in order to fix an in-kernel build error. We have two ways to make this consistent: [A] export both <linux/kvm_para.h> and <asm/kvm_para.h> for all architectures, irrespective of the KVM support [B] Match the header export of <linux/kvm_para.h> and <asm/kvm_para.h> to the KVM support My first attempt was [A] because the code looks cleaner, but Paolo suggested [B]. So, this commit goes with [B]. For most architectures, <asm/kvm_para.h> was moved to the kernel-space. I changed include/uapi/linux/Kbuild so that it checks the generated asm/kvm_para.h as well as checked-in ones.
After this commit, there will be two groups: [1] Both <linux/kvm_para.h> and <asm/kvm_para.h> are exported: arm, arm64, mips, powerpc, s390, x86 [2] Neither <linux/kvm_para.h> nor <asm/kvm_para.h> is exported: alpha, arc, c6x, csky, h8300, hexagon, ia64, m68k, microblaze, nds32, nios2, openrisc, parisc, riscv, sh, sparc, unicore32, xtensa Signed-off-by: Masahiro Yamada Acked-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/alpha/include/asm/Kbuild | 1 + arch/alpha/include/uapi/asm/kvm_para.h | 2 -- arch/arc/include/asm/Kbuild | 1 + arch/arc/include/uapi/asm/Kbuild | 1 - arch/arm/include/uapi/asm/Kbuild | 1 + arch/arm/include/uapi/asm/kvm_para.h | 2 -- arch/c6x/include/asm/Kbuild | 1 + arch/c6x/include/uapi/asm/Kbuild | 1 - arch/h8300/include/asm/Kbuild | 1 + arch/h8300/include/uapi/asm/Kbuild | 1 - arch/hexagon/include/asm/Kbuild | 1 + arch/hexagon/include/uapi/asm/kvm_para.h | 2 -- arch/ia64/include/asm/Kbuild | 1 + arch/ia64/include/uapi/asm/Kbuild | 1 - arch/m68k/include/asm/Kbuild | 1 + arch/m68k/include/uapi/asm/Kbuild | 1 - arch/microblaze/include/asm/Kbuild | 1 + arch/microblaze/include/uapi/asm/Kbuild | 1 - arch/nios2/include/asm/Kbuild | 1 + arch/nios2/include/uapi/asm/Kbuild | 1 - arch/openrisc/include/asm/Kbuild | 1 + arch/openrisc/include/uapi/asm/Kbuild | 1 - arch/parisc/include/asm/Kbuild | 1 + arch/parisc/include/uapi/asm/Kbuild | 1 - arch/sh/include/asm/Kbuild | 1 + arch/sh/include/uapi/asm/Kbuild | 1 - arch/sparc/include/asm/Kbuild | 1 + arch/sparc/include/uapi/asm/kvm_para.h | 2 -- arch/unicore32/include/asm/Kbuild | 1 + arch/unicore32/include/uapi/asm/Kbuild | 1 - arch/xtensa/include/asm/Kbuild | 1 + arch/xtensa/include/uapi/asm/Kbuild | 1 - include/uapi/linux/Kbuild | 2 ++ 33 files changed, 18 insertions(+), 20 deletions(-) delete mode 100644 arch/alpha/include/uapi/asm/kvm_para.h delete mode 100644 arch/arm/include/uapi/asm/kvm_para.h delete mode 100644 arch/hexagon/include/uapi/asm/kvm_para.h delete mode 100644 arch/sparc/include/uapi/asm/kvm_para.h (limited to 'arch') diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index dc0ab28baca1..70b783333965 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += exec.h generic-y += export.h generic-y += fb.h generic-y += irq_work.h +generic-y += kvm_para.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/alpha/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/kvm_para.h> diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b41f8881ecc8..decc306a3b52 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -11,6 +11,7 @@ generic-y += hardirq.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/arc/include/uapi/asm/Kbuild +++ b/arch/arc/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 23b4464c0995..ce8573157774 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -3,3 +3,4 @@ generated-y += unistd-common.h
generated-y += unistd-oabi.h generated-y += unistd-eabi.h +generic-y += kvm_para.h diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/arm/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/kvm_para.h> diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 63b4a1705182..249c9f6f26dc 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -19,6 +19,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 3e7c8ecf151e..e3dead402e5f 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -23,6 +23,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += linkage.h generic-y += local.h generic-y += local64.h diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index b25fd42aa0f4..d046e8ccdf78 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -19,6 +19,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/hexagon/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/kvm_para.h> diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 43e21fe3499c..11f191689c9e 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -2,6 +2,7 @@ generated-y += syscall_table.h generic-y += compat.h generic-y += exec.h generic-y += irq_work.h +generic-y += kvm_para.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 20018cb883a9..62a9522af51e 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -1,2 +1 @@ generated-y += unistd_64.h -generic-y += kvm_para.h diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 95f8f631c4df..2c359d9e80f6 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -13,6 +13,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/m68k/include/uapi/asm/Kbuild
index 8a7ad40be463..7417847dc438 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -1,2 +1 @@ generated-y += unistd_32.h -generic-y += kvm_para.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 791cc8d54d0a..1a8285c3f693 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -17,6 +17,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += linkage.h generic-y += local.h generic-y += local64.h diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 3ce84fbb2678..13f59631c576 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -1,3 +1,2 @@ generated-y += unistd_32.h -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 8fde4fa2c34f..88a667d12aaa 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -23,6 +23,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 5a73e2956ac4..22aa97136c01 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -20,6 +20,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 6f49e77d82a2..9bcd0c903dbb 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -11,6 +11,7 @@ generic-y += irq_regs.h generic-y += irq_work.h generic-y += kdebug.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index 22fdbd08cdc8..2bd5b392277c 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -1,3 +1,2 @@ generated-y += unistd_32.h generated-y += unistd_64.h -generic-y += kvm_para.h diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index a6ef3fee5f85..7bf2cb680d32 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -9,6 +9,7 @@ generic-y += emergency-restart.h generic-y += exec.h generic-y += irq_regs.h generic-y += irq_work.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index ecfbd40924dd..b8812c74c1de 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ 
b/arch/sh/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += unistd_32.h -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index b82f64e28f55..a22cfd5c0ee8 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -9,6 +9,7 @@ generic-y += exec.h generic-y += export.h generic-y += irq_regs.h generic-y += irq_work.h +generic-y += kvm_para.h generic-y += linkage.h generic-y += local.h generic-y += local64.h diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h deleted file mode 100644 index baacc4996d18..000000000000 --- a/arch/sparc/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/kvm_para.h> diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1d1544b6ca74..d77d953c04c1 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -18,6 +18,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 755bb11323d8..1c72f04ff75d 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,2 +1 @@ -generic-y += kvm_para.h generic-y += ucontext.h diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 42b6cb3d16f7..3843198e03d4 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += irq_work.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kprobes.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index 8a7ad40be463..7417847dc438 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild @@ -1,2 +1 @@ generated-y += unistd_32.h -generic-y += kvm_para.h diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 5f24b50c9e88..059dc2bedaf6 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -7,5 +7,7 @@ no-export-headers += kvm.h endif ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) +ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),) no-export-headers += kvm_para.h endif +endif -- cgit v1.2.3 From f285c633cb6d68d2bf3a8ad65bee3835aac9886c Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Mar 2019 11:45:59 -0700 Subject: kvm: mmu: Use range-based flushing in slot_handle_level_range Replace kvm_flush_remote_tlbs with kvm_flush_remote_tlbs_with_address in slot_handle_level_range. When range-based flushes are not enabled, kvm_flush_remote_tlbs_with_address falls back to kvm_flush_remote_tlbs. This changes the behavior of many functions that indirectly use slot_handle_level_range, iff the range-based flushes are enabled. The only potential problem I see with this is that kvm->tlbs_dirty will be cleared less often; however, the only caller of slot_handle_level_range that checks tlbs_dirty is kvm_mmu_notifier_invalidate_range_start, which checks it and does a kvm_flush_remote_tlbs after calling kvm_unmap_hva_range anyway.
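The safety of the substitution rests on the fallback behaviour of the range-based helper; in outline (a sketch only, with hypothetical helper names; the real logic lives inside kvm_flush_remote_tlbs_with_address()):

    /* When no range-based flush mechanism (e.g. the Hyper-V enlightenment)
     * is available, the _with_address variant degrades to a full remote
     * TLB flush, so callers may use it unconditionally. */
    static void flush_range_or_all(struct kvm *kvm, u64 start_gfn, u64 pages)
    {
        if (range_based_flush_supported(kvm))     /* hypothetical predicate */
            range_based_flush(kvm, start_gfn, pages); /* hypothetical helper */
        else
            kvm_flush_remote_tlbs(kvm);           /* full-flush fallback */
    }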
Tested: Ran all kvm-unit-tests on an Intel Haswell machine with and without this patch. The patch introduced no new failures. Signed-off-by: Ben Gardon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5a9981465fbb..eee455a8a612 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5526,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { if (flush && lock_flush_tlb) { - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, + start_gfn, + iterator.gfn - start_gfn + 1); flush = false; } cond_resched_lock(&kvm->mmu_lock); @@ -5534,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, } if (flush && lock_flush_tlb) { - kvm_flush_remote_tlbs(kvm); + kvm_flush_remote_tlbs_with_address(kvm, start_gfn, + end_gfn - start_gfn + 1); flush = false; } -- cgit v1.2.3 From 0cf9135b773bf32fba9dd8e6699c1b331ee4b749 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 7 Mar 2019 15:43:02 -0800 Subject: KVM: x86: Emulate MSR_IA32_ARCH_CAPABILITIES on AMD hosts The CPUID flag ARCH_CAPABILITIES is unconditionally exposed to host userspace for all x86 hosts, i.e. KVM advertises ARCH_CAPABILITIES regardless of hardware support under the pretense that KVM fully emulates MSR_IA32_ARCH_CAPABILITIES. Unfortunately, only VMX hosts handle accesses to MSR_IA32_ARCH_CAPABILITIES (despite KVM_GET_MSRS also reporting MSR_IA32_ARCH_CAPABILITIES for all hosts). Move the MSR_IA32_ARCH_CAPABILITIES handling to common x86 code so that it's emulated on AMD hosts. Fixes: 1eaafe91a0df4 ("kvm: x86: IA32_ARCH_CAPABILITIES is always supported") Cc: stable@vger.kernel.org Reported-by: Xiaoyao Li Cc: Jim Mattson Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx/vmx.c | 13 ------------- arch/x86/kvm/vmx/vmx.h | 1 - arch/x86/kvm/x86.c | 12 ++++++++++++ 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 679168931c40..264814f26ce0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -568,6 +568,7 @@ struct kvm_vcpu_arch { bool tpr_access_reporting; u64 ia32_xss; u64 microcode_version; + u64 arch_capabilities; /* * Paging state of the vcpu diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6aa84e09217b..ab432a930ae8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = to_vmx(vcpu)->spec_ctrl; break; - case MSR_IA32_ARCH_CAPABILITIES: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) - return 1; - msr_info->data = to_vmx(vcpu)->arch_capabilities; - break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; @@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_W); break; - case MSR_IA32_ARCH_CAPABILITIES: - if (!msr_info->host_initiated) - return 1; - vmx->arch_capabilities = data; - break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT,
data)) @@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } - vmx->arch_capabilities = kvm_get_arch_capabilities(); - vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); /* 22.2.1, 20.8.1 */ diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 1554cb45b393..a1e00d0a2482 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -190,7 +190,6 @@ struct vcpu_vmx { u64 msr_guest_kernel_gs_base; #endif - u64 arch_capabilities; u64 spec_ctrl; u32 vm_entry_controls_shadow; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 491e92383da8..9fc378531ca7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2443,6 +2443,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (msr_info->host_initiated) vcpu->arch.microcode_version = data; break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vcpu->arch.arch_capabilities = data; + break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: @@ -2747,6 +2752,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UCODE_REV: msr_info->data = vcpu->arch.microcode_version; break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = vcpu->arch.arch_capabilities; + break; case MSR_IA32_TSC: msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; break; @@ -8733,6 +8744,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { + vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); -- cgit v1.2.3 From 2bdb76c015df7125783d8394d6339d181cb5bc30 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Fri, 8 Mar 2019 15:57:20 +0800 Subject: kvm/x86: Move MSR_IA32_ARCH_CAPABILITIES to array emulated_msrs Since MSR_IA32_ARCH_CAPABILITIES is emulated unconditionally even if the host doesn't support it, we should move it from the msrs_to_save array to the emulated_msrs array, to report to userspace that guests support this MSR. Signed-off-by: Xiaoyao Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9fc378531ca7..9f279bb145e5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1125,7 +1125,7 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES, + MSR_IA32_SPEC_CTRL, MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, @@ -1158,6 +1158,7 @@ static u32 emulated_msrs[] = { MSR_IA32_TSC_ADJUST, MSR_IA32_TSCDEADLINE, + MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, -- cgit v1.2.3 From 013cc6ebbf41496ce4badedd71ea6d4a6d198c14 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 13 Mar 2019 18:13:42 +0100 Subject: x86/kvm/hyper-v: avoid spurious pending stimer on vCPU init When userspace initializes guest vCPUs it may want to zero all supported MSRs, including Hyper-V related ones such as HV_X64_MSR_STIMERn_CONFIG/ HV_X64_MSR_STIMERn_COUNT.
With commit f3b138c5d89a ("kvm/x86: Update SynIC timers on guest entry only") we began doing stimer_mark_pending() unconditionally on every config change. The issue I'm observing manifests itself as follows: - Qemu writes 0 to STIMERn_{CONFIG,COUNT} MSRs and marks all stimers as pending in stimer_pending_bitmap, arms KVM_REQ_HV_STIMER; - kvm_hv_has_stimer_pending() starts returning true; - kvm_vcpu_has_events() starts returning true; - kvm_arch_vcpu_runnable() starts returning true; - when kvm_arch_vcpu_ioctl_run() gets into (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) case: - kvm_vcpu_block() gets in 'kvm_vcpu_check_block(vcpu) < 0' and returns immediately, avoiding the normal wait path; - -EAGAIN is returned from kvm_arch_vcpu_ioctl_run() immediately, forcing userspace to retry. So instead of the normal wait path we get a busy loop on all secondary vCPUs before they get an INIT signal. This seems to be undesirable, especially given that this happens even when Hyper-V extensions are not used. Generally, it seems to be pointless to mark an stimer as pending in stimer_pending_bitmap and arm KVM_REQ_HV_STIMER as the only thing kvm_hv_process_stimers() will do is clear the corresponding bit. We may just not mark disabled timers as pending instead. Fixes: f3b138c5d89a ("kvm/x86: Update SynIC timers on guest entry only") Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 27c43525a05f..421899f6ad7b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, new_config.enable = 0; stimer->config.as_uint64 = new_config.as_uint64; - stimer_mark_pending(stimer, false); + if (stimer->config.enable) + stimer_mark_pending(stimer, false); + return 0; } @@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, stimer->config.enable = 0; else if (stimer->config.auto_enable) stimer->config.enable = 1; - stimer_mark_pending(stimer, false); + + if (stimer->config.enable) + stimer_mark_pending(stimer, false); + return 0; } -- cgit v1.2.3 From 45def77ebf79e2e8942b89ed79294d97ce914fa0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 11 Mar 2019 20:01:05 -0700 Subject: KVM: x86: update %rip after emulating IO Most (all?) x86 platforms provide a port IO based reset mechanism, e.g. OUT 92h or CF9h. Userspace may emulate said mechanism, i.e. reset a vCPU in response to KVM_EXIT_IO, without explicitly announcing to KVM that it is doing a reset, e.g. Qemu jams vCPU state and resumes running. To avoid corrupting %rip after such a reset, commit 0967b7bf1c22 ("KVM: Skip pio instruction when it is emulated, not executed") changed the behavior of PIO handlers, i.e. today's "fast" PIO handling, to skip the instruction prior to exiting to userspace. Full emulation doesn't need such tricks because re-emulating the instruction will naturally handle %rip being changed to point at the reset vector. Updating %rip prior to exiting to userspace has several drawbacks: - Userspace sees the wrong %rip on the exit, e.g. if PIO emulation fails it will likely yell about the wrong address. - Single step exits to userspace are effectively dropped as KVM_EXIT_DEBUG is overwritten with KVM_EXIT_IO. - Behavior of PIO emulation is different depending on whether it goes down the fast path or the slow path.
Rather than skip the PIO instruction before exiting to userspace, snapshot the linear %rip and cancel PIO completion if the current value does not match the snapshot. For a 64-bit vCPU, i.e. the most common scenario, the snapshot and comparison have negligible overhead as VMCS.GUEST_RIP will be cached regardless, i.e. there is no extra VMREAD in this case. All other alternatives to snapshotting the linear %rip that don't rely on an explicit reset announcement suffer from one corner case or another. For example, canceling PIO completion on any write to %rip fails if userspace does a save/restore of %rip, and attempting to avoid that issue by canceling PIO only if %rip changed then fails if PIO collides with the reset %rip. Attempting to zero in on the exact reset vector won't work for APs, which means adding more hooks such as the vCPU's MP_STATE, and so on and so forth. Checking for a linear %rip match technically suffers from corner cases, e.g. userspace could theoretically rewrite the underlying code page and expect a different instruction to execute, or the guest hardcodes a PIO reset at 0xfffffff0, but those are far, far outside of what can be considered normal operation. Fixes: 432baf60eee3 ("KVM: VMX: use kvm_fast_pio_in for handling IN I/O") Cc: stable@vger.kernel.org Reported-by: Jim Mattson Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 36 ++++++++++++++++++++++++++---------- 2 files changed, 27 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 264814f26ce0..159b5988292f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -350,6 +350,7 @@ struct kvm_mmu_page { }; struct kvm_pio_request { + unsigned long linear_rip; unsigned long count; int in; int port; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9f279bb145e5..099b851dabaf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6535,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); +static int complete_fast_pio_out(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pio.count = 0; + + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) + return 1; + + return kvm_skip_emulated_instruction(vcpu); +} + static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); - /* do not return to emulator after return from userspace */ - vcpu->arch.pio.count = 0; + + if (!ret) { + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); + vcpu->arch.complete_userspace_io = complete_fast_pio_out; + } return ret; } @@ -6553,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) /* We should only ever be called with arch.pio.count equal to 1 */ BUG_ON(vcpu->arch.pio.count != 1); + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { + vcpu->arch.pio.count = 0; + return 1; + } + /* For size less than 4 we merge, else we zero extend */ val = (vcpu->arch.pio.size < 4) ?
kvm_register_read(vcpu, VCPU_REGS_RAX) : 0; @@ -6565,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) vcpu->arch.pio.port, &val, 1); kvm_register_write(vcpu, VCPU_REGS_RAX, val); - return 1; + return kvm_skip_emulated_instruction(vcpu); } static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, @@ -6584,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, return ret; } + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); vcpu->arch.complete_userspace_io = complete_fast_pio_in; return 0; @@ -6591,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) { - int ret = kvm_skip_emulated_instruction(vcpu); + int ret; - /* - * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered - * KVM_EXIT_DEBUG here. - */ if (in) - return kvm_fast_pio_in(vcpu, size, port) && ret; + ret = kvm_fast_pio_in(vcpu, size, port); else - return kvm_fast_pio_out(vcpu, size, port) && ret; + ret = kvm_fast_pio_out(vcpu, size, port); + return ret && kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_fast_pio); -- cgit v1.2.3 From ab8a6d821179ab9bea1a9179f535ccba6330c1ed Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Thu, 28 Mar 2019 07:08:01 +0800 Subject: MIPS: KGDB: fix kgdb support for SMP platforms. kgdb_call_nmi_hook() is called on other CPUs through an SMP call. The MIPS SMP call is processed in the IPI IRQ handler and regs are saved in handle_int, so kgdb_call_nmi_hook() gets regs via get_irq_regs() and passes them on to kgdb_cpu_enter(). Signed-off-by: Chong Qiao Reviewed-by: Douglas Anderson Acked-by: Daniel Thompson Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Will Deacon Cc: Christophe Leroy Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: QiaoChong --- arch/mips/kernel/kgdb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 6e574c02e4c3..ea781b29f7f1 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -33,6 +33,7 @@ #include #include #include +#include <asm/irq_regs.h> static struct hard_trap_info { unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ @@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored) old_fs = get_fs(); set_fs(KERNEL_DS); - kgdb_nmicallback(raw_smp_processor_id(), NULL); + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); set_fs(old_fs); } -- cgit v1.2.3 From e4952b0c2c0309bdbfc4c6cc0dd81e37450d74d0 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Thu, 28 Mar 2019 14:37:45 +0100 Subject: MIPS: SGI-IP27: Fix use of unchecked pointer in shutdown_bridge_irq smatch complaint: arch/mips/sgi-ip27/ip27-irq.c:123 shutdown_bridge_irq() warn: variable dereferenced before check 'hd' (see line 121) Fix it by removing the local variable and using hd->pin directly.
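Reduced to its essence, the warning flags a classic dereference-before-check; an illustrative before/after (a sketch, mirroring the diff below):

    /* Before: hd->pin is read before hd is validated. */
    struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
    int pin = hd->pin;          /* potential NULL dereference */
    if (!hd)
        return;

    /* After: drop the local and read hd->pin only once hd is known
     * to be non-NULL. */
    struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
    if (!hd)
        return;
    /* ... */
    bridge_clr(bc, b_int_enable, 1 << hd->pin);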
Fixes: 69a07a41d908 ("MIPS: SGI-IP27: rework HUB interrupts") Reported-by: Dan Carpenter Signed-off-by: Thomas Bogendoerfer Reviewed-by: Mukesh Ojha Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/sgi-ip27/ip27-irq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 710a59764b01..a32f843cdbe0 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d) { struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); struct bridge_controller *bc; - int pin = hd->pin; if (!hd) return; @@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d) disable_hub_irq(d); bc = hd->bc; - bridge_clr(bc, b_int_enable, (1 << pin)); + bridge_clr(bc, b_int_enable, (1 << hd->pin)); bridge_read(bc, b_wid_tflush); } -- cgit v1.2.3 From 6f845ebec2706841d15831fab3ffffcfd9e676fa Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Tue, 26 Mar 2019 18:00:31 +0530 Subject: powerpc/pseries/mce: Fix misleading print for TLB multihit On pseries, TLB multihit errors are reported as D-Cache Multihit. This is because of the wrongly populated mc_err_types[] array. Per PAPR, the TLB error type is 0x04, but mc_err_types[4] points to the "D-Cache" string instead of "TLB". Fix up the mc_err_types[] array. Machine check error type per PAPR: 0x00 = Uncorrectable Memory Error (UE) 0x01 = SLB error 0x02 = ERAT Error 0x04 = TLB error 0x05 = D-Cache error 0x07 = I-Cache error Fixes: 8f0b80561f21 ("powerpc/pseries: Display machine check error details.") Cc: stable@vger.kernel.org # v4.20+ Reported-by: Aneesh Kumar K.V Signed-off-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/ras.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index d97d52772789..452dcfd7e5dd 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs, "UE", "SLB", "ERAT", + "Unknown", "TLB", "D-Cache", "Unknown", -- cgit v1.2.3 From ff0e2a7bd13f7c332d7f09ff45d08df4bf512ce0 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 22 Mar 2019 10:04:44 +0000 Subject: RISC-V: Fix FIXMAP_TOP to avoid overlap with VMALLOC area The FIXMAP area overlaps with the VMALLOC area in Linux-5.1-rc1, hence we get the below warning in the Linux RISC-V 32-bit kernel. This warning does not show up in the Linux RISC-V 64-bit kernel due to its large VMALLOC area. WARNING: CPU: 0 PID: 22 at mm/vmalloc.c:150 vmap_page_range_noflush+0x134/0x15c Modules linked in: CPU: 0 PID: 22 Comm: kworker/0:1 Not tainted 5.1.0-rc1-00005-gebc2f658040e #1 Workqueue: events pcpu_balance_workfn Call Trace: [] walk_stackframe+0x0/0xa0 [] show_stack+0x28/0x32 [] dump_stack+0x62/0x7e [] __warn+0x98/0xce [] warn_slowpath_null+0x2e/0x3c [] vmap_page_range_noflush+0x134/0x15c [] map_kernel_range_noflush+0xc/0x14 [] pcpu_populate_chunk+0x19e/0x236 [] pcpu_balance_workfn+0x448/0x464 [] process_one_work+0x16c/0x2ea [] worker_thread+0xf2/0x3b2 [] kthread+0xce/0xdc [] ret_from_exception+0x0/0xc This patch fixes the above warning by placing the FIXMAP area below the VMALLOC area.
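The intended layout can be pictured as follows (a sketch; the exact constants come from the port's pgtable definitions, where the VMALLOC region ends just below PAGE_OFFSET):

    /*
     * Before: FIXADDR_TOP == PAGE_OFFSET placed the fixmap inside the
     * VMALLOC region on RV32.  After: anchoring it at VMALLOC_START
     * keeps the two regions disjoint:
     *
     *   FIXADDR_START .. FIXADDR_TOP == VMALLOC_START .. VMALLOC_END .. PAGE_OFFSET
     */
    #define FIXADDR_TOP   (VMALLOC_START)
    #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)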
Fixes: f2c17aabc917 ("RISC-V: Implement compile-time fixed mappings") Signed-off-by: Anup Patel Reviewed-by: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/fixmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 57afe604b495..c207f6634b91 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -26,7 +26,7 @@ enum fixed_addresses { }; #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) -#define FIXADDR_TOP (PAGE_OFFSET) +#define FIXADDR_TOP (VMALLOC_START) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #define FIXMAP_PAGE_IO PAGE_KERNEL -- cgit v1.2.3 From da4ed37873918eeb4e8db7f0cf55e0a7e18788c3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 7 Mar 2019 15:56:34 -0800 Subject: RISC-V: Use IS_ENABLED(CONFIG_CMODEL_MEDLOW) IS_ENABLED should generally use CONFIG_ prefaced symbols and it doesn't appear as if there is a CMODEL_MEDLOW define. Signed-off-by: Joe Perches Reviewed-by: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 7dd308129b40..2872edce894d 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, { s32 hi20; - if (IS_ENABLED(CMODEL_MEDLOW)) { + if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); -- cgit v1.2.3 From f560bd19d2fe0e54851d706b72acbc6f2eed3567 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 28 Mar 2019 12:42:33 +0100 Subject: x86/realmode: Make set_real_mode_mem() static inline Remove the unused @size argument and move it into a header file, so it can be inlined. [ bp: Massage. ] Signed-off-by: Matteo Croce Signed-off-by: Borislav Petkov Reviewed-by: Mukesh Ojha Cc: Ard Biesheuvel Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: linux-efi Cc: platform-driver-x86@vger.kernel.org Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190328114233.27835-1-mcroce@redhat.com --- arch/x86/include/asm/realmode.h | 6 +++++- arch/x86/platform/efi/quirks.c | 2 +- arch/x86/realmode/init.c | 9 +-------- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 63b3393bd98e..c53682303c9c 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void) return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); } -void set_real_mode_mem(phys_addr_t mem, size_t size); +static inline void set_real_mode_mem(phys_addr_t mem) +{ + real_mode_header = (struct real_mode_header *) __va(mem); +} + void reserve_real_mode(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 458a0e2bcc57..a25a9fd987a9 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -449,7 +449,7 @@ void __init efi_free_boot_services(void) */ rm_size = real_mode_size_needed(); if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { - set_real_mode_mem(start, rm_size); + set_real_mode_mem(start); start += rm_size; size -= rm_size; } diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 47d097946872..7dce39c8c034 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -15,13 +15,6 @@ u32 *trampoline_cr4_features; /* Hold the pgd entry used on booting additional CPUs */ pgd_t trampoline_pgd_entry; -void __init set_real_mode_mem(phys_addr_t mem, size_t size) -{ - void *base = __va(mem); - - real_mode_header = (struct real_mode_header *) base; -} - void __init reserve_real_mode(void) { phys_addr_t mem; @@ -40,7 +33,7 @@ void __init reserve_real_mode(void) } memblock_reserve(mem, size); - set_real_mode_mem(mem, size); + set_real_mode_mem(mem); } static void __init setup_real_mode(void) -- cgit v1.2.3 From fd427103e8dfcb4b438269afd710b63e7af61463 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 25 Mar 2019 08:43:33 +0000 Subject: powerpc/32: Fix early boot failure with RTAS built-in Commit 0df977eafc79 ("powerpc/6xx: Don't use SPRN_SPRG2 for storing stack pointer while in RTAS") changes the code to use a field in thread struct to store the stack pointer while in RTAS instead of using SPRN_SPRG2. It therefore converts all places which were manipulating SPRN_SPRG2 to use that field. During early startup, the zeroing of SPRN_SPRG2 has been replaced by a zeroing of that field in thread struct. But at least in start_here, that's done wrongly because it used the physical address of the fields while MMU is on at that time. So the virtual address of the field should be used instead, but in the meantime, thread struct has already been zeroed and initialised so we can just drop this initialisation. 
Reported-by: Larry Finger Fixes: 0df977eafc79 ("powerpc/6xx: Don't use SPRN_SPRG2 for storing stack pointer while in RTAS") Signed-off-by: Christophe Leroy Tested-by: Larry Finger Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/head_32.S | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 48051c8977c5..e25b615e9f9e 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -851,10 +851,6 @@ __secondary_start: tophys(r4,r2) addi r4,r4,THREAD /* phys address of our thread_struct */ mtspr SPRN_SPRG_THREAD,r4 -#ifdef CONFIG_PPC_RTAS - li r3,0 - stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ -#endif lis r4, (swapper_pg_dir - PAGE_OFFSET)@h ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l mtspr SPRN_SPRG_PGDIR, r4 @@ -941,10 +937,6 @@ start_here: tophys(r4,r2) addi r4,r4,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 -#ifdef CONFIG_PPC_RTAS - li r3,0 - stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ -#endif lis r4, (swapper_pg_dir - PAGE_OFFSET)@h ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l mtspr SPRN_SPRG_PGDIR, r4 -- cgit v1.2.3 From faa3604eda325588451c9c1eb4c8a8d04c1cd633 Mon Sep 17 00:00:00 2001 From: Xiaochen Shen Date: Sat, 30 Mar 2019 05:50:38 +0800 Subject: x86/resctrl: Fix typos in the mba_sc mount option The user can control the MBA memory bandwidth in MBps (Mega Bytes per second) units of the MBA Software Controller (mba_sc) by using the "mba_MBps" mount option. For details, see Documentation/x86/resctrl_ui.txt. However, commit 23bf1b6be9c2 ("kernfs, sysfs, cgroup, intel_rdt: Support fs_context") changed the mount option name from "mba_MBps" to "mba_mpbs" by mistake. Change it back from "mba_mpbs" to "mba_MBps" because it is user-visible, and correct "Opt_mba_mpbs" spelling to "Opt_mba_mbps". [ bp: massage commit message. ] Fixes: 23bf1b6be9c2 ("kernfs, sysfs, cgroup, intel_rdt: Support fs_context") Signed-off-by: Xiaochen Shen Signed-off-by: Borislav Petkov Cc: dhowells@redhat.com Cc: Fenghua Yu Cc: "H.
Peter Anvin" Cc: Ingo Molnar Cc: pei.p.jia@intel.com Cc: Reinette Chatre Cc: Thomas Gleixner Cc: Tony Luck Cc: x86-ml Link: https://lkml.kernel.org/r/1553896238-22130-1-git-send-email-xiaochen.shen@intel.com --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 399601eda8e4..54b9eef3eea9 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2039,14 +2039,14 @@ out: enum rdt_param { Opt_cdp, Opt_cdpl2, - Opt_mba_mpbs, + Opt_mba_mbps, nr__rdt_params }; static const struct fs_parameter_spec rdt_param_specs[] = { fsparam_flag("cdp", Opt_cdp), fsparam_flag("cdpl2", Opt_cdpl2), - fsparam_flag("mba_mpbs", Opt_mba_mpbs), + fsparam_flag("mba_MBps", Opt_mba_mbps), {} }; @@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) case Opt_cdpl2: ctx->enable_cdpl2 = true; return 0; - case Opt_mba_mpbs: + case Opt_mba_mbps: if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return -EINVAL; ctx->enable_mba_mbps = true; -- cgit v1.2.3 From 583feb08e7f7ac9d533b446882eb3a54737a6dbb Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 6 Mar 2019 11:50:48 -0800 Subject: perf/x86/intel: Fix handling of wakeup_events for multi-entry PEBS When an event is programmed with attr.wakeup_events=N (N>0), it means the caller is interested in getting a user level notification after N samples have been recorded in the kernel sampling buffer. With precise events on Intel processors, the kernel uses PEBS. The kernel tries minimize sampling overhead by verifying if the event configuration is compatible with multi-entry PEBS mode. If so, the kernel is notified only when the buffer has reached its threshold. Other PEBS operates in single-entry mode, the kenrel is notified for each PEBS sample. The problem is that the current implementation look at frequency mode and event sample_type but ignores the wakeup_events field. Thus, it may not be possible to receive a notification after each precise event. This patch fixes this problem by disabling multi-entry PEBS if wakeup_events is non-zero. 
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Andi Kleen Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Cc: kan.liang@intel.com Link: https://lkml.kernel.org/r/20190306195048.189514-1-eranian@google.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 8baa441d8000..1539647ea39d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event) return ret; if (event->attr.precise_ip) { - if (!event->attr.freq) { + if (!(event->attr.freq || event->attr.wakeup_events)) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event))) -- cgit v1.2.3 From d7262457e35dbe239659e62654e56f8ddb814bed Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Mar 2019 13:38:49 +0100 Subject: perf/x86/intel: Initialize TFA MSR Stephane reported that the TFA MSR is not initialized by the kernel, but the TFA bit could be set by firmware or as a leftover from a kexec, which makes the state inconsistent. Reported-by: Stephane Eranian Tested-by: Nelson DSouza Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Cc: tonyj@suse.com Link: https://lkml.kernel.org/r/20190321123849.GN6521@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 1539647ea39d..f61dcbef20ff 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; + if (x86_pmu.flags & PMU_FL_TFA) { + WARN_ON_ONCE(cpuc->tfa_shadow); + cpuc->tfa_shadow = ~0ULL; + intel_set_tfa(cpuc, false); + } + if (x86_pmu.version > 1) flip_smm_bit(&x86_pmu.attr_freeze_on_smi); -- cgit v1.2.3 From 914123fa39042e651d79eaf86bbf63a1b938dddf Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 2 Apr 2019 15:21:14 +0000 Subject: x86/perf/amd: Resolve race condition when disabling PMC On AMD processors, the detection of an overflowed counter in the NMI handler relies on the current value of the counter. So, for example, to check for overflow on a 48-bit counter, bit 47 is checked to see if it is 1 (not overflowed) or 0 (overflowed). There is currently a race condition present when disabling and then updating the PMC. Increased NMI latency in newer AMD processors makes this race condition more pronounced. If the counter value has overflowed, it is possible to update the PMC value before the NMI handler can run. The updated PMC value is not an overflowed value, so when the perf NMI handler does run, it will not find an overflowed counter. This may appear as an unknown NMI resulting in either a panic or a series of messages, depending on how the kernel is configured. To eliminate this race condition, the PMC value must be checked after disabling the counter. Add an AMD function, amd_pmu_disable_all(), that will wait for the NMI handler to reset any active and overflowed counter after calling x86_pmu_disable_all().
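The overflow test described above is just the top bit of the counter value; a minimal sketch of the check, assuming a hypothetical 48-bit counter width, looks like this:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: with a 48-bit counter, bit 47 set means "not overflowed";
     * bit 47 clear means the counter wrapped and the NMI handler still has
     * an overflow left to process. */
    static bool pmc_overflowed(uint64_t counter, unsigned int cntval_bits)
    {
            return !(counter & (1ULL << (cntval_bits - 1)));
    }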
Signed-off-by: Tom Lendacky Signed-off-by: Peter Zijlstra (Intel) Cc: # 4.14.x- Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: https://lkml.kernel.org/r/Message-ID: Signed-off-by: Ingo Molnar --- arch/x86/events/amd/core.c | 65 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 7d2d7c801dba..c09ee88b0eed 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "../perf_event.h" @@ -429,6 +430,64 @@ static void amd_pmu_cpu_dead(int cpu) } } +/* + * When a PMC counter overflows, an NMI is used to process the event and + * reset the counter. NMI latency can result in the counter being updated + * before the NMI can run, which can result in what appear to be spurious + * NMIs. This function is intended to wait for the NMI to run and reset + * the counter to avoid possible unhandled NMI messages. + */ +#define OVERFLOW_WAIT_COUNT 50 + +static void amd_pmu_wait_on_overflow(int idx) +{ + unsigned int i; + u64 counter; + + /* + * Wait for the counter to be reset if it has overflowed. This loop + * should exit very, very quickly, but just in case, don't wait + * forever... + */ + for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { + rdmsrl(x86_pmu_event_addr(idx), counter); + if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) + break; + + /* Might be in IRQ context, so can't sleep */ + udelay(1); + } +} + +static void amd_pmu_disable_all(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx; + + x86_pmu_disable_all(); + + /* + * This shouldn't be called from NMI context, but add a safeguard here + * to return, since if we're in NMI context we can't wait for an NMI + * to reset an overflowed counter value. + */ + if (in_nmi()) + return; + + /* + * Check each counter for overflow and wait for it to be reset by the + * NMI if it has overflowed. This relies on the fact that all active + * counters are always enabled when this function is called and + * ARCH_PERFMON_EVENTSEL_INT is always set.
+ */ + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + if (!test_bit(idx, cpuc->active_mask)) + continue; + + amd_pmu_wait_on_overflow(idx); + } +} + static struct event_constraint * amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -622,7 +681,7 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config) static __initconst const struct x86_pmu amd_pmu = { .name = "AMD", .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, + .disable_all = amd_pmu_disable_all, .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, .disable = x86_pmu_disable_event, @@ -732,7 +791,7 @@ void amd_pmu_enable_virt(void) cpuc->perf_ctr_virt_mask = 0; /* Reload all events */ - x86_pmu_disable_all(); + amd_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); @@ -750,7 +809,7 @@ void amd_pmu_disable_virt(void) cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; /* Reload all events */ - x86_pmu_disable_all(); + amd_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); -- cgit v1.2.3 From 6d3edaae16c6c7d238360f2841212c2b26774d5e Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 2 Apr 2019 15:21:16 +0000 Subject: x86/perf/amd: Resolve NMI latency issues for active PMCs On AMD processors, the detection of an overflowed PMC counter in the NMI handler relies on the current value of the PMC. So, for example, to check for overflow on a 48-bit counter, bit 47 is checked to see if it is 1 (not overflowed) or 0 (overflowed). When the perf NMI handler executes, it does not know in advance which PMC counters have overflowed. As such, the NMI handler will process all active PMC counters that have overflowed. NMI latency in newer AMD processors can result in multiple overflowed PMC counters being processed in one NMI, and then a subsequent NMI, which does not appear to be a back-to-back NMI, finding no PMC counters that have overflowed. This may appear to be an unhandled NMI resulting in either a panic or a series of messages, depending on how the kernel was configured. To mitigate this issue, add an AMD handle_irq callback function, amd_pmu_handle_irq(), that will invoke the common x86_pmu_handle_irq() function and upon return perform some additional processing that will indicate if the NMI has been handled or would have been handled had an earlier NMI not handled the overflowed PMC. Whenever a PMC is active, a per-CPU variable is set to the minimum of the number of active PMCs and 2; this indicates the number of NMIs that can still legitimately occur. The value of 2 is used for when an NMI does not arrive at the LAPIC in time to be collapsed into an already pending NMI. Each time the function is called without having handled an overflowed counter, the per-CPU value is checked. If the value is non-zero, it is decremented and the NMI indicates that it handled the NMI. If the value is zero, then the NMI indicates that it did not handle the NMI.
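A simplified sketch of the bookkeeping described above; the real version in the patch below uses per-CPU storage and the NMI_HANDLED/NMI_DONE return codes:

    /* Sketch, not the kernel code: decide whether to claim a perf NMI.
     * 'handled' is the number of overflows just processed, 'active' the
     * number of active PMCs sampled before handling. */
    static unsigned int perf_nmi_budget;

    static int claim_perf_nmi(int handled, int active)
    {
            if (handled) {
                    /* Up to min(active, 2) spurious-looking NMIs may follow. */
                    perf_nmi_budget = active < 2 ? active : 2;
                    return 1;               /* NMI_HANDLED */
            }
            if (!perf_nmi_budget)
                    return 0;               /* NMI_DONE */
            perf_nmi_budget--;
            return 1;                       /* NMI_HANDLED */
    }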
Signed-off-by: Tom Lendacky Signed-off-by: Peter Zijlstra (Intel) Cc: # 4.14.x- Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: https://lkml.kernel.org/r/Message-ID: Signed-off-by: Ingo Molnar --- arch/x86/events/amd/core.c | 56 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index c09ee88b0eed..34c191453ce3 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -4,10 +4,13 @@ #include #include #include +#include #include #include "../perf_event.h" +static DEFINE_PER_CPU(unsigned int, perf_nmi_counter); + static __initconst const u64 amd_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -488,6 +491,57 @@ static void amd_pmu_disable_all(void) } } +/* + * Because of NMI latency, if multiple PMC counters are active or other sources + * of NMIs are received, the perf NMI handler can handle one or more overflowed + * PMC counters outside of the NMI associated with the PMC overflow. If the NMI + * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel + * back-to-back NMI support won't be active. This PMC handler needs to take into + * account that this can occur; otherwise this could result in unknown NMI + * messages being issued. Examples of this are PMC overflow while in the NMI + * handler when multiple PMCs are active or PMC overflow while handling some + * other source of an NMI. + * + * Attempt to mitigate this by using the number of active PMCs to determine + * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset + * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the + * number of active PMCs or 2. The value of 2 is used in case an NMI does not + * arrive at the LAPIC in time to be collapsed into an already pending NMI. + */ +static int amd_pmu_handle_irq(struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int active, handled; + + /* + * Obtain the active count before calling x86_pmu_handle_irq() since + * it is possible that x86_pmu_handle_irq() may make a counter + * inactive (through x86_pmu_stop). + */ + active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX); + + /* Process any counter overflows */ + handled = x86_pmu_handle_irq(regs); + + /* + * If a counter was handled, record the number of possible remaining + * NMIs that can occur.
+ */ + if (handled) { + this_cpu_write(perf_nmi_counter, + min_t(unsigned int, 2, active)); + + return handled; + } + + if (!this_cpu_read(perf_nmi_counter)) + return NMI_DONE; + + this_cpu_dec(perf_nmi_counter); + + return NMI_HANDLED; +} + static struct event_constraint * amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -680,7 +734,7 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config) static __initconst const struct x86_pmu amd_pmu = { .name = "AMD", - .handle_irq = x86_pmu_handle_irq, + .handle_irq = amd_pmu_handle_irq, .disable_all = amd_pmu_disable_all, .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, -- cgit v1.2.3 From 2201f31f2c6d6030cbd2f7085455e2172725b1c5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 3 Apr 2019 20:19:25 -0700 Subject: xtensa: use actual syscall number in do_syscall_trace_leave A syscall may alter the pt_regs structure passed to it, resulting in a mismatch between the syscall entry and syscall exit entries in ftrace. Temporarily restore the syscall field of pt_regs for the duration of do_syscall_trace_leave. Signed-off-by: Max Filippov --- arch/xtensa/kernel/entry.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index e50f5124dc6f..e54af8b7e0f8 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1860,6 +1860,8 @@ ENTRY(system_call) l32i a7, a2, PT_SYSCALL 1: + s32i a7, a1, 4 + /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table @@ -1893,8 +1895,12 @@ ENTRY(system_call) retw 1: + l32i a4, a1, 4 + l32i a3, a2, PT_SYSCALL + s32i a4, a2, PT_SYSCALL mov a6, a2 call4 do_syscall_trace_leave + s32i a3, a2, PT_SYSCALL retw ENDPROC(system_call) -- cgit v1.2.3 From 2663147dc7465cb29040a05cc4286fdd839978b5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 3 Apr 2019 20:22:42 -0700 Subject: xtensa: fix initialization of pt_regs::syscall in start_thread New pt_regs should indicate that there's no syscall, not that there's syscall #0. While at it, wrap the macro body in do/while and parenthesize the macro arguments. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/processor.h | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index f7dd895b2353..0c14018d1c26 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -187,15 +187,18 @@ struct thread_struct { /* Clearing a0 terminates the backtrace.
*/ #define start_thread(regs, new_pc, new_sp) \ - memset(regs, 0, sizeof(*regs)); \ - regs->pc = new_pc; \ - regs->ps = USER_PS_VALUE; \ - regs->areg[1] = new_sp; \ - regs->areg[0] = 0; \ - regs->wmask = 1; \ - regs->depc = 0; \ - regs->windowbase = 0; \ - regs->windowstart = 1; + do { \ + memset((regs), 0, sizeof(*(regs))); \ + (regs)->pc = (new_pc); \ + (regs)->ps = USER_PS_VALUE; \ + (regs)->areg[1] = (new_sp); \ + (regs)->areg[0] = 0; \ + (regs)->wmask = 1; \ + (regs)->depc = 0; \ + (regs)->windowbase = 0; \ + (regs)->windowstart = 1; \ + (regs)->syscall = NO_SYSCALL; \ + } while (0) /* Forward declaration */ struct task_struct; -- cgit v1.2.3 From ba5e60c9b75dec92d4c695b928f69300b17d7686 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Tue, 2 Apr 2019 22:12:38 +0800 Subject: arm/mach-at91/pm: fix possible object reference leak of_find_device_by_node() takes a reference to the struct device when it finds a match via get_device. When returning an error we should call put_device(). Reviewed-by: Mukesh Ojha Signed-off-by: Peng Hao Signed-off-by: Ludovic Desroches --- arch/arm/mach-at91/pm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 51e808adb00c..2a757dcaa1a5 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void) np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); if (!np) - goto securam_fail; + goto securam_fail_no_ref_dev; pdev = of_find_device_by_node(np); of_node_put(np); if (!pdev) { pr_warn("%s: failed to find securam device!\n", __func__); - goto securam_fail; + goto securam_fail_no_ref_dev; } sram_pool = gen_pool_get(&pdev->dev, NULL); @@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void) return 0; securam_fail: + put_device(&pdev->dev); +securam_fail_no_ref_dev: iounmap(pm_data.sfrbu); pm_data.sfrbu = NULL; return ret; -- cgit v1.2.3 From 10a16997db3d99fc02c026cf2c6e6c670acafab0 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Fri, 29 Mar 2019 20:12:21 +0300 Subject: riscv: Fix syscall_get_arguments() and syscall_set_arguments() RISC-V syscall arguments are located in orig_a0,a1..a5 fields of struct pt_regs. Due to an off-by-one bug and a bug in pointer arithmetic, syscall_get_arguments() was reading the s3..s7 fields instead of a1..a5. Likewise, syscall_set_arguments() was writing the s3..s7 fields instead of a1..a5. Link: http://lkml.kernel.org/r/20190329171221.GA32456@altlinux.org Fixes: e2c0cdfba7f69 ("RISC-V: User-facing API") Cc: Ingo Molnar Cc: Kees Cook Cc: Andy Lutomirski Cc: Will Drewry Cc: Albert Ou Cc: linux-riscv@lists.infradead.org Cc: stable@vger.kernel.org # v4.15+ Acked-by: Palmer Dabbelt Signed-off-by: Dmitry V.
Levin Signed-off-by: Steven Rostedt (VMware) --- arch/riscv/include/asm/syscall.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index bba3da6ef157..6ea9e1804233 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -79,10 +79,11 @@ static inline void syscall_get_arguments(struct task_struct *task, if (i == 0) { args[0] = regs->orig_a0; args++; - i++; n--; + } else { + i--; } - memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); + memcpy(args, &regs->a1 + i, n * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, @@ -94,10 +95,11 @@ static inline void syscall_set_arguments(struct task_struct *task, if (i == 0) { regs->orig_a0 = args[0]; args++; - i++; n--; - } - memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); + } else { + i--; + } + memcpy(&regs->a1 + i, args, n * sizeof(regs->a1)); } static inline int syscall_get_arch(void) -- cgit v1.2.3 From ed3bb007021b9bddb90afae28a19f08ed8890add Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Fri, 29 Mar 2019 20:12:30 +0300 Subject: csky: Fix syscall_get_arguments() and syscall_set_arguments() C-SKY syscall arguments are located in orig_a0,a1,a2,a3,regs[0],regs[1] fields of struct pt_regs. Due to an off-by-one bug and a bug in pointer arithmetic, syscall_get_arguments() was reading orig_a0,regs[1..5] fields instead. Likewise, syscall_set_arguments() was writing orig_a0,regs[1..5] fields instead. Link: http://lkml.kernel.org/r/20190329171230.GB32456@altlinux.org Fixes: 4859bfca11c7d ("csky: System Call") Cc: Ingo Molnar Cc: Kees Cook Cc: Andy Lutomirski Cc: Will Drewry Cc: stable@vger.kernel.org # v4.20+ Tested-by: Guo Ren Acked-by: Guo Ren Signed-off-by: Dmitry V. Levin Signed-off-by: Steven Rostedt (VMware) --- arch/csky/include/asm/syscall.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index d637445737b7..9a9cd81e66c1 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h @@ -49,10 +49,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, if (i == 0) { args[0] = regs->orig_a0; args++; - i++; n--; + } else { + i--; } - memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); + memcpy(args, &regs->a1 + i, n * sizeof(args[0])); } static inline void @@ -63,10 +64,11 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, if (i == 0) { regs->orig_a0 = args[0]; args++; - i++; n--; + } else { + i--; } - memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); + memcpy(&regs->a1 + i, args, n * sizeof(regs->a1)); } static inline int -- cgit v1.2.3 From 1c41860864c8ae0387ef7d44f0000e99cbb2e06d Mon Sep 17 00:00:00 2001 From: Wei Li Date: Mon, 1 Apr 2019 11:55:57 +0800 Subject: arm64: fix wrong check of on_sdei_stack in nmi context When doing unwind_frame() in the context of a pseudo NMI (which requires CONFIG_ARM64_PSEUDO_NMI) and reaching the bottom of the stack (fp == 0, pc != 0), on_sdei_stack() will return true even though the SDEI ACPI table has in fact not been initialized. This will cause a "NULL pointer dereference" oops when unwinding continues.
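The shape of the guard the patch below adds, as a minimal sketch: a stack whose base pointer is still NULL was never allocated, so no stack pointer can lie on it and the range check must not be reached.

    #include <stdbool.h>

    /* Sketch of the added check: 'base' stands for the per-CPU SDEI stack
     * pointer, which is still NULL when the SDEI stacks were never set up. */
    static bool on_stack(unsigned long sp, unsigned long base,
                         unsigned long size)
    {
            if (!base)              /* stack not allocated yet */
                    return false;
            return sp >= base && sp < base + size;
    }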
Reviewed-by: Julien Thierry Signed-off-by: Wei Li Signed-off-by: Catalin Marinas --- arch/arm64/kernel/sdei.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 5ba4465e44f0..ea94cf8f9dc6 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE; + if (!low) + return false; + if (sp < low || sp >= high) return false; @@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE; + if (!low) + return false; + if (sp < low || sp >= high) return false; -- cgit v1.2.3 From 6e3572e83dc3563e3b7e742bcb225b42a60cdaeb Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 4 Apr 2019 10:25:28 +0200 Subject: MIPS: generic: Add switchdev, pinctrl and fit to ocelot_defconfig Some of the configuration options were no longer selected by default, therefore enable them again. Also add some configs which are needed for MSCC Ocelot. Signed-off-by: Horatiu Vultur Signed-off-by: Paul Burton Cc: Cc: Cc: Cc: Cc: Cc: --- arch/mips/configs/generic/board-ocelot.config | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config index f607888d2483..184eb65a6ba7 100644 --- a/arch/mips/configs/generic/board-ocelot.config +++ b/arch/mips/configs/generic/board-ocelot.config @@ -1,6 +1,10 @@ # require CONFIG_CPU_MIPS32_R2=y CONFIG_LEGACY_BOARD_OCELOT=y +CONFIG_FIT_IMAGE_FDT_OCELOT=y + +CONFIG_BRIDGE=y +CONFIG_GENERIC_PHY=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y @@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_NETDEVICES=y +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_DSA=y CONFIG_MSCC_OCELOT_SWITCH=y CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y CONFIG_MDIO_MSCC_MIIM=y @@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y CONFIG_SPI_DW_MMIO=y CONFIG_SPI_SPIDEV=y +CONFIG_PINCTRL_OCELOT=y + CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET=y -- cgit v1.2.3 From ada770b1e74a77fff2d5f539bf6c42c25f4784db Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 4 Apr 2019 11:08:40 -0700 Subject: xtensa: fix return_address return_address returns the address that is one level higher in the call stack than requested in its argument, because level 0 corresponds to its caller's return address. Use the requested level as the number of stack frames to skip. This fixes the address reported by might_sleep and friends. Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/kernel/stacktrace.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 174c11f13bba..b9f82510c650 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data) return 1; } +/* + * level == 0 is for the return address from the caller of this function, + * not from this function itself.
+ */ unsigned long return_address(unsigned level) { struct return_addr_data r = { - .skip = level + 1, + .skip = level, }; walk_stackframe(stack_pointer(NULL), return_address_cb, &r); return r.addr; -- cgit v1.2.3 From ecae26fae15abb7d433557afbd15467ce1c444f5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 4 Apr 2019 18:42:05 -0700 Subject: xtensa: fix format string warning in init_pmd Use %lu instead of %zu to fix the following warning introduced with recent memblock refactoring: xtensa/mm/mmu.c:36:9: warning: format '%zu' expects argument of type 'size_t', but argument 3 has type 'long unsigned int Signed-off-by: Max Filippov --- arch/xtensa/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 2fb7d1172228..03678c4afc39 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); if (!pte) - panic("%s: Failed to allocate %zu bytes align=%lx\n", + panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, n_pages * sizeof(pte_t), PAGE_SIZE); for (i = 0; i < n_pages; ++i) -- cgit v1.2.3 From 42d8644bd77dd2d747e004e367cb0c895a606f39 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 4 Apr 2019 18:12:17 +0300 Subject: xen: Prevent buffer overflow in privcmd ioctl The "call" variable comes from the user in privcmd_ioctl_hypercall(). It's an offset into the hypercall_page[] which has (PAGE_SIZE / 32) elements. We need to put an upper bound on it to prevent an out of bounds access. Cc: stable@vger.kernel.org Fixes: 1246ae0bb992 ("xen: add variable hypercall caller") Signed-off-by: Dan Carpenter Reviewed-by: Boris Ostrovsky Signed-off-by: Juergen Gross --- arch/x86/include/asm/xen/hypercall.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index de6f0d59a24f..2863c2026655 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -206,6 +206,9 @@ xen_single_call(unsigned int call, __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) + return -EINVAL; + asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM : [thunk_target] "a" (&hypercall_page[call]) -- cgit v1.2.3 From b35f549df1d7520d37ba1e6d4a8d4df6bd52d136 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Mon, 7 Nov 2016 16:26:37 -0500 Subject: syscalls: Remove start and number from syscall_get_arguments() args At Linux Plumbers, Andy Lutomirski approached me and pointed out that the function call syscall_get_arguments() implemented in x86 was horribly written and not optimized for the standard case of passing in 0 and 6 for the starting index and the number of system calls to get. When looking at all the users of this function, I discovered that all instances pass in only 0 and 6 for these arguments. Instead of having this function handle different cases that are never used, simply rewrite it to return the first 6 arguments of a system call. This should help out the performance of tracing system calls by ptrace, ftrace and perf. Link: http://lkml.kernel.org/r/20161107213233.754809394@goodmis.org Cc: Oleg Nesterov Cc: Kees Cook Cc: Andy Lutomirski Cc: Dominik Brodowski Cc: Dave Martin Cc: "Dmitry V. 
Levin" Cc: x86@kernel.org Cc: linux-snps-arc@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: uclinux-h8-devel@lists.sourceforge.jp Cc: linux-hexagon@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: nios2-dev@lists.rocketboards.org Cc: openrisc@lists.librecores.org Cc: linux-parisc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-riscv@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: sparclinux@vger.kernel.org Cc: linux-um@lists.infradead.org Cc: linux-xtensa@linux-xtensa.org Cc: linux-arch@vger.kernel.org Acked-by: Paul Burton # MIPS parts Acked-by: Max Filippov # For xtensa changes Acked-by: Will Deacon # For the arm64 bits Reviewed-by: Thomas Gleixner # for x86 Reviewed-by: Dmitry V. Levin Reported-by: Andy Lutomirski Signed-off-by: Steven Rostedt (VMware) --- arch/arc/include/asm/syscall.h | 7 ++-- arch/arm/include/asm/syscall.h | 23 ++--------- arch/arm64/include/asm/syscall.h | 22 ++--------- arch/c6x/include/asm/syscall.h | 41 ++++---------------- arch/csky/include/asm/syscall.h | 14 ++----- arch/h8300/include/asm/syscall.h | 34 ++++------------ arch/hexagon/include/asm/syscall.h | 4 +- arch/ia64/include/asm/syscall.h | 5 +-- arch/microblaze/include/asm/syscall.h | 4 +- arch/mips/include/asm/syscall.h | 3 +- arch/mips/kernel/ptrace.c | 2 +- arch/nds32/include/asm/syscall.h | 33 +++------------- arch/nios2/include/asm/syscall.h | 42 ++++---------------- arch/openrisc/include/asm/syscall.h | 6 +-- arch/parisc/include/asm/syscall.h | 30 ++++---------- arch/powerpc/include/asm/syscall.h | 8 ++-- arch/riscv/include/asm/syscall.h | 13 ++----- arch/s390/include/asm/syscall.h | 17 +++----- arch/sh/include/asm/syscall_32.h | 26 +++---------- arch/sh/include/asm/syscall_64.h | 4 +- arch/sparc/include/asm/syscall.h | 4 +- arch/um/include/asm/syscall-generic.h | 39 +++---------------- arch/x86/include/asm/syscall.h | 73 ++++++++--------------------------- arch/xtensa/include/asm/syscall.h | 16 ++------ include/asm-generic/syscall.h | 11 ++---- include/trace/events/syscalls.h | 2 +- kernel/seccomp.c | 2 +- kernel/trace/trace_syscalls.c | 4 +- lib/syscall.c | 2 +- 29 files changed, 113 insertions(+), 378 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h index 29de09804306..c7a4201ed62b 100644 --- a/arch/arc/include/asm/syscall.h +++ b/arch/arc/include/asm/syscall.h @@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, */ static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) + unsigned long *args) { unsigned long *inside_ptregs = &(regs->r0); - inside_ptregs -= i; - - BUG_ON((i + n) > 6); + unsigned int n = 6; + unsigned int i = 0; while (n--) { args[i++] = (*inside_ptregs); diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 06dea6bce293..db969a2972ae 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -55,29 +55,12 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - if (n == 0) - return; - - if (i + n > SYSCALL_MAX_ARGS) { - unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; - unsigned int n_bad = n + i - 
SYSCALL_MAX_ARGS; - pr_warn("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - memset(args_bad, 0, n_bad * sizeof(args[0])); - n = SYSCALL_MAX_ARGS - i; - } - - if (i == 0) { - args[0] = regs->ARM_ORIG_r0; - args++; - i++; - n--; - } + args[0] = regs->ARM_ORIG_r0; + args++; - memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0])); + memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9..55b2dab21023 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -65,28 +65,12 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - if (n == 0) - return; - - if (i + n > SYSCALL_MAX_ARGS) { - unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; - unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - memset(args_bad, 0, n_bad * sizeof(args[0])); - } - - if (i == 0) { - args[0] = regs->orig_x0; - args++; - i++; - n--; - } + args[0] = regs->orig_x0; + args++; - memcpy(args, &regs->regs[i], n * sizeof(args[0])); + memcpy(args, &regs->regs[1], 5 * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h index ae2be315ee9c..06db3251926b 100644 --- a/arch/c6x/include/asm/syscall.h +++ b/arch/c6x/include/asm/syscall.h @@ -46,40 +46,15 @@ static inline void syscall_set_return_value(struct task_struct *task, } static inline void syscall_get_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned int i, - unsigned int n, unsigned long *args) + struct pt_regs *regs, + unsigned long *args) { - switch (i) { - case 0: - if (!n--) - break; - *args++ = regs->a4; - case 1: - if (!n--) - break; - *args++ = regs->b4; - case 2: - if (!n--) - break; - *args++ = regs->a6; - case 3: - if (!n--) - break; - *args++ = regs->b6; - case 4: - if (!n--) - break; - *args++ = regs->a8; - case 5: - if (!n--) - break; - *args++ = regs->b8; - case 6: - if (!n--) - break; - default: - BUG(); - } + *args++ = regs->a4; + *args++ = regs->b4; + *args++ = regs->a6; + *args++ = regs->b6; + *args++ = regs->a8; + *args = regs->b8; } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index 9a9cd81e66c1..c691fe92edc6 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h @@ -43,17 +43,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) + unsigned long *args) { - BUG_ON(i + n > 6); - if (i == 0) { - args[0] = regs->orig_a0; - args++; - n--; - } else { - i--; - } - memcpy(args, &regs->a1 + i, n * sizeof(args[0])); + args[0] = regs->orig_a0; + args++; + memcpy(args, &regs->a1, 5 * sizeof(args[0])); } static inline void diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h index 924990401237..ddd483c6ca95 100644 --- a/arch/h8300/include/asm/syscall.h +++ b/arch/h8300/include/asm/syscall.h @@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct
*task, struct pt_regs *regs) static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) + unsigned long *args) { - BUG_ON(i + n > 6); - - while (n > 0) { - switch (i) { - case 0: - *args++ = regs->er1; - break; - case 1: - *args++ = regs->er2; - break; - case 2: - *args++ = regs->er3; - break; - case 3: - *args++ = regs->er4; - break; - case 4: - *args++ = regs->er5; - break; - case 5: - *args++ = regs->er6; - break; - } - i++; - n--; - } + *args++ = regs->er1; + *args++ = regs->er2; + *args++ = regs->er3; + *args++ = regs->er4; + *args++ = regs->er5; + *args = regs->er6; } diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h index 4af9c7b6f13a..ae3a1e24fabd 100644 --- a/arch/hexagon/include/asm/syscall.h +++ b/arch/hexagon/include/asm/syscall.h @@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(i + n > 6); - memcpy(args, &(&regs->r00)[i], n * sizeof(args[0])); + memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0])); } #endif diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 1d0b875fec44..8204c1ff70ce 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -63,12 +63,9 @@ extern void ia64_syscall_get_set_arguments(struct task_struct *task, unsigned long *args, int rw); static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(i + n > 6); - - ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); + ia64_syscall_get_set_arguments(task, regs, 0, 6, args, 0); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 220decd605a4..4b23e44e5c3c 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h @@ -82,9 +82,11 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { + unsigned int i = 0; + unsigned int n = 6; + while (n--) *args++ = microblaze_get_syscall_arg(regs, i++); } diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 6cf8ffb5367e..a2b4748655df 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { + unsigned int i = 0; + unsigned int n = 6; int ret; /* O32 ABI syscall() */ diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0057c910bc2f..3a62f80958e1 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) sd.nr = syscall; sd.arch = syscall_get_arch(); - syscall_get_arguments(current, regs, 0, 6, args); + syscall_get_arguments(current, regs, args); for (i = 0; i < 6; i++) sd.args[i] = args[i]; sd.instruction_pointer = KSTK_EIP(current); diff --git a/arch/nds32/include/asm/syscall.h
b/arch/nds32/include/asm/syscall.h index f7e5e86765fe..89a6ec8731d8 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h @@ -108,42 +108,21 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, * syscall_get_arguments - extract system call parameter values * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task - * @i: argument index [0,5] - * @n: number of arguments; n+i must be [1,6]. * @args: array filled with argument values * - * Fetches @n arguments to the system call starting with the @i'th argument - * (from 0 through 5). Argument @i is stored in @args[0], and so on. - * An arch inline version is probably optimal when @i and @n are constants. + * Fetches 6 arguments to the system call (from 0 through 5). The first + * argument is stored in @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. - * It's invalid to call this with @i + @n > 6; we only support system calls - * taking up to 6 arguments. */ #define SYSCALL_MAX_ARGS 6 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) + unsigned long *args) { - if (n == 0) - return; - if (i + n > SYSCALL_MAX_ARGS) { - unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; - unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - memset(args_bad, 0, n_bad * sizeof(args[0])); - memset(args_bad, 0, n_bad * sizeof(args[0])); - } - - if (i == 0) { - args[0] = regs->orig_r0; - args++; - i++; - n--; - } - - memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0])); + args[0] = regs->orig_r0; + args++; + memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0])); } /** diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h index 9de220854c4a..792bd449d839 100644 --- a/arch/nios2/include/asm/syscall.h +++ b/arch/nios2/include/asm/syscall.h @@ -58,42 +58,14 @@ static inline void syscall_set_return_value(struct task_struct *task, } static inline void syscall_get_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned int i, unsigned int n, - unsigned long *args) + struct pt_regs *regs, unsigned long *args) { - BUG_ON(i + n > 6); - - switch (i) { - case 0: - if (!n--) - break; - *args++ = regs->r4; - case 1: - if (!n--) - break; - *args++ = regs->r5; - case 2: - if (!n--) - break; - *args++ = regs->r6; - case 3: - if (!n--) - break; - *args++ = regs->r7; - case 4: - if (!n--) - break; - *args++ = regs->r8; - case 5: - if (!n--) - break; - *args++ = regs->r9; - case 6: - if (!n--) - break; - default: - BUG(); - } + *args++ = regs->r4; + *args++ = regs->r5; + *args++ = regs->r6; + *args++ = regs->r7; + *args++ = regs->r8; + *args = regs->r9; } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index 2db9f1cf0694..72607860cd55 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h @@ -56,11 +56,9 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) + unsigned long *args) { - BUG_ON(i + n > 6); - - memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0])); + memcpy(args,
&regs->gpr[3], 6 * sizeof(args[0])); } static inline void diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 8bff1a58c97f..62a6d477fae0 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h @@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk, } static inline void syscall_get_arguments(struct task_struct *tsk, - struct pt_regs *regs, unsigned int i, - unsigned int n, unsigned long *args) + struct pt_regs *regs, + unsigned long *args) { - BUG_ON(i); - - switch (n) { - case 6: - args[5] = regs->gr[21]; - case 5: - args[4] = regs->gr[22]; - case 4: - args[3] = regs->gr[23]; - case 3: - args[2] = regs->gr[24]; - case 2: - args[1] = regs->gr[25]; - case 1: - args[0] = regs->gr[26]; - case 0: - break; - default: - BUG(); - } + args[5] = regs->gr[21]; + args[4] = regs->gr[22]; + args[3] = regs->gr[23]; + args[2] = regs->gr[24]; + args[1] = regs->gr[25]; + args[0] = regs->gr[26]; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 1a0e7a8b1c81..5c9b9dc82b7e 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { unsigned long val, mask = -1UL; - - BUG_ON(i + n > 6); + unsigned int n = 6; #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_32BIT)) mask = 0xffffffff; #endif while (n--) { - if (n == 0 && i == 0) + if (n == 0) val = regs->orig_gpr3; else - val = regs->gpr[3 + i + n]; + val = regs->gpr[3 + n]; args[n] = val & mask; } diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 6ea9e1804233..6adca1804be1 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -72,18 +72,11 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(i + n > 6); - if (i == 0) { - args[0] = regs->orig_a0; - args++; - n--; - } else { - i--; - } - memcpy(args, &regs->a1 + i, n * sizeof(args[0])); + args[0] = regs->orig_a0; + args++; + memcpy(args, &regs->a1, 5 * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 96f9a9151fde..ee0b1f6aa36d 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -56,27 +56,20 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { unsigned long mask = -1UL; + unsigned int n = 6; - /* - * No arguments for this syscall, there's nothing to do.
- */ - if (!n) - return; - - BUG_ON(i + n > 6); #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_31BIT)) mask = 0xffffffff; #endif while (n-- > 0) - if (i + n > 0) - args[n] = regs->gprs[2 + i + n] & mask; - if (i == 0) - args[0] = regs->orig_gpr2 & mask; + if (n > 0) + args[n] = regs->gprs[2 + n] & mask; + + args[0] = regs->orig_gpr2 & mask; } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6e118799831c..2bf1199a0595 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -48,30 +48,16 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - /* - * Do this simply for now. If we need to start supporting - * fetching arguments from arbitrary indices, this will need some - * extra logic. Presently there are no in-tree users that depend - * on this behaviour. - */ - BUG_ON(i); /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ - switch (n) { - case 6: args[5] = regs->regs[1]; - case 5: args[4] = regs->regs[0]; - case 4: args[3] = regs->regs[7]; - case 3: args[2] = regs->regs[6]; - case 2: args[1] = regs->regs[5]; - case 1: args[0] = regs->regs[4]; - case 0: - break; - default: - BUG(); - } + args[5] = regs->regs[1]; + args[4] = regs->regs[0]; + args[3] = regs->regs[7]; + args[2] = regs->regs[6]; + args[1] = regs->regs[5]; + args[0] = regs->regs[4]; } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index 43882580c7f9..4e8f6460c703 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -47,11 +47,9 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(i + n > 6); - memcpy(args, &regs->regs[2 + i], n * sizeof(args[0])); + memcpy(args, &regs->regs[2], 6 * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 053989e3f6a6..872dfee852d6 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h @@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { int zero_extend = 0; unsigned int j; + unsigned int n = 6; #ifdef CONFIG_SPARC64 if (test_tsk_thread_flag(task, TIF_32BIT)) @@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task, #endif for (j = 0; j < n; j++) { - unsigned long val = regs->u_regs[UREG_I0 + i + j]; + unsigned long val = regs->u_regs[UREG_I0 + j]; if (zero_extend) args[j] = (u32) val; diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h index 9fb9cf8cd39a..25d00acd1322 100644 --- a/arch/um/include/asm/syscall-generic.h +++ b/arch/um/include/asm/syscall-generic.h @@ -53,43 +53,16 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { const
struct uml_pt_regs *r = &regs->regs; - switch (i) { - case 0: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG1(r); - case 1: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG2(r); - case 2: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG3(r); - case 3: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG4(r); - case 4: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG5(r); - case 5: - if (!n--) - break; - *args++ = UPT_SYSCALL_ARG6(r); - case 6: - if (!n--) - break; - default: - BUG(); - break; - } + *args++ = UPT_SYSCALL_ARG1(r); + *args++ = UPT_SYSCALL_ARG2(r); + *args++ = UPT_SYSCALL_ARG3(r); + *args++ = UPT_SYSCALL_ARG4(r); + *args++ = UPT_SYSCALL_ARG5(r); + *args = UPT_SYSCALL_ARG6(r); } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index d653139857af..8dbb5c379450 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(i + n > 6); - memcpy(args, &regs->bx + i, n * sizeof(args[0])); + memcpy(args, &regs->bx, 6 * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, @@ -116,63 +114,26 @@ static inline int syscall_get_arch(void) static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread_info.status & TS_COMPAT) - switch (i) { - case 0: - if (!n--) break; - *args++ = regs->bx; - case 1: - if (!n--) break; - *args++ = regs->cx; - case 2: - if (!n--) break; - *args++ = regs->dx; - case 3: - if (!n--) break; - *args++ = regs->si; - case 4: - if (!n--) break; - *args++ = regs->di; - case 5: - if (!n--) break; - *args++ = regs->bp; - case 6: - if (!n--) break; - default: - BUG(); - break; - } - else + if (task->thread_info.status & TS_COMPAT) { + *args++ = regs->bx; + *args++ = regs->cx; + *args++ = regs->dx; + *args++ = regs->si; + *args++ = regs->di; + *args = regs->bp; + } else # endif - switch (i) { - case 0: - if (!n--) break; - *args++ = regs->di; - case 1: - if (!n--) break; - *args++ = regs->si; - case 2: - if (!n--) break; - *args++ = regs->dx; - case 3: - if (!n--) break; - *args++ = regs->r10; - case 4: - if (!n--) break; - *args++ = regs->r8; - case 5: - if (!n--) break; - *args++ = regs->r9; - case 6: - if (!n--) break; - default: - BUG(); - break; - } + { + *args++ = regs->di; + *args++ = regs->si; + *args++ = regs->dx; + *args++ = regs->r10; + *args++ = regs->r8; + *args = regs->r9; + } } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index a168bf81c7f4..1504ce9a233a 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -59,23 +59,13 @@ static inline void syscall_set_return_value(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; - unsigned int j; - - if (n == 0) - return; - - WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS); + unsigned int i; - for (j = 0; j < n; ++j) { - if (i + j < SYSCALL_MAX_ARGS) - args[j] = regs->areg[reg[i + j]]; - else - args[j] =
0; - } + for (i = 0; i < 6; ++i) + args[i] = regs->areg[reg[i]]; } static inline void syscall_set_arguments(struct task_struct *task, diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 0c938a4354f6..269e9412ef42 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -105,21 +105,16 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, * syscall_get_arguments - extract system call parameter values * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task - * @i: argument index [0,5] - * @n: number of arguments; n+i must be [1,6]. * @args: array filled with argument values * - * Fetches @n arguments to the system call starting with the @i'th argument - * (from 0 through 5). Argument @i is stored in @args[0], and so on. - * An arch inline version is probably optimal when @i and @n are constants. + * Fetches 6 arguments to the system call. First argument is stored in +* @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. - * It's invalid to call this with @i + @n > 6; we only support system calls - * taking up to 6 arguments. */ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args); + unsigned long *args); /** * syscall_set_arguments - change system call parameter value diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 44a3259ed4a5..b6e0cbc2c71f 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter, TP_fast_assign( __entry->id = id; - syscall_get_arguments(current, regs, 0, 6, __entry->args); + syscall_get_arguments(current, regs, __entry->args); ), TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 54a0347ca812..df27e499956a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd) sd->nr = syscall_get_nr(task, regs); sd->arch = syscall_get_arch(); - syscall_get_arguments(task, regs, 0, 6, args); + syscall_get_arguments(task, regs, args); sd->args[0] = args[0]; sd->args[1] = args[1]; sd->args[2] = args[2]; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index e9f5bbbad6d9..fa8fbff736d6 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -348,7 +348,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) entry = ring_buffer_event_data(event); entry->nr = syscall_nr; - syscall_get_arguments(current, regs, 0, 6, args); + syscall_get_arguments(current, regs, args); memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args); event_trigger_unlock_commit(trace_file, buffer, event, entry, @@ -616,7 +616,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) return; rec->nr = syscall_nr; - syscall_get_arguments(current, regs, 0, 6, args); + syscall_get_arguments(current, regs, args); memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args); if ((valid_prog_array && diff --git a/lib/syscall.c b/lib/syscall.c index e8467e17b9a2..fb328e7ccb08 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -27,7 +27,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info info->data.nr = syscall_get_nr(target, regs); if (info->data.nr != -1L) - 
syscall_get_arguments(target, regs, 0, 6, + syscall_get_arguments(target, regs, (unsigned long *)&info->data.args[0]); put_task_stack(target); -- cgit v1.2.3 From 32d92586629a8b3637a3c9361709818e25f327ad Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 27 Mar 2019 20:07:31 -0400 Subject: syscalls: Remove start and number from syscall_set_arguments() args After removing the start and count arguments of syscall_get_arguments() it seems reasonable to remove them from syscall_set_arguments(). Note, as of today, there are no users of syscall_set_arguments(). But we are told that there will be soon. But for now, at least make it consistent with syscall_get_arguments(). Link: http://lkml.kernel.org/r/20190327222014.GA32540@altlinux.org Cc: Oleg Nesterov Cc: Kees Cook Cc: Andy Lutomirski Cc: Dominik Brodowski Cc: Dave Martin Cc: "Dmitry V. Levin" Cc: x86@kernel.org Cc: linux-snps-arc@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: uclinux-h8-devel@lists.sourceforge.jp Cc: linux-hexagon@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: nios2-dev@lists.rocketboards.org Cc: openrisc@lists.librecores.org Cc: linux-parisc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-riscv@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: sparclinux@vger.kernel.org Cc: linux-um@lists.infradead.org Cc: linux-xtensa@linux-xtensa.org Cc: linux-arch@vger.kernel.org Acked-by: Max Filippov # For xtensa changes Acked-by: Will Deacon # For the arm64 bits Reviewed-by: Thomas Gleixner # for x86 Reviewed-by: Dmitry V. Levin Signed-off-by: Steven Rostedt (VMware) --- arch/arm/include/asm/syscall.h | 22 ++--------- arch/arm64/include/asm/syscall.h | 22 ++--------- arch/c6x/include/asm/syscall.h | 38 +++---------------- arch/csky/include/asm/syscall.h | 14 ++----- arch/ia64/include/asm/syscall.h | 10 ++--- arch/ia64/kernel/ptrace.c | 7 ++-- arch/microblaze/include/asm/syscall.h | 4 +- arch/nds32/include/asm/syscall.h | 29 +++------------ arch/nios2/include/asm/syscall.h | 42 ++++----------------- arch/openrisc/include/asm/syscall.h | 6 +-- arch/powerpc/include/asm/syscall.h | 7 +--- arch/riscv/include/asm/syscall.h | 13 ++----- arch/s390/include/asm/syscall.h | 11 +++--- arch/sh/include/asm/syscall_32.h | 21 +++-------- arch/sh/include/asm/syscall_64.h | 4 +- arch/sparc/include/asm/syscall.h | 7 ++-- arch/um/include/asm/syscall-generic.h | 39 +++----------------- arch/x86/include/asm/syscall.h | 69 ++++++++--------------------------- arch/xtensa/include/asm/syscall.h | 17 ++------- include/asm-generic/syscall.h | 10 +---- 20 files changed, 88 insertions(+), 304 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index db969a2972ae..080ce70cab12 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -65,26 +65,12 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - if (n == 0) - return; - - if (i + n > SYSCALL_MAX_ARGS) { - pr_warn("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - n = SYSCALL_MAX_ARGS - i; - } - - if (i == 0) { - regs->ARM_ORIG_r0 = args[0]; - args++; - i++; - n--; - } - - memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0])); + regs->ARM_ORIG_r0 =
args[0]; + args++; + + memcpy(®s->ARM_r0 + 1, args, 5 * sizeof(args[0])); } static inline int syscall_get_arch(void) diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 55b2dab21023..a179df3674a1 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -75,26 +75,12 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - if (n == 0) - return; - - if (i + n > SYSCALL_MAX_ARGS) { - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - n = SYSCALL_MAX_ARGS - i; - } - - if (i == 0) { - regs->orig_x0 = args[0]; - args++; - i++; - n--; - } - - memcpy(®s->regs[i], args, n * sizeof(args[0])); + regs->orig_x0 = args[0]; + args++; + + memcpy(®s->regs[1], args, 5 * sizeof(args[0])); } /* diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h index 06db3251926b..15ba8599858e 100644 --- a/arch/c6x/include/asm/syscall.h +++ b/arch/c6x/include/asm/syscall.h @@ -59,40 +59,14 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - switch (i) { - case 0: - if (!n--) - break; - regs->a4 = *args++; - case 1: - if (!n--) - break; - regs->b4 = *args++; - case 2: - if (!n--) - break; - regs->a6 = *args++; - case 3: - if (!n--) - break; - regs->b6 = *args++; - case 4: - if (!n--) - break; - regs->a8 = *args++; - case 5: - if (!n--) - break; - regs->a9 = *args++; - case 6: - if (!n) - break; - default: - BUG(); - } + regs->a4 = *args++; + regs->b4 = *args++; + regs->a6 = *args++; + regs->b6 = *args++; + regs->a8 = *args++; + regs->a9 = *args; } #endif /* __ASM_C6X_SYSCALLS_H */ diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index c691fe92edc6..bda0a446c63e 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h @@ -52,17 +52,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) + const unsigned long *args) { - BUG_ON(i + n > 6); - if (i == 0) { - regs->orig_a0 = args[0]; - args++; - n--; - } else { - i--; - } - memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); + regs->orig_a0 = args[0]; + args++; + memcpy(®s->a1, args, 5 * sizeof(regs->a1)); } static inline int diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 8204c1ff70ce..0d9e7fab4a79 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -59,23 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task, } extern void ia64_syscall_get_set_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned int i, unsigned int n, - unsigned long *args, int rw); + struct pt_regs *regs, unsigned long *args, int rw); static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) { - ia64_syscall_get_set_arguments(task, regs, 0, 6, args, 0); + ia64_syscall_get_set_arguments(task, regs, args, 0); } static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, unsigned long *args) { - 
BUG_ON(i + n > 6); - - ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); + ia64_syscall_get_set_arguments(task, regs, args, 1); } static inline int syscall_get_arch(void) diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6d50ede0ed69..bf9c24d9ce84 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) } void ia64_syscall_get_set_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned int i, unsigned int n, - unsigned long *args, int rw) + struct pt_regs *regs, unsigned long *args, int rw) { struct syscall_get_set_args data = { - .i = i, - .n = n, + .i = 0, + .n = 6, .args = args, .regs = regs, .rw = rw, diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 4b23e44e5c3c..833d3a53dab3 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h @@ -93,9 +93,11 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { + unsigned int i = 0; + unsigned int n = 6; + while (n--) microblaze_set_syscall_arg(regs, i++, *args++); } diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index 89a6ec8731d8..671ebd357496 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h @@ -129,39 +129,20 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, * syscall_set_arguments - change system call parameter value * @task: task of interest, must be in system call entry tracing * @regs: task_pt_regs() of @task - * @i: argument index [0,5] - * @n: number of arguments; n+i must be [1,6]. * @args: array of argument values to store * - * Changes @n arguments to the system call starting with the @i'th argument. - * Argument @i gets value @args[0], and so on. - * An arch inline version is probably optimal when @i and @n are constants. + * Changes 6 arguments to the system call. The first argument gets value + * @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. - * It's invalid to call this with @i + @n > 6; we only support system calls - * taking up to 6 arguments. 
*/ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - if (n == 0) - return; - - if (i + n > SYSCALL_MAX_ARGS) { - pr_warn("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); - n = SYSCALL_MAX_ARGS - i; - } - - if (i == 0) { - regs->orig_r0 = args[0]; - args++; - i++; - n--; - } + regs->orig_r0 = args[0]; + args++; - memcpy(®s->uregs[0] + i, args, n * sizeof(args[0])); + memcpy(®s->uregs[0] + 1, args, 5 * sizeof(args[0])); } #endif /* _ASM_NDS32_SYSCALL_H */ diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h index 792bd449d839..d7624ed06efb 100644 --- a/arch/nios2/include/asm/syscall.h +++ b/arch/nios2/include/asm/syscall.h @@ -69,42 +69,14 @@ static inline void syscall_get_arguments(struct task_struct *task, } static inline void syscall_set_arguments(struct task_struct *task, - struct pt_regs *regs, unsigned int i, unsigned int n, - const unsigned long *args) + struct pt_regs *regs, const unsigned long *args) { - BUG_ON(i + n > 6); - - switch (i) { - case 0: - if (!n--) - break; - regs->r4 = *args++; - case 1: - if (!n--) - break; - regs->r5 = *args++; - case 2: - if (!n--) - break; - regs->r6 = *args++; - case 3: - if (!n--) - break; - regs->r7 = *args++; - case 4: - if (!n--) - break; - regs->r8 = *args++; - case 5: - if (!n--) - break; - regs->r9 = *args++; - case 6: - if (!n) - break; - default: - BUG(); - } + regs->r4 = *args++; + regs->r5 = *args++; + regs->r6 = *args++; + regs->r7 = *args++; + regs->r8 = *args++; + regs->r9 = *args; } #endif diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index 72607860cd55..b4ff07c1baed 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h @@ -63,11 +63,9 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) + const unsigned long *args) { - BUG_ON(i + n > 6); - - memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); + memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); } static inline int syscall_get_arch(void) diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 5c9b9dc82b7e..1243045bad2d 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -86,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - BUG_ON(i + n > 6); - memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); + memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); /* Also copy the first argument into orig_gpr3 */ - if (i == 0 && n > 0) - regs->orig_gpr3 = args[0]; + regs->orig_gpr3 = args[0]; } static inline int syscall_get_arch(void) diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 6adca1804be1..a3d5273ded7c 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -81,18 +81,11 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - BUG_ON(i + n > 6); - if (i == 0) { - regs->orig_a0 = args[0]; - args++; - n--; - } else { - 
i--; - } - memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); + regs->orig_a0 = args[0]; + args++; + memcpy(®s->a1, args, 5 * sizeof(regs->a1)); } static inline int syscall_get_arch(void) diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index ee0b1f6aa36d..59c3e91f2cdb 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -74,15 +74,14 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - BUG_ON(i + n > 6); + unsigned int n = 6; + while (n-- > 0) - if (i + n > 0) - regs->gprs[2 + i + n] = args[n]; - if (i == 0) - regs->orig_gpr2 = args[0]; + if (n > 0) + regs->gprs[2 + n] = args[n]; + regs->orig_gpr2 = args[0]; } static inline int syscall_get_arch(void) diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 2bf1199a0595..8c9d7e5e5dcc 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -62,23 +62,14 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - /* Same note as above applies */ - BUG_ON(i); - - switch (n) { - case 6: regs->regs[1] = args[5]; - case 5: regs->regs[0] = args[4]; - case 4: regs->regs[7] = args[3]; - case 3: regs->regs[6] = args[2]; - case 2: regs->regs[5] = args[1]; - case 1: regs->regs[4] = args[0]; - break; - default: - BUG(); - } + regs->regs[1] = args[5]; + regs->regs[0] = args[4]; + regs->regs[7] = args[3]; + regs->regs[6] = args[2]; + regs->regs[5] = args[1]; + regs->regs[4] = args[0]; } static inline int syscall_get_arch(void) diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index 4e8f6460c703..22fad97da066 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -54,11 +54,9 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - BUG_ON(i + n > 6); - memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); + memcpy(®s->regs[2], args, 6 * sizeof(args[0])); } static inline int syscall_get_arch(void) diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 872dfee852d6..4d075434e816 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h @@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { - unsigned int j; + unsigned int i; - for (j = 0; j < n; j++) - regs->u_regs[UREG_I0 + i + j] = args[j]; + for (i = 0; i < 6; i++) + regs->u_regs[UREG_I0 + i] = args[i]; } static inline int syscall_get_arch(void) diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h index 25d00acd1322..98e50c50c12e 100644 --- a/arch/um/include/asm/syscall-generic.h +++ b/arch/um/include/asm/syscall-generic.h @@ -67,43 +67,16 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long 
*args) { struct uml_pt_regs *r = ®s->regs; - switch (i) { - case 0: - if (!n--) - break; - UPT_SYSCALL_ARG1(r) = *args++; - case 1: - if (!n--) - break; - UPT_SYSCALL_ARG2(r) = *args++; - case 2: - if (!n--) - break; - UPT_SYSCALL_ARG3(r) = *args++; - case 3: - if (!n--) - break; - UPT_SYSCALL_ARG4(r) = *args++; - case 4: - if (!n--) - break; - UPT_SYSCALL_ARG5(r) = *args++; - case 5: - if (!n--) - break; - UPT_SYSCALL_ARG6(r) = *args++; - case 6: - if (!n--) - break; - default: - BUG(); - break; - } + UPT_SYSCALL_ARG1(r) = *args++; + UPT_SYSCALL_ARG2(r) = *args++; + UPT_SYSCALL_ARG3(r) = *args++; + UPT_SYSCALL_ARG4(r) = *args++; + UPT_SYSCALL_ARG5(r) = *args++; + UPT_SYSCALL_ARG6(r) = *args; } /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 8dbb5c379450..4c305471ec33 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -138,63 +138,26 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread_info.status & TS_COMPAT) - switch (i) { - case 0: - if (!n--) break; - regs->bx = *args++; - case 1: - if (!n--) break; - regs->cx = *args++; - case 2: - if (!n--) break; - regs->dx = *args++; - case 3: - if (!n--) break; - regs->si = *args++; - case 4: - if (!n--) break; - regs->di = *args++; - case 5: - if (!n--) break; - regs->bp = *args++; - case 6: - if (!n--) break; - default: - BUG(); - break; - } - else + if (task->thread_info.status & TS_COMPAT) { + regs->bx = *args++; + regs->cx = *args++; + regs->dx = *args++; + regs->si = *args++; + regs->di = *args++; + regs->bp = *args; + } else # endif - switch (i) { - case 0: - if (!n--) break; - regs->di = *args++; - case 1: - if (!n--) break; - regs->si = *args++; - case 2: - if (!n--) break; - regs->dx = *args++; - case 3: - if (!n--) break; - regs->r10 = *args++; - case 4: - if (!n--) break; - regs->r8 = *args++; - case 5: - if (!n--) break; - regs->r9 = *args++; - case 6: - if (!n--) break; - default: - BUG(); - break; - } + { + regs->di = *args++; + regs->si = *args++; + regs->dx = *args++; + regs->r10 = *args++; + regs->r8 = *args++; + regs->r9 = *args; + } } static inline int syscall_get_arch(void) diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 1504ce9a233a..91dc06d58060 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -70,24 +70,13 @@ static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args) { static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; - unsigned int j; - - if (n == 0) - return; - - if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) { - if (i < SYSCALL_MAX_ARGS) - n = SYSCALL_MAX_ARGS - i; - else - return; - } + unsigned int i; - for (j = 0; j < n; ++j) - regs->areg[reg[i + j]] = args[j]; + for (i = 0; i < 6; ++i) + regs->areg[reg[i]] = args[i]; } asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 269e9412ef42..b88239e9efe4 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -120,21 +120,15 @@ void syscall_get_arguments(struct 
task_struct *task, struct pt_regs *regs, * syscall_set_arguments - change system call parameter value * @task: task of interest, must be in system call entry tracing * @regs: task_pt_regs() of @task - * @i: argument index [0,5] - * @n: number of arguments; n+i must be [1,6]. * @args: array of argument values to store * - * Changes @n arguments to the system call starting with the @i'th argument. - * Argument @i gets value @args[0], and so on. - * An arch inline version is probably optimal when @i and @n are constants. + * Changes 6 arguments to the system call. + * The first argument gets value @args[0], and so on. * * It's only valid to call this when @task is stopped for tracing on * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. - * It's invalid to call this with @i + @n > 6; we only support system calls - * taking up to 6 arguments. */ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned int i, unsigned int n, const unsigned long *args); /** -- cgit v1.2.3 From ede885ecb2cdf8a8dd5367702e3d964ec846a2d5 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 19 Mar 2019 15:19:56 -0700 Subject: kvm: svm: fix potential get_num_contig_pages overflow get_num_contig_pages() could potentially overflow int so make its type consistent with its usage. Reported-by: Cfir Cohen Cc: stable@vger.kernel.org Signed-off-by: David Rientjes Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 426039285fd1..947c35a1e545 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6422,11 +6422,11 @@ e_free: return ret; } -static int get_num_contig_pages(int idx, struct page **inpages, - unsigned long npages) +static unsigned long get_num_contig_pages(unsigned long idx, + struct page **inpages, unsigned long npages) { unsigned long paddr, next_paddr; - int i = idx + 1, pages = 1; + unsigned long i = idx + 1, pages = 1; /* find the number of contiguous pages starting from idx */ paddr = __sme_page_pa(inpages[idx]); @@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages, static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) { - unsigned long vaddr, vaddr_end, next_vaddr, npages, size; + unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_launch_update_data params; struct sev_data_launch_update_data *data; struct page **inpages; - int i, ret, pages; + int ret; if (!sev_guest(kvm)) return -ENOTTY; -- cgit v1.2.3 From b86bc2858b389255cd44555ce4b1e427b2b770c0 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 25 Mar 2019 11:47:31 -0700 Subject: KVM: SVM: prevent DBG_DECRYPT and DBG_ENCRYPT overflow This ensures that the address and length provided to DBG_DECRYPT and DBG_ENCRYPT do not cause an overflow. At the same time, pass the actual number of pages pinned in memory to sev_unpin_memory() as a cleanup. 
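The added check follows the standard unsigned wrap-around idiom. A minimal stand-alone sketch of that idiom (illustration only; not code from the patch, and the helper name is invented):

  #include <stdbool.h>
  #include <stdint.h>

  /* A zero length is rejected separately; in the overflow case the
   * unsigned sum wraps modulo 2^64 and so compares below addr. */
  static bool range_invalid(uint64_t addr, uint64_t len)
  {
          return len == 0 || addr + len < addr;
  }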
Reported-by: Cfir Cohen Signed-off-by: David Rientjes Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 947c35a1e545..e0a791c3d4fc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6799,7 +6799,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) struct page **src_p, **dst_p; struct kvm_sev_dbg debug; unsigned long n; - int ret, size; + unsigned int size; + int ret; if (!sev_guest(kvm)) return -ENOTTY; @@ -6807,6 +6808,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) return -EFAULT; + if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) + return -EINVAL; + if (!debug.dst_uaddr) + return -EINVAL; + vaddr = debug.src_uaddr; size = debug.len; vaddr_end = vaddr + size; @@ -6857,8 +6863,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) dst_vaddr, len, &argp->error); - sev_unpin_memory(kvm, src_p, 1); - sev_unpin_memory(kvm, dst_p, 1); + sev_unpin_memory(kvm, src_p, n); + sev_unpin_memory(kvm, dst_p, n); if (ret) goto err; -- cgit v1.2.3 From acff78477b9b4f26ecdf65733a4ed77fe837e9dc Mon Sep 17 00:00:00 2001 From: Marc Orr Date: Mon, 1 Apr 2019 23:55:59 -0700 Subject: KVM: x86: nVMX: close leak of L0's x2APIC MSRs (CVE-2019-3887) The nested_vmx_prepare_msr_bitmap() function doesn't directly guard the x2APIC MSR intercepts with the "virtualize x2APIC mode" MSR. As a result, we discovered the potential for a buggy or malicious L1 to get access to L0's x2APIC MSRs, via an L2, as follows. 1. L1 executes WRMSR(IA32_SPEC_CTRL, 1). This causes the spec_ctrl variable, in nested_vmx_prepare_msr_bitmap() to become true. 2. L1 disables "virtualize x2APIC mode" in VMCS12. 3. L1 enables "APIC-register virtualization" in VMCS12. Now, KVM will set VMCS02's x2APIC MSR intercepts from VMCS12, and then set "virtualize x2APIC mode" to 0 in VMCS02. Oops. This patch closes the leak by explicitly guarding VMCS02's x2APIC MSR intercepts with VMCS12's "virtualize x2APIC mode" control. The scenario outlined above and fix prescribed here, were verified with a related patch in kvm-unit-tests titled "Add leak scenario to virt_x2apic_mode_test". Note, it looks like this issue may have been introduced inadvertently during a merge---see 15303ba5d1cd. Signed-off-by: Marc Orr Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 72 +++++++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 153e539c29c9..897d70e3d291 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, } } +static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) { + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + + msr_bitmap[word] = ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; + } +} + /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. 
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, return false; msr_bitmap_l1 = (unsigned long *)kmap(page); - if (nested_cpu_has_apic_reg_virt(vmcs12)) { - /* - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it - * just lets the processor take the value from the virtual-APIC page; - * take those 256 bits directly from the L1 bitmap. - */ - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = msr_bitmap_l1[word]; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } else { - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = ~0; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_TASKPRI), - MSR_TYPE_W); + /* + * To keep the control flow simple, pay eight 8-byte writes (sixteen + * 4-byte writes on 32-bit systems) up front to enable intercepts for + * the x2APIC MSR range and selectively disable them below. + */ + enable_x2apic_msr_intercepts(msr_bitmap_l0); + + if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { + if (nested_cpu_has_apic_reg_virt(vmcs12)) { + /* + * L0 need not intercept reads for MSRs between 0x800 + * and 0x8ff, it just lets the processor take the value + * from the virtual-APIC page; take those 256 bits + * directly from the L1 bitmap. + */ + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + + msr_bitmap_l0[word] = msr_bitmap_l1[word]; + } + } - if (nested_cpu_has_vid(vmcs12)) { - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_EOI), - MSR_TYPE_W); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_SELF_IPI), + X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_W); + + if (nested_cpu_has_vid(vmcs12)) { + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_EOI), + MSR_TYPE_W); + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_SELF_IPI), + MSR_TYPE_W); + } } if (spec_ctrl) -- cgit v1.2.3 From c73f4c998e1fd4249b9edfa39e23f4fda2b9b041 Mon Sep 17 00:00:00 2001 From: Marc Orr Date: Mon, 1 Apr 2019 23:56:00 -0700 Subject: KVM: x86: nVMX: fix x2APIC VTPR read intercept Referring to the "VIRTUALIZING MSR-BASED APIC ACCESSES" chapter of the SDM, when "virtualize x2APIC mode" is 1 and "APIC-register virtualization" is 0, a RDMSR of 808H should return the VTPR from the virtual APIC page. However, for nested, KVM currently fails to disable the read intercept for this MSR. This means that a RDMSR exit takes precedence over "virtualize x2APIC mode", and KVM passes through L1's TPR to L2, instead of sourcing the value from L2's virtual APIC page. This patch fixes the issue by disabling the read intercept, in VMCS02, for the VTPR when "APIC-register virtualization" is 0. The issue described above and fix prescribed here, were verified with a related patch in kvm-unit-tests titled "Test VMX's virtualize x2APIC mode w/ nested". 
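As a rough sketch of the bitmap indexing these two patches rely on (illustration only, not kernel code; it assumes the layout visible in enable_x2apic_msr_intercepts() above, with one bit per MSR and the write bitmap for low MSRs placed 0x800 bytes after the read bitmap):

  #include <limits.h>

  #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

  static void intercept_x2apic_msr(unsigned long *msr_bitmap, unsigned int msr)
  {
          unsigned int word = msr / BITS_PER_LONG;
          unsigned int bit = msr % BITS_PER_LONG;

          msr_bitmap[word] |= 1UL << bit;                          /* read */
          msr_bitmap[word + (0x800 / sizeof(long))] |= 1UL << bit; /* write */
  }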
Signed-off-by: Marc Orr Reviewed-by: Jim Mattson Fixes: c992384bde84f ("KVM: vmx: speed up MSR bitmap merge") Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 897d70e3d291..7ec9bb1dd723 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -578,7 +578,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, X2APIC_MSR(APIC_TASKPRI), - MSR_TYPE_W); + MSR_TYPE_R | MSR_TYPE_W); if (nested_cpu_has_vid(vmcs12)) { nested_vmx_disable_intercept_for_msr( -- cgit v1.2.3 From 298a32b132087550d3fa80641ca58323c5dfd4d9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 5 Apr 2019 18:38:49 -0700 Subject: kmemleak: powerpc: skip scanning holes in the .bss section Commit 2d4f567103ff ("KVM: PPC: Introduce kvm_tmp framework") adds kvm_tmp[] to the .bss section and then frees the rest of the unused space back to the page allocator. kernel_init kvm_guest_init kvm_free_tmp free_reserved_area free_unref_page free_unref_page_prepare With DEBUG_PAGEALLOC=y, it will unmap those pages from the kernel. As a result, a kmemleak scan will trigger a panic when it scans the .bss section with unmapped pages. This patch creates dedicated kmemleak objects for the .data, .bss and potentially .data..ro_after_init sections to allow partial freeing via the kmemleak_free_part() in the powerpc kvm_free_tmp() function. Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reported-by: Qian Cai Acked-by: Michael Ellerman (powerpc) Tested-by: Qian Cai Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Avi Kivity Cc: Paolo Bonzini Cc: Radim Krcmar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/kvm.c | 7 +++++++ mm/kmemleak.c | 16 +++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 683b5b3805bd..cd381e2291df 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void) static __init void kvm_free_tmp(void) { + /* + * Inform kmemleak about the hole in the .bss section since the + * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+ */ + kmemleak_free_part(&kvm_tmp[kvm_tmp_index], + ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); free_reserved_area(&kvm_tmp[kvm_tmp_index], &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 707fa5579f66..6c318f5ac234 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1529,11 +1529,6 @@ static void kmemleak_scan(void) } rcu_read_unlock(); - /* data/bss scanning */ - scan_large_block(_sdata, _edata); - scan_large_block(__bss_start, __bss_stop); - scan_large_block(__start_ro_after_init, __end_ro_after_init); - #ifdef CONFIG_SMP /* per-cpu sections scanning */ for_each_possible_cpu(i) @@ -2071,6 +2066,17 @@ void __init kmemleak_init(void) } local_irq_restore(flags); + /* register the data/bss sections */ + create_object((unsigned long)_sdata, _edata - _sdata, + KMEMLEAK_GREY, GFP_ATOMIC); + create_object((unsigned long)__bss_start, __bss_stop - __bss_start, + KMEMLEAK_GREY, GFP_ATOMIC); + /* only register .data..ro_after_init if not within .data */ + if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + create_object((unsigned long)__start_ro_after_init, + __end_ro_after_init - __start_ro_after_init, + KMEMLEAK_GREY, GFP_ATOMIC); + /* * This is the point where tracking allocations is safe. Automatic * scanning is started during the late initcall. Add the early logged -- cgit v1.2.3 From acaf892ecbf5be7710ae05a61fd43c668f68ad95 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 5 Apr 2019 18:39:30 -0700 Subject: sh: fix multiple function definition build errors Many of the sh CPU-types have their own plat_irq_setup() and arch_init_clk_ops() functions, so these same (empty) functions in arch/sh/boards/of-generic.c are not needed and cause build errors. If there is some case where these empty functions are needed, they can be retained by marking them as "__weak" while at the same time making builds that do not need them succeed. 
Fixes these build errors: arch/sh/boards/of-generic.o: In function `plat_irq_setup': (.init.text+0x134): multiple definition of `plat_irq_setup' arch/sh/kernel/cpu/sh2/setup-sh7619.o:(.init.text+0x30): first defined here arch/sh/boards/of-generic.o: In function `arch_init_clk_ops': (.init.text+0x118): multiple definition of `arch_init_clk_ops' arch/sh/kernel/cpu/sh2/clock-sh7619.o:(.init.text+0x0): first defined here Link: http://lkml.kernel.org/r/9ee4e0c5-f100-86a2-bd4d-1d3287ceab31@infradead.org Signed-off-by: Randy Dunlap Reported-by: kbuild test robot Cc: Takashi Iwai Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/boards/of-generic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 958f46da3a79..d91065e81a4e 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = { struct sh_clk_ops; -void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { } -void __init plat_irq_setup(void) +void __init __weak plat_irq_setup(void) { } -- cgit v1.2.3 From 5b77e95dd7790ff6c8fbf1cd8d0104ebed818a03 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 2 Apr 2019 13:28:13 +0200 Subject: x86/asm: Use stricter assembly constraints in bitops There's a number of problems with how arch/x86/include/asm/bitops.h is currently using assembly constraints for the memory region bitops are modifying: 1) Use memory clobber in bitops that touch arbitrary memory Certain bit operations that read/write bits take a base pointer and an arbitrarily large offset to address the bit relative to that base. Inline assembly constraints aren't expressive enough to tell the compiler that the assembly directive is going to touch a specific memory location of unknown size, therefore we have to use the "memory" clobber to indicate that the assembly is going to access memory locations other than those listed in the inputs/outputs. To indicate that BTR/BTS instructions don't necessarily touch the first sizeof(long) bytes of the argument, we also move the address to assembly inputs. This particular change leads to size increase of 124 kernel functions in a defconfig build. For some of them the diff is in NOP operations, other end up re-reading values from memory and may potentially slow down the execution. But without these clobbers the compiler is free to cache the contents of the bitmaps and use them as if they weren't changed by the inline assembly. 2) Use byte-sized arguments for operations touching single bytes. Passing a long value to ANDB/ORB/XORB instructions makes the compiler treat sizeof(long) bytes as being clobbered, which isn't the case. This may theoretically lead to worse code in the case of heavy optimization. Practical impact: I've built a defconfig kernel and looked through some of the functions generated by GCC 7.3.0 with and without this clobber, and didn't spot any miscompilations. However there is a (trivial) theoretical case where this code leads to miscompilation: https://lkml.org/lkml/2019/3/28/393 using just GCC 8.3.0 with -O2. It isn't hard to imagine someone writes such a function in the kernel someday. 
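A minimal sketch of the problem class (simplified illustration for x86-64, not the kernel's code): BTS addresses bit 'nr' relative to the base pointer, so it may write to a word the constraints never mention:

  static inline void my_set_bit(long nr, volatile unsigned long *addr)
  {
          /*
           * Buggy variant: "+m" (*addr) declares only the first long
           * as written, yet nr may address a bit in a later word.
           * Without a "memory" clobber (or with the address moved to
           * the inputs, as this patch does), the compiler may keep
           * addr[1] and beyond cached across the asm.
           */
          asm volatile("btsq %1, %0"
                       : "+m" (*addr)
                       : "Ir" (nr));
  }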
So the primary motivation is to fix an existing misuse of the asm directive, which happens to work in certain configurations now, but isn't guaranteed to work under different circumstances. [ --mingo: Added -stable tag because defconfig only builds a fraction of the kernel and the trivial testcase looks normal enough to be used in existing or in-development code. ] Signed-off-by: Alexander Potapenko Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Dmitry Vyukov Cc: H. Peter Anvin Cc: James Y Knight Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190402112813.193378-1-glider@google.com [ Edited the changelog, tidied up one of the defines. ] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bitops.h | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index d153d570bb04..8e790ec219a5 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -36,16 +36,17 @@ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ -#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) +#define RLONG_ADDR(x) "m" (*(volatile long *) (x)) +#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x)) -#define ADDR BITOP_ADDR(addr) +#define ADDR RLONG_ADDR(addr) /* * We do the locked ops that don't return the old value as * a mask operation on a byte. */ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) +#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) /** @@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr) : "memory"); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" - : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr) */ static __always_inline void __set_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); + asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } /** @@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)~CONST_MASK(nr))); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" - : BITOP_ADDR(addr) - : "Ir" (nr)); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) @@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile bool negative; asm volatile(LOCK_PREFIX "andb %2,%1" CC_SET(s) - : CC_OUT(s) (negative), ADDR + : CC_OUT(s) (negative), WBYTE_ADDR(addr) : "ir" ((char) ~(1 << nr)) : "memory"); return negative; } @@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile * __clear_bit() is non-atomic and implies release semantics before the memory * operation. It can be used for an unlock if no other CPUs can concurrently * modify other bits in the word. 
- * - * No memory barrier is required here, because x86 cannot reorder stores past - * older loads. Same principle as spin_unlock. */ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { - barrier(); __clear_bit(nr, addr); } @@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * */ static __always_inline void __change_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } /** @@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)CONST_MASK(nr))); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" - : BITOP_ADDR(addr) - : "Ir" (nr)); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr)); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long asm volatile(__ASM_SIZE(btr) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr)); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon asm volatile(__ASM_SIZE(btc) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr) : "memory"); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) - : "m" (*(unsigned long *)addr), "Ir" (nr)); + : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); return oldbit; } -- cgit v1.2.3 From 45efd871bf0a47648f119d1b41467f70484de5bc Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Thu, 4 Apr 2019 18:16:03 +0200 Subject: parisc: regs_return_value() should return gpr28 While working on kretprobes for PA-RISC I was wondering why the kprobes sanity test always fails on kretprobes. This is caused by returning gpr20 instead of gpr28. Signed-off-by: Sven Schnelle Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # 4.14+ --- arch/parisc/include/asm/ptrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 2a27b275ab09..4a87b3d600c6 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -22,7 +22,7 @@ unsigned long profile_pc(struct pt_regs *); static inline unsigned long regs_return_value(struct pt_regs *regs) { - return regs->gr[20]; + return regs->gr[28]; } static inline void instruction_pointer_set(struct pt_regs *regs, -- cgit v1.2.3 From f324fa58327791b2696628b31480e7e21c745706 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Thu, 4 Apr 2019 18:16:04 +0200 Subject: parisc: also set iaoq_b in instruction_pointer_set() When setting the instruction pointer on PA-RISC we also need to set the back of the instruction queue to the new offset, otherwise we will execute one instruction from the new location and then jump back to the old location stored in iaoq_b.
Signed-off-by: Sven Schnelle Signed-off-by: Helge Deller Fixes: 75ebedf1d263 ("parisc: Add HAVE_REGS_AND_STACK_ACCESS_API feature") Cc: stable@vger.kernel.org # 4.19+ --- arch/parisc/include/asm/ptrace.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 4a87b3d600c6..9ff033d261ab 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -28,7 +28,8 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { - regs->iaoq[0] = val; + regs->iaoq[0] = val; + regs->iaoq[1] = val + 4; } /* Query offset/name of register from its name/offset */ -- cgit v1.2.3 From d006e95b5561f708d0385e9677ffe2c46f2ae345 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 2 Apr 2019 12:13:27 +0200 Subject: parisc: Detect QEMU earlier in boot process While adding LASI support to QEMU, I noticed that the QEMU detection in the kernel happens much too late. For example, when a LASI chip is found by the kernel, it registers the LASI LED driver as well. But when we run on QEMU it makes sense to avoid spending unnecessary CPU cycles, so we need to access the running_on_qemu flag earlier than before. This patch now makes the QEMU detection the first task of the Linux kernel by moving it to where the kernel enters the C code. Fixes: 310d82784fb4 ("parisc: qemu idle sleep support") Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # v4.14+ --- arch/parisc/kernel/process.c | 6 ------ arch/parisc/kernel/setup.c | 3 +++ 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index eb39e7e380d7..841db71958cd 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void) static int __init parisc_idle_init(void) { - const char *marker; - - /* check QEMU/SeaBIOS marker in PAGE0 */ - marker = (char *) &PAGE0->pad0; - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); - if (!running_on_qemu) cpu_idle_poll_ctrl(1); diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 15dd9e21be7e..d908058d05c1 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -397,6 +397,9 @@ void __init start_parisc(void) int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; + /* check QEMU/SeaBIOS marker in PAGE0 */ + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); + cpunum = smp_processor_id(); init_cpu_topology(); -- cgit v1.2.3 From dd9a994fc68d196a052b73747e3366c57d14a09e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 4 Apr 2019 12:20:05 +0000 Subject: powerpc/vdso32: fix CLOCK_MONOTONIC on PPC64 Commit b5b4453e7912 ("powerpc/vdso64: Fix CLOCK_MONOTONIC inconsistencies across Y2038") changed the type of wtom_clock_sec to s64 on PPC64. Therefore, VDSO32 needs to read it with a 4-byte shift in order to retrieve the lower part of it.
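The layout can be pictured with a short sketch (illustration only, assuming the big-endian layout of classic PPC64): the 64-bit field stores its most-significant word first, so a 32-bit reader wanting the low part must add 4 bytes to the offset, which is exactly what LOPART encodes:

  #include <stdint.h>

  /* Big-endian: the low 32 bits of an s64 sit at byte offset 4;
   * w[0] would return the high word instead. */
  static uint32_t low_word(const int64_t *field)
  {
          const uint32_t *w = (const uint32_t *)field;

          return w[1];
  }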
Fixes: b5b4453e7912 ("powerpc/vdso64: Fix CLOCK_MONOTONIC inconsistencies across Y2038") Reported-by: Christian Zigotzky Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/vdso32/gettimeofday.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 1e0bc5955a40..afd516b572f8 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) * can be used, r7 contains NSEC_PER_SEC. */ - lwz r5,WTOM_CLOCK_SEC(r9) + lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9) lwz r6,WTOM_CLOCK_NSEC(r9) /* We now have our offset in r5,r6. We create a fake dependency -- cgit v1.2.3 From fbe8758f931ff5468aaeb4b304fc3edb70c908d6 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Sun, 7 Apr 2019 15:18:41 -0700 Subject: Revert "ARM: dts: nomadik: Fix polarity of SPI CS" This reverts commit fa9463564e77067df81b0b8dec91adbbbc47bfb4. Per Linus Walleij: Dear ARM SoC maintainers, can you please revert this patch. It was the wrong solution to the wrong problem, and I must have acted in stress. Andrey fixed the real bug in a proper way in these commits: commit e5545c94e43b8f6599ffc01df8d1aedf18ee912a "gpio: of: Check propname before applying "cs-gpios" quirks" commit 7ce40277bf848391705011ba37eac2e377cbd9e6 "gpio: of: Check for "spi-cs-high" in child instead of parent node" Signed-off-by: Olof Johansson --- arch/arm/boot/dts/ste-nomadik-nhk15.dts | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index f2f6558a00f1..04066f9cb8a3 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -213,13 +213,12 @@ gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>; gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>; /* - * This chipselect is active high. Just setting the flags - * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings, - * it will be ignored, only the special "spi-cs-high" flag - * really counts. + * It's not actually active high, but the frameworks assume + * the polarity of the passed-in GPIO is "normal" (active + * high) then actively drives the line low to select the + * chip. */ cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; - spi-cs-high; num-chipselects = <1>; /* -- cgit v1.2.3 From cd92d74d67c811dc22544430b9ac3029f5bd64c5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 25 Mar 2019 16:50:42 +0100 Subject: ARM: orion: don't use 64-bit DMA masks clang warns about statically defined DMA masks from the DMA_BIT_MASK macro with length 64: arch/arm/plat-orion/common.c:625:29: error: shift count >= width of type [-Werror,-Wshift-count-overflow] .coherent_dma_mask = DMA_BIT_MASK(64), ^~~~~~~~~~~~~~~~ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK' #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) The ones in orion shouldn't really be 64-bit masks, so changing them to what the driver can support avoids the warning.
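To see why the macro trips the warning, here is a stand-alone sketch mirroring the definition quoted in the message above (the macro and variable names are invented for illustration):

  /* Shifting a 64-bit value by 64 is undefined in C, hence the n == 64
   * special case; clang still warns on MY_DMA_BIT_MASK(64) because the
   * unevaluated arm of the conditional contains 1ULL << 64. */
  #define MY_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

  static unsigned long long xor_dma_mask = MY_DMA_BIT_MASK(32); /* 0xffffffff */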
Signed-off-by: Arnd Bergmann Signed-off-by: Olof Johansson --- arch/arm/plat-orion/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a6c81ce00f52..8647cb80a93b 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = { .resource = orion_xor0_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor0_pdata, }, }; @@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = { .resource = orion_xor1_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor1_pdata, }, }; -- cgit v1.2.3 From 2125801ccce19249708ca3245d48998e70569ab8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 25 Mar 2019 16:50:43 +0100 Subject: ARM: iop: don't use 64-bit DMA masks clang warns about statically defined DMA masks from the DMA_BIT_MASK macro with length 64: arch/arm/mach-iop13xx/setup.c:303:35: error: shift count >= width of type [-Werror,-Wshift-count-overflow] static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); ^~~~~~~~~~~~~~~~ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK' #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) ^ ~~~ The ones in iop shouldn't really be 64-bit masks, so changing them to what the driver can support avoids the warning. Signed-off-by: Arnd Bergmann Signed-off-by: Olof Johansson --- arch/arm/mach-iop13xx/setup.c | 8 ++++---- arch/arm/mach-iop13xx/tpmi.c | 10 +++++----- arch/arm/plat-iop/adma.c | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 53c316f7301e..fe4932fda01d 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = { } }; -static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); +static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32); static struct iop_adma_platform_data iop13xx_adma_0_data = { .hw_id = 0, .pool_size = PAGE_SIZE, @@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = { .resource = iop13xx_adma_0_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_0_data, }, }; @@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = { .resource = iop13xx_adma_1_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_1_data, }, }; @@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = { .resource = iop13xx_adma_2_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_2_data, }, }; diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c index db511ec2b1df..116feb6b261e 100644 --- a/arch/arm/mach-iop13xx/tpmi.c +++ b/arch/arm/mach-iop13xx/tpmi.c @@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = { } }; -u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32); static struct platform_device iop13xx_tpmi_0_device = { .name = "iop-tpmi", .id = 0, @@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = { .resource = iop13xx_tpmi_0_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = { .resource = iop13xx_tpmi_1_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = { .resource = iop13xx_tpmi_2_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = { .resource = iop13xx_tpmi_3_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index a4d1f8de3b5b..d9612221e484 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = { .resource = iop3xx_dma_0_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_dma_0_data, }, }; @@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = { .resource = iop3xx_dma_1_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_dma_1_data, }, }; @@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = { .resource = iop3xx_aau_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_aau_data, }, }; -- cgit v1.2.3 From 9a8f32038a74cb800e9649afbf4b3dba2b7d6539 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 Mar 2019 22:19:16 +0100 Subject: ARM: milbeaut: fix build with !CONFIG_HOTPLUG_CPU When HOTPLUG_CPU is disabled, some fields in the smp operations are not available or needed: arch/arm/mach-milbeaut/platsmp.c:90:3: error: field designator 'cpu_die' does not refer to any field in type 'struct smp_operations' .cpu_die = m10v_cpu_die, ^ arch/arm/mach-milbeaut/platsmp.c:91:3: error: field designator 'cpu_kill' does not refer to any field in type 'struct smp_operations' .cpu_kill = m10v_cpu_kill, ^ Hide them in an #ifdef like the other platforms do. 
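The failure mode generalizes beyond this file; a minimal sketch (illustration only, hypothetical names) of why the initializer needs the same #ifdef guard as the field it names:

  struct my_smp_ops {
          void (*boot_secondary)(unsigned int cpu);
  #ifdef CONFIG_HOTPLUG_CPU
          void (*cpu_die)(unsigned int cpu);     /* absent when hotplug is off */
  #endif
  };

  static void my_boot_secondary(unsigned int cpu) { }
  #ifdef CONFIG_HOTPLUG_CPU
  static void my_cpu_die(unsigned int cpu) { }
  #endif

  static struct my_smp_ops my_ops = {
          .boot_secondary = my_boot_secondary,
  #ifdef CONFIG_HOTPLUG_CPU
          .cpu_die = my_cpu_die, /* naming this field unguarded is the
                                    reported compile error */
  #endif
  };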
Fixes: 9fb29c734f9e ("ARM: milbeaut: Add basic support for Milbeaut m10v SoC") Signed-off-by: Arnd Bergmann Signed-off-by: Olof Johansson --- arch/arm/mach-milbeaut/platsmp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c index 591543c81399..3ea880f5fcb7 100644 --- a/arch/arm/mach-milbeaut/platsmp.c +++ b/arch/arm/mach-milbeaut/platsmp.c @@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus) writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); } +#ifdef CONFIG_HOTPLUG_CPU static void m10v_cpu_die(unsigned int l_cpu) { gic_cpu_if_down(0); @@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu) return 1; } +#endif static struct smp_operations m10v_smp_ops __initdata = { .smp_prepare_cpus = m10v_smp_init, .smp_boot_secondary = m10v_boot_secondary, +#ifdef CONFIG_HOTPLUG_CPU .cpu_die = m10v_cpu_die, .cpu_kill = m10v_cpu_kill, +#endif }; CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); -- cgit v1.2.3 From 7100e8704b61247649c50551b965e71d168df30b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 29 Mar 2019 17:42:57 +1000 Subject: powerpc/64s/radix: Fix radix segment exception handling Commit 48e7b76957 ("powerpc/64s/hash: Convert SLB miss handlers to C") broke the radix-mode segment exception handler. In radix mode, this exception is not an SLB miss; rather, it signals that the EA is outside the range translated by any page table. The commit lost the radix feature alternate code patch, which can cause faults on some EAs to hit the kernel BUG at arch/powerpc/mm/slb.c:639! The original radix code would send faults to slb_miss_large_addr, which would end up faulting due to slb_addr_limit being 0. This patch sends radix directly to do_bad_slb_fault, which is a bit clearer.
Fixes: 48e7b7695745 ("powerpc/64s/hash: Convert SLB miss handlers to C") Cc: stable@vger.kernel.org # v4.20+ Reported-by: Anton Blanchard Signed-off-by: Nicholas Piggin Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/exceptions-64s.S | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a5b8fbae56a0..9481a117e242 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common) ld r4,PACA_EXSLB+EX_DAR(r13) std r4,_DAR(r1) addi r3,r1,STACK_FRAME_OVERHEAD +BEGIN_MMU_FTR_SECTION + /* HPT case, do SLB fault */ bl do_slb_fault cmpdi r3,0 bne- 1f b fast_exception_return 1: /* Error case */ +MMU_FTR_SECTION_ELSE + /* Radix case, access is outside page table range */ + li r3,-EFAULT +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) @@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common) EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) ld r4,_NIP(r1) addi r3,r1,STACK_FRAME_OVERHEAD +BEGIN_MMU_FTR_SECTION + /* HPT case, do SLB fault */ bl do_slb_fault cmpdi r3,0 bne- 1f b fast_exception_return 1: /* Error case */ +MMU_FTR_SECTION_ELSE + /* Radix case, access is outside page table range */ + li r3,-EFAULT +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) -- cgit v1.2.3 From 5a3ae7b314a2259b1188b22b392f5eba01e443ee Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sun, 7 Apr 2019 21:06:16 +0200 Subject: arm64/ftrace: fix inadvertent BUG() in trampoline check The ftrace trampoline code (which deals with modules loaded out of BL range of the core kernel) uses plt_entries_equal() to check whether the per-module trampoline equals a zero buffer, to decide whether the trampoline has already been initialized. This triggers a BUG() in the opcode manipulation code, since we end up checking the ADRP offset of a 0x0 opcode, which is not an ADRP instruction. So instead, add a helper to check whether a PLT is initialized, and call that from the ftrace code.
Cc: # v5.0 Fixes: bdb85cd1d206 ("arm64/module: switch to ADRP/ADD sequences for PLT entries") Acked-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/module.h | 5 +++++ arch/arm64/kernel/ftrace.c | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 905e1bb0e7bd..cd9f4e9d04d3 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place) struct plt_entry get_plt_entry(u64 dst, void *pc); bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); +static inline bool plt_entry_is_initialized(const struct plt_entry *e) +{ + return e->adrp || e->add || e->br; +} + #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 8e4431a8821f..07b298120182 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); if (!plt_entries_equal(mod->arch.ftrace_trampoline, &trampoline)) { - if (!plt_entries_equal(mod->arch.ftrace_trampoline, - &(struct plt_entry){})) { + if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } -- cgit v1.2.3 From 1e6f5440a6814d28c32d347f338bfef68bc3e69d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 8 Apr 2019 17:56:34 +0100 Subject: arm64: backtrace: Don't bother trying to unwind the userspace stack Calling dump_backtrace() with a pt_regs argument corresponding to userspace doesn't make any sense and our unwinder will simply print "Call trace:" before unwinding the stack looking for user frames. Rather than go through this song and dance, just return early if we're passed a user register state. 
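The guard added at the top of dump_backtrace() amounts to the following minimal sketch (mirroring the hunk below):

	/*
	 * User pt_regs carry no kernel frames to unwind, so return
	 * before printing an empty "Call trace:"; for kernel regs,
	 * skip the exception frame itself.
	 */
	if (regs) {
		if (user_mode(regs))
			return;
		skip = 1;
	}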
Cc: Fixes: 1149aad10b1e ("arm64: Add dump_backtrace() in show_regs") Reported-by: Kefeng Wang Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8ad119c3f665..29755989f616 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - int skip; + int skip = 0; pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); + if (regs) { + if (user_mode(regs)) + return; + skip = 1; + } + if (!tsk) tsk = current; @@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.graph = 0; #endif - skip = !!regs; printk("Call trace:\n"); do { /* skip until specified stack frame */ @@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs) return ret; print_modules(); - __show_regs(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); + show_regs(regs); - if (!user_mode(regs)) { - dump_backtrace(regs, tsk); + if (!user_mode(regs)) dump_instr(KERN_EMERG, regs); - } return ret; } -- cgit v1.2.3 From cf7cf6977f531acd5dfe55250d0ee8cbbb6f1ae8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 9 Apr 2019 15:43:11 +1000 Subject: powerpc/mm: Define MAX_PHYSMEM_BITS for all 64-bit configs The recent commit 8bc086899816 ("powerpc/mm: Only define MAX_PHYSMEM_BITS in SPARSEMEM configurations") removed our definition of MAX_PHYSMEM_BITS when SPARSEMEM is disabled. This inadvertently broke some 64-bit FLATMEM using configs with eg: arch/powerpc/include/asm/book3s/64/mmu-hash.h:584:6: error: "MAX_PHYSMEM_BITS" is not defined, evaluates to 0 #if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT) ^~~~~~~~~~~~~~~~ Fix it by making sure we define MAX_PHYSMEM_BITS for all 64-bit configs regardless of SPARSEMEM. Fixes: 8bc086899816 ("powerpc/mm: Only define MAX_PHYSMEM_BITS in SPARSEMEM configurations") Reported-by: Andreas Schwab Reported-by: Hugh Dickins Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 598cdcdd1355..8ddd4a91bdc1 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void) #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ defined (CONFIG_PPC_64K_PAGES) #define MAX_PHYSMEM_BITS 51 -#elif defined(CONFIG_SPARSEMEM) +#elif defined(CONFIG_PPC64) #define MAX_PHYSMEM_BITS 46 #endif -- cgit v1.2.3 From 3966c3feca3fd10b2935caa0b4a08c7dd59469e5 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 2 Apr 2019 15:21:18 +0000 Subject: x86/perf/amd: Remove need to check "running" bit in NMI handler Spurious interrupt support was added to perf in the following commit, almost a decade ago: 63e6be6d98e1 ("perf, x86: Catch spurious interrupts after disabling counters") The two previous patches (resolving the race condition when disabling a PMC and NMI latency mitigation) allow for the removal of this older spurious interrupt support. 
Currently in x86_pmu_stop(), the bit for the PMC in the active_mask bitmap is cleared before disabling the PMC, which sets up a race condition. This race condition was mitigated by introducing the running bitmap. That race condition can be eliminated by first disabling the PMC, waiting for PMC reset on overflow and then clearing the bit for the PMC in the active_mask bitmap. The NMI handler will not re-enable a disabled counter. If x86_pmu_stop() is called from the perf NMI handler, the NMI latency mitigation support will guard against any unhandled NMI messages. Signed-off-by: Tom Lendacky Signed-off-by: Peter Zijlstra (Intel) Cc: # 4.14.x- Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: https://lkml.kernel.org/r/Message-ID: Signed-off-by: Ingo Molnar --- arch/x86/events/amd/core.c | 21 +++++++++++++++++++-- arch/x86/events/core.c | 13 +++---------- 2 files changed, 22 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 34c191453ce3..0ecfac84ba91 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -4,8 +4,8 @@ #include #include #include -#include #include +#include #include "../perf_event.h" @@ -491,6 +491,23 @@ static void amd_pmu_disable_all(void) } } +static void amd_pmu_disable_event(struct perf_event *event) +{ + x86_pmu_disable_event(event); + + /* + * This can be called from NMI context (via x86_pmu_stop). The counter + * may have overflowed, but either way, we'll never see it get reset + * by the NMI if we're already in the NMI. And the NMI latency support + * below will take care of any pending NMI that might have been + * generated by the overflow. + */ + if (in_nmi()) + return; + + amd_pmu_wait_on_overflow(event->hw.idx); +} + /* * Because of NMI latency, if multiple PMC counters are active or other sources * of NMIs are received, the perf NMI handler can handle one or more overflowed @@ -738,7 +755,7 @@ static __initconst const struct x86_pmu amd_pmu = { .disable_all = amd_pmu_disable_all, .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, - .disable = x86_pmu_disable_event, + .disable = amd_pmu_disable_event, .hw_config = amd_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_K7_EVNTSEL0, diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e2b1447192a8..81911e11a15d 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { + if (test_bit(hwc->idx, cpuc->active_mask)) { x86_pmu.disable(event); + __clear_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = NULL; WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; @@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs) apic_write(APIC_LVTPC, APIC_DM_NMI); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active_mask)) { - /* - * Though we deactivated the counter some cpus - * might still deliver spurious interrupts still - * in flight. 
Catch them: - */ - if (__test_and_clear_bit(idx, cpuc->running)) - handled++; + if (!test_bit(idx, cpuc->active_mask)) continue; - } event = cpuc->events[idx]; -- cgit v1.2.3 From 2a29e9f6b9b499f1fc5f4a48220dc3f4428499f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Apr 2019 21:34:34 +0200 Subject: sparc64/pci_sun4v: fix ATU checks for large DMA masks Now that we allow drivers to set DMA masks larger than required, we need to be a little more careful in the sun4v PCI iommu driver to choose when to select the ATU support - a larger DMA mask can be set even when the platform does not support ATU, so we always have to check if it is available before using it. Add a little helper for that and use it in all the places where we make ATU usage decisions based on the DMA mask. Fixes: 24132a419c68 ("sparc64/pci_sun4v: allow large DMA masks") Reported-by: Meelis Roos Signed-off-by: Christoph Hellwig Tested-by: Meelis Roos Acked-by: David S. Miller --- arch/sparc/kernel/pci_sun4v.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index a8af6023c126..14b93c5564e3 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns p->npages = 0; } +static inline bool iommu_use_atu(struct iommu *iommu, u64 mask) +{ + return iommu->atu && mask > DMA_BIT_MASK(32); +} + /* Interrupts must be disabled. */ static long iommu_batch_flush(struct iommu_batch *p, u64 mask) { @@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask) prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); while (npages != 0) { - if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { + if (!iommu_use_atu(pbm->iommu, mask)) { num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), npages, @@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, unsigned long flags, order, first_page, npages, n; unsigned long prot = 0; struct iommu *iommu; - struct atu *atu; struct iommu_map_table *tbl; struct page *page; void *ret; @@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, memset((char *)first_page, 0, PAGE_SIZE << order); iommu = dev->archdata.iommu; - atu = iommu->atu; - mask = dev->coherent_dma_mask; - if (mask <= DMA_BIT_MASK(32) || !atu) + if (!iommu_use_atu(iommu, mask)) tbl = &iommu->tbl; else - tbl = &atu->tbl; + tbl = &iommu->atu->tbl; entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); @@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, atu = iommu->atu; devhandle = pbm->devhandle; - if (dvma <= DMA_BIT_MASK(32)) { + if (!iommu_use_atu(iommu, dvma)) { tbl = &iommu->tbl; iotsb_num = 0; /* we don't care for legacy iommu */ } else { @@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, npages >>= IO_PAGE_SHIFT; mask = *dev->dma_mask; - if (mask <= DMA_BIT_MASK(32)) + if (!iommu_use_atu(iommu, mask)) tbl = &iommu->tbl; else tbl = &atu->tbl; @@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, IO_PAGE_SIZE) >> IO_PAGE_SHIFT; mask = *dev->dma_mask; - if (mask <= DMA_BIT_MASK(32)) + if (!iommu_use_atu(iommu, mask)) tbl = &iommu->tbl; else tbl = &atu->tbl; -- cgit v1.2.3 From 045afc24124d80c6998d9c770844c67912083506 Mon Sep 17 00:00:00 2001
From: Will Deacon Date: Mon, 8 Apr 2019 12:45:09 +0100 Subject: arm64: futex: Fix FUTEX_WAKE_OP atomic ops with non-zero result value Rather embarrassingly, our futex() FUTEX_WAKE_OP implementation doesn't explicitly set the return value on the non-faulting path and instead leaves it holding the result of the underlying atomic operation. This means that any FUTEX_WAKE_OP atomic operation which computes a non-zero value will be reported as having failed. Regrettably, I wrote the buggy code back in 2011 and it was upstreamed as part of the initial arm64 support in 2012. The reasons we appear to get away with this are: 1. FUTEX_WAKE_OP is rarely used and therefore doesn't appear to get exercised by futex() test applications 2. If the result of the atomic operation is zero, the system call behaves correctly 3. Prior to version 2.25, the only operation used by GLIBC set the futex to zero, and therefore worked as expected. From 2.25 onwards, FUTEX_WAKE_OP is not used by GLIBC at all. Fix the implementation by ensuring that the return value is either 0 to indicate that the atomic operation completed successfully, or -EFAULT if we encountered a fault when accessing the user mapping. Cc: Fixes: 6170a97460db ("arm64: Atomic operations") Signed-off-by: Will Deacon --- arch/arm64/include/asm/futex.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index cccb83ad7fa8..e1d95f08f8e1 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -30,8 +30,8 @@ do { \ " prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ insn "\n" \ -"2: stlxr %w3, %w0, %2\n" \ -" cbnz %w3, 1b\n" \ +"2: stlxr %w0, %w3, %2\n" \ +" cbnz %w0, 1b\n" \ " dmb ish\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ @@ -50,30 +50,30 @@ do { \ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { - int oldval = 0, ret, tmp; + int oldval, ret, tmp; u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); pagefault_disable(); switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("mov %w0, %w4", + __futex_atomic_op("mov %w3, %w4", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op("add %w0, %w1, %w4", + __futex_atomic_op("add %w3, %w1, %w4", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_OR: - __futex_atomic_op("orr %w0, %w1, %w4", + __futex_atomic_op("orr %w3, %w1, %w4", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %w0, %w1, %w4", + __futex_atomic_op("and %w3, %w1, %w4", ret, oldval, uaddr, tmp, ~oparg); break; case FUTEX_OP_XOR: - __futex_atomic_op("eor %w0, %w1, %w4", + __futex_atomic_op("eor %w3, %w1, %w4", ret, oldval, uaddr, tmp, oparg); break; default: -- cgit v1.2.3 From 009669e7481361470f7667c96a96893c8ba5f461 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Tue, 30 Apr 2019 15:42:30 +0530 Subject: arm64: arch_k3: Enable interrupt controller drivers Select the TISCI Interrupt Router, Aggregator drivers and all its dependencies for TI's SoCs based on K3 architecture. 
Suggested-by: Marc Zyngier Signed-off-by: Lokesh Vutla Acked-by: Tony Lindgren Signed-off-by: Marc Zyngier --- arch/arm64/Kconfig.platforms | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index b5ca9c50876d..ab9cac8235b3 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -82,6 +82,11 @@ config ARCH_EXYNOS config ARCH_K3 bool "Texas Instruments Inc. K3 multicore SoC architecture" select PM_GENERIC_DOMAINS if PM + select MAILBOX + select TI_MESSAGE_MANAGER + select TI_SCI_PROTOCOL + select TI_SCI_INTR_IRQCHIP + select TI_SCI_INTA_IRQCHIP help This enables support for Texas Instruments' K3 multicore SoC architecture. -- cgit v1.2.3