author		Linus Torvalds <torvalds@linux-foundation.org>	2024-06-01 17:34:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-06-01 17:34:35 -0700
commit		83814698cf48ce3aadc5d88a3f577f04482ff92a (patch)
tree		b35bbbdffe75f446f042aab69e752202789fcd44 /arch
parent		54bec8ed5732339438271d07376890454fa58046 (diff)
parent		be2fc65d66e0406cc9d39d40becaecdf4ee765f3 (diff)
Merge tag 'powerpc-6.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Enforce full ordering for ATOMIC operations with BPF_FETCH

 - Fix uaccess build errors seen with GCC 13/14

 - Fix build errors on ppc32 due to ARCH_HAS_KERNEL_FPU_SUPPORT

 - Drop error message from lparcfg guest name lookup

Thanks to Christophe Leroy, Guenter Roeck, Nathan Lynch, Naveen N Rao,
Puranjay Mohan, and Samuel Holland.

* tag 'powerpc-6.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc: Limit ARCH_HAS_KERNEL_FPU_SUPPORT to PPC64
  powerpc/uaccess: Use YZ asm constraint for ld
  powerpc/uaccess: Fix build errors seen with GCC 13/14
  powerpc/pseries/lparcfg: drop error message from guest name lookup
  powerpc/bpf: enforce full ordering for ATOMIC operations with BPF_FETCH
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig				 2
-rw-r--r--	arch/powerpc/include/asm/uaccess.h		27
-rw-r--r--	arch/powerpc/net/bpf_jit_comp32.c		12
-rw-r--r--	arch/powerpc/net/bpf_jit_comp64.c		12
-rw-r--r--	arch/powerpc/platforms/pseries/lparcfg.c	 4
5 files changed, 54 insertions, 3 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3c968f2f4ac4..c88c6d46a5bc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_KCOV
- select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC_FPU
+ select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index de10437fd206..fd594bf6c6a9 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label) \
+ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
@@ -165,8 +181,19 @@ do { \
#endif
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
__get_user_asm_goto(x, addr, label, "ld")
+#else
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_goto_output( \
+ "1: ld%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
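For context on the uaccess hunks above, a minimal standalone sketch (not from the kernel tree; the helper name is made up) of what the new DS_FORM_CONSTRAINT buys: 'ld' and 'std' are DS-form instructions, so their displacement must be a multiple of 4. A plain "m"-style operand can let the GCC 13/14 builds mentioned in the merge message pick a byte-granular offset that the assembler then rejects, while "YZ<>" ("Z<>" for clang, which does not accept the "Y" constraint) restricts the operand to addresses the instruction can actually encode. Builds only with a powerpc64 compiler:

#ifdef __clang__
#define DS_CONSTRAINT "Z<>"		/* clang: indexed/indirect memory only */
#else
#define DS_CONSTRAINT "YZ<>"		/* GCC: also allow DS-form displacements */
#endif

/* Hypothetical helper, for illustration only */
static inline unsigned long load_ds_form(const unsigned long *p)
{
	unsigned long val;

	/* %U1/%X1 append the update ('u') or indexed ('x') mnemonic suffix
	 * when the compiler picks such an addressing mode for operand 1 */
	asm("ld%U1%X1 %0, %1" : "=r" (val) : DS_CONSTRAINT (*p));
	return val;
}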
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 43b97032a91c..a0c4f1bde83e 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
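To illustrate the ordering the two hunks above enforce, here is a sketch in plain C with inline asm (hypothetical helper, not the JIT's literal output) of the shape of code the 32-bit JIT now emits on SMP for e.g. an atomic add with BPF_FETCH: a 'sync' bracketing the lwarx/stwcx. loop, matching the full ordering of __cmpxchg_u32() that the comment cites:

/* Sketch only: fetch-and-add with LKMM full ordering on 32-bit powerpc */
static inline unsigned int fetch_add_full_ordered(unsigned int *p, unsigned int v)
{
	unsigned int old, tmp;

	asm volatile(
	"	sync\n"			/* order prior accesses before the RMW */
	"1:	lwarx	%0,0,%3\n"	/* load-reserve the old value */
	"	add	%1,%0,%4\n"	/* compute old + v */
	"	stwcx.	%1,0,%3\n"	/* store-conditional the new value */
	"	bne-	1b\n"		/* retry if the reservation was lost */
	"	sync\n"			/* order the RMW before later accesses */
	: "=&r" (old), "=&r" (tmp), "+m" (*p)
	: "r" (p), "r" (v)
	: "cc", "memory");

	return old;			/* BPF_FETCH: caller gets the old value */
}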
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 8afc14a4a125..7703dcf48be8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -846,6 +846,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@@ -908,6 +917,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
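The 64-bit path follows the same pattern. Sketched below is a cmpxchg shaped like the __cmpxchg_u64() the comment refers to (again a hypothetical helper, not the JIT's generated code), with the entry and exit 'sync' supplying full ordering for BPF_CMPXCHG, which implies BPF_FETCH:

/* Sketch only: fully ordered 64-bit cmpxchg on powerpc64 */
static inline unsigned long cmpxchg_u64_full_ordered(unsigned long *p,
						     unsigned long old,
						     unsigned long new)
{
	unsigned long prev;

	asm volatile(
	"	sync\n"			/* entry barrier */
	"1:	ldarx	%0,0,%2\n"	/* load-reserve the current value */
	"	cmpd	0,%0,%3\n"	/* compare with the expected 'old' */
	"	bne-	2f\n"		/* mismatch: skip the store and exit */
	"	stdcx.	%4,0,%2\n"	/* try to store 'new' */
	"	bne-	1b\n"		/* lost the reservation: retry */
	"	sync\n"			/* exit barrier */
	"2:\n"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}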
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 6e7029640c0c..62da20f9700a 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -371,8 +371,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
- pr_err_once("Error can't get the LPAR name");
+ if (read_rtas_lpar_name(m))
+ read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))