author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-10 14:46:54 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-10 14:46:54 -0700
commit		4152d146ee2169653297e03b9fa2e0f476923959 (patch)
tree		94ed306eaacf1ec3baad1f68fd5ab83e658599aa /arch/arm64
parent		78c24f7beeae295cb6dfb4274568f9bcfa92b316 (diff)
parent		b398ace5d2ea0b7f00d9f1ce23c647e289c206ca (diff)
Merge branch 'rwonce/rework' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux
Pull READ/WRITE_ONCE rework from Will Deacon:
 "This is the READ_ONCE rework I've been working on for a while, which
  bumps the minimum GCC version and improves code-gen on arm64 when
  stack protector is enabled"

[ Side note: I'm _really_ tempted to raise the minimum gcc version to
  4.9, so that we can just say that we require _Generic() support.
  That would allow us to more cleanly handle a lot of the cases where
  we depend on very complex macros with 'sizeof' or
  __builtin_choose_expr() with __builtin_types_compatible_p() etc.

  This branch has a workaround for sparse not handling _Generic(),
  either, but that was already fixed in the sparse development branch,
  so it's really just gcc-4.9 that we'd require.  - Linus ]

* 'rwonce/rework' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
  compiler_types.h: Use unoptimized __unqual_scalar_typeof for sparse
  compiler_types.h: Optimize __unqual_scalar_typeof compilation time
  compiler.h: Enforce that READ_ONCE_NOCHECK() access size is sizeof(long)
  compiler-types.h: Include naked type in __pick_integer_type() match
  READ_ONCE: Fix comment describing 2x32-bit atomicity
  gcov: Remove old GCC 3.4 support
  arm64: barrier: Use '__unqual_scalar_typeof' for acquire/release macros
  locking/barriers: Use '__unqual_scalar_typeof' for load-acquire macros
  READ_ONCE: Drop pointer qualifiers when reading from scalar types
  READ_ONCE: Enforce atomicity for {READ,WRITE}_ONCE() memory accesses
  READ_ONCE: Simplify implementations of {READ,WRITE}_ONCE()
  arm64: csum: Disable KASAN for do_csum()
  fault_inject: Don't rely on "return value" from WRITE_ONCE()
  net: tls: Avoid assigning 'const' pointer to non-const pointer
  netfilter: Avoid assigning 'const' pointer to non-const pointer
  compiler/gcc: Raise minimum GCC version for kernel builds to 4.8
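[ Editor's illustration, not part of this merge: a minimal user-space sketch of the
  difference Linus alludes to between pre-_Generic type dispatch and C11 _Generic().
  The macro names old_is_int()/new_is_int() are hypothetical; it assumes a GCC/Clang
  toolchain with the typeof and __builtin_* extensions. ]

/* Hypothetical sketch: type dispatch without and with C11 _Generic(). */
#include <stdio.h>

/* Pre-_Generic style: nested builtins, the pattern complex kernel macros use. */
#define old_is_int(x)							\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(x), int), 1, 0)

/* C11 style: a single selection expression. */
#define new_is_int(x)	_Generic((x), int: 1, default: 0)

int main(void)
{
	int i = 0;
	long l = 0;

	printf("%d %d\n", old_is_int(i), old_is_int(l));	/* prints "1 0" */
	printf("%d %d\n", new_is_int(i), new_is_int(l));	/* prints "1 0" */
	return 0;
}

With _Generic() the selection is one standard construct, which is why requiring gcc-4.9
would simplify macros that today chain 'sizeof' or the two builtins shown above.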
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/barrier.h	16
-rw-r--r--	arch/arm64/lib/csum.c			20
2 files changed, 20 insertions, 16 deletions
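[ Editor's note: the barrier.h hunks below rely on __unqual_scalar_typeof() from
  <linux/compiler_types.h>. Roughly, for scalar types it names the type with
  const/volatile stripped, so the temporaries inside the acquire/release macros
  become ordinary register values rather than volatile stack slots, and the macros
  cast back to typeof(*p) so callers see an unchanged expression type. A simplified
  user-space sketch of the idea follows; it covers only a few types and only the
  C11 _Generic() form, unlike the kernel macro, and the name unqual_scalar_typeof
  here is illustrative, not the kernel's. ]

#include <stdio.h>

/* Simplified model of the idea: the _Generic controlling expression undergoes
 * lvalue conversion, so with GCC/Clang its qualifiers are dropped before
 * matching, and the selected constant names the plain scalar type. */
#define unqual_scalar_typeof(x) typeof(					\
	_Generic((x),							\
		 char:	(char)0,					\
		 int:	(int)0,						\
		 long:	(long)0,					\
		 default: (x)))

static int load(const volatile int *p)
{
	/* 'val' is plain 'int', not 'const volatile int', so the compiler is
	 * free to keep it in a register instead of spilling it. */
	unqual_scalar_typeof(*p) val = *p;
	return val;
}

int main(void)
{
	volatile int x = 42;
	printf("%d\n", load(&x));	/* prints 42 */
	return 0;
}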
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 7d9cc5ec4971..fb4c27506ef4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
#define __smp_store_release(p, v) \
do { \
typeof(p) __p = (p); \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force __unqual_scalar_typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
kasan_check_write(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
@@ -110,7 +110,7 @@ do { \
#define __smp_load_acquire(p) \
({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u; \
typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
kasan_check_read(__p, sizeof(*p)); \
@@ -136,33 +136,33 @@ do { \
: "Q" (*__p) : "memory"); \
break; \
} \
- __u.__val; \
+ (typeof(*p))__u.__val; \
})
#define smp_cond_load_relaxed(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = smp_load_acquire(__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#include <asm-generic/barrier.h>
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 60eccae2abad..78b87a64ca0a 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
return tmp + (tmp >> 64);
}
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
{
unsigned int offset, shift, sum;
const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
* odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
- data = READ_ONCE_NOCHECK(*ptr++);
+ data = *ptr++;
#ifdef __LITTLE_ENDIAN
data = (data >> shift) << shift;
#else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
- tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
- tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
- tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
- tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+ tmp1 = *(__uint128_t *)ptr;
+ tmp2 = *(__uint128_t *)(ptr + 2);
+ tmp3 = *(__uint128_t *)(ptr + 4);
+ tmp4 = *(__uint128_t *)(ptr + 6);
len -= 64;
ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
__uint128_t tmp;
sum64 = accumulate(sum64, data);
- tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+ tmp = *(__uint128_t *)ptr;
len -= 16;
ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
}
if (len > 0) {
sum64 = accumulate(sum64, data);
- data = READ_ONCE_NOCHECK(*ptr);
+ data = *ptr;
len -= 8;
}
/*