From c6a771d932332568df9f46a3b53507c578e8c8e8 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 14 Apr 2020 22:22:47 +0100
Subject: arm64: csum: Disable KASAN for do_csum()

do_csum() over-reads the source buffer and therefore abuses
READ_ONCE_NOCHECK() to avoid tripping up KASAN. In preparation for
READ_ONCE_NOCHECK() becoming a macro, and therefore losing its
'__no_sanitize_address' annotation, just annotate do_csum() explicitly
and fall back to normal loads.

Cc: Mark Rutland
Cc: Robin Murphy
Signed-off-by: Will Deacon
---
 arch/arm64/lib/csum.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 60eccae2abad..78b87a64ca0a 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
 	return tmp + (tmp >> 64);
 }
 
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
 {
 	unsigned int offset, shift, sum;
 	const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	 * odd/even alignment, and means we can ignore it until the very end.
 	 */
 	shift = offset * 8;
-	data = READ_ONCE_NOCHECK(*ptr++);
+	data = *ptr++;
 #ifdef __LITTLE_ENDIAN
 	data = (data >> shift) << shift;
 #else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	while (unlikely(len > 64)) {
 		__uint128_t tmp1, tmp2, tmp3, tmp4;
 
-		tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
-		tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
-		tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
-		tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+		tmp1 = *(__uint128_t *)ptr;
+		tmp2 = *(__uint128_t *)(ptr + 2);
+		tmp3 = *(__uint128_t *)(ptr + 4);
+		tmp4 = *(__uint128_t *)(ptr + 6);
 
 		len -= 64;
 		ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 		__uint128_t tmp;
 
 		sum64 = accumulate(sum64, data);
-		tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+		tmp = *(__uint128_t *)ptr;
 
 		len -= 16;
 		ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
 	}
 	if (len > 0) {
 		sum64 = accumulate(sum64, data);
-		data = READ_ONCE_NOCHECK(*ptr);
+		data = *ptr;
 		len -= 8;
 	}
 	/*
--
cgit v1.2.3
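A note on the trade-off above: '__no_sanitize_address' is function-wide, so KASAN stops checking every access in do_csum(), not only the deliberate over-reads. The over-read itself stays safe because an aligned word load never crosses a page boundary, so fetching bytes beyond 'len' cannot fault. Below is a minimal stand-alone sketch of the same over-read-then-mask pattern; the function name sum_words() and the 8-byte-aligned, little-endian buffer are assumptions made for illustration, not details taken from the patch:

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative only: walk the buffer a word at a time, deliberately
 * over-reading up to 7 bytes past 'len'. KASAN instrumentation is
 * disabled for the whole function, mirroring do_csum() above.
 * Assumes 'buf' is 8-byte aligned and the CPU is little-endian.
 */
uint64_t __attribute__((no_sanitize_address))
sum_words(const unsigned char *buf, size_t len)
{
	const uint64_t *ptr = (const uint64_t *)buf;
	uint64_t sum = 0, tail;

	while (len >= 8) {
		sum += *ptr++;
		len -= 8;
	}
	if (len) {
		tail = *ptr;				/* the deliberate over-read */
		tail &= ~0ULL >> (64 - 8 * len);	/* drop the bytes past 'len' */
		sum += tail;
	}
	return sum;
}

The masking step is what keeps the over-read benign: the extra bytes are fetched, but they never contribute to the result.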
From 10223c5286f7389c022e9e91f12c49918790cf36 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 Dec 2019 17:11:43 +0000
Subject: arm64: barrier: Use '__unqual_scalar_typeof' for acquire/release macros

Passing volatile-qualified pointers to the arm64 implementations of the
load-acquire/store-release macros results in a re-load from the stack
and a bunch of associated stack-protector churn due to the temporary
result variable inheriting the volatile semantics thanks to the use of
'typeof()'.

Define these temporary variables using '__unqual_scalar_typeof' to drop
the volatile qualifier in the case that they are scalar types.

Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Arnd Bergmann
Acked-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/barrier.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 7d9cc5ec4971..fb4c27506ef4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 #define __smp_store_release(p, v)					\
 do {									\
 	typeof(p) __p = (p);						\
-	union { typeof(*p) __val; char __c[1]; } __u =			\
-		{ .__val = (__force typeof(*p)) (v) };			\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
+		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
 	compiletime_assert_atomic_type(*p);				\
 	kasan_check_write(__p, sizeof(*p));				\
 	switch (sizeof(*p)) {						\
@@ -110,7 +110,7 @@ do {									\
 
 #define __smp_load_acquire(p)						\
 ({									\
-	union { typeof(*p) __val; char __c[1]; } __u;			\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
 	typeof(p) __p = (p);						\
 	compiletime_assert_atomic_type(*p);				\
 	kasan_check_read(__p, sizeof(*p));				\
@@ -136,33 +136,33 @@ do {									\
 		: "Q" (*__p) : "memory");				\
 		break;							\
 	}								\
-	__u.__val;							\
+	(typeof(*p))__u.__val;						\
 })
 
 #define smp_cond_load_relaxed(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = READ_ONCE(*__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #define smp_cond_load_acquire(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = smp_load_acquire(__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #include <asm-generic/barrier.h>
--
cgit v1.2.3
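For a concrete picture of the problem this fixes: 'typeof(*p)' preserves qualifiers, so when 'p' points to volatile the temporary inherits 'volatile' and every later use of it must be re-read from its stack slot. '__unqual_scalar_typeof()', defined in include/linux/compiler_types.h, sidesteps this with _Generic(), whose controlling expression undergoes lvalue conversion and therefore sheds its qualifiers. A condensed, stand-alone sketch follows; the real kernel macro covers all the standard integer types, and the two demo functions are made up for illustration:

#include <stdio.h>

/*
 * Condensed version of the kernel's __unqual_scalar_typeof(): the
 * _Generic() selection is made on the lvalue-converted (and hence
 * unqualified) type, so a 'volatile int' lvalue matches plain 'int'.
 * Non-scalar types fall back to ordinary typeof().
 */
#define __unqual_scalar_typeof(x) typeof(		\
		_Generic((x),				\
			 int:	(int)0,			\
			 long:	(long)0,		\
			 default: (x)))

volatile int flag;

int read_twice_volatile(void)
{
	typeof(flag) v = flag;	/* 'volatile int v': each use re-loads from the stack */
	return v + v;
}

int read_twice_unqual(void)
{
	__unqual_scalar_typeof(flag) v = flag;	/* plain 'int v': loaded once, held in a register */
	return v + v;
}

int main(void)
{
	flag = 21;
	printf("%d %d\n", read_twice_volatile(), read_twice_unqual());
	return 0;
}

The '(typeof(*p))' casts in the hunks above then hand the caller back the expected result type; since a cast yields an rvalue, this does not reintroduce the volatile reload.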