| author | Will Deacon <will@kernel.org> | 2021-06-24 13:33:57 +0100 |
|---|---|---|
| committer | Will Deacon <will@kernel.org> | 2021-06-24 13:33:57 +0100 |
| commit | 5ceb045541ad979fd304ca2321bf1fbb76189867 (patch) | |
| tree | 1dbc577394ea33a53d1f1071cebd93cba2994df3 /arch/arm64/lib/clear_user.S | |
| parent | 25377204ebd4db2048c873b7c68874247a391998 (diff) | |
| parent | 6b8f648959e5036695f056a60e3444f4753f643e (diff) | |
Merge branch 'for-next/cortex-strings' into for-next/core
Update our kernel string routines to the latest Cortex Strings
implementation.
* for-next/cortex-strings:
  - arm64: update string routine copyrights and URLs
  - arm64: Rewrite __arch_clear_user()
  - arm64: Better optimised memchr()
  - arm64: Import latest memcpy()/memmove() implementation
  - arm64: Add assembly annotations for weak-PI-alias madness
  - arm64: Import latest version of Cortex Strings' strncmp
  - arm64: Import updated version of Cortex Strings' strlen
  - arm64: Import latest version of Cortex Strings' strcmp
  - arm64: Import latest version of Cortex Strings' memcmp
Diffstat (limited to 'arch/arm64/lib/clear_user.S')
-rw-r--r-- | arch/arm64/lib/clear_user.S | 47
1 file changed, 27 insertions, 20 deletions
```diff
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index af9afcbec92c..a7efb2ad2a1c 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -1,12 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
  */
-#include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
 #include <asm/assembler.h>
 
 	.text
@@ -19,25 +16,33 @@
  *
  * Alignment fixed up by hardware.
  */
+
+	.p2align 4
+	// Alignment is for the loop, but since the prologue (including BTI)
+	// is also 16 bytes we can keep any padding outside the function
 SYM_FUNC_START(__arch_clear_user)
-	mov	x2, x1			// save the size for fixup return
+	add	x2, x0, x1
 	subs	x1, x1, #8
 	b.mi	2f
 1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr	xzr, [x0])
+	add	x0, x0, #8
 	subs	x1, x1, #8
-	b.pl	1b
-2:	adds	x1, x1, #4
-	b.mi	3f
-user_ldst 9f, sttr, wzr, x0, 4
-	sub	x1, x1, #4
-3:	adds	x1, x1, #2
-	b.mi	4f
-user_ldst 9f, sttrh, wzr, x0, 2
-	sub	x1, x1, #2
-4:	adds	x1, x1, #1
-	b.mi	5f
-user_ldst 9f, sttrb, wzr, x0, 0
+	b.hi	1b
+USER(9f, sttr	xzr, [x2, #-8])
+	mov	x0, #0
+	ret
+
+2:	tbz	x1, #2, 3f
+USER(9f, sttr	wzr, [x0])
+USER(8f, sttr	wzr, [x2, #-4])
+	mov	x0, #0
+	ret
+
+3:	tbz	x1, #1, 4f
+USER(9f, sttrh	wzr, [x0])
+4:	tbz	x1, #0, 5f
+USER(7f, sttrb	wzr, [x2, #-1])
 5:	mov	x0, #0
 	ret
 SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	mov	x0, x2			// return the original size
+7:	sub	x0, x2, #5	// Adjust for faulting on the final byte...
+8:	add	x0, x0, #4	// ...or the second word of the 4-7 byte case
+9:	sub	x0, x2, x0
 	ret
 	.previous
```
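The rewritten routine no longer saves the byte count for the fixup; instead it keeps the end pointer in x2 (`add x2, x0, x1`) and works relative to it: 8 bytes or more are cleared with a loop of 8-byte stores plus a final store that ends exactly at the buffer end (and may overlap the previous one), 4-7 bytes get two overlapping word stores, and only a 1-3 byte tail falls back to halfword/byte stores. The C sketch below is an illustrative model of that store pattern, not kernel code; `clear_model()` and its parameter names are hypothetical.

```c
/*
 * Hypothetical C model of the store pattern used by the rewritten
 * __arch_clear_user() above.  In the assembly, x0 is the running pointer,
 * x1 the remaining size and x2 the end pointer (x0 + x1).
 */
#include <stddef.h>
#include <string.h>

static void clear_model(unsigned char *addr, size_t size)
{
	unsigned char *end = addr + size;	/* x2 = x0 + x1 */

	if (size >= 8) {
		/*
		 * Label 1: loop of 8-byte stores, then one final 8-byte
		 * store ending exactly at 'end', which may overlap the
		 * last loop iteration (USER(9f, sttr xzr, [x2, #-8])).
		 */
		while ((size_t)(end - addr) > 8) {
			memset(addr, 0, 8);
			addr += 8;
		}
		memset(end - 8, 0, 8);
		return;
	}

	if (size & 4) {
		/*
		 * Label 2: 4-7 bytes are covered by a word store at the
		 * start and an overlapping word store ending at 'end'.
		 */
		memset(addr, 0, 4);
		memset(end - 4, 0, 4);
		return;
	}

	if (size & 2)		/* label 3: halfword store at the start */
		memset(addr, 0, 2);
	if (size & 1)		/* label 4: byte store at end - 1 */
		end[-1] = 0;
}
```

The fixup path follows the same end-relative idea: label 9 returns `x2 - x0`, the distance from the faulting pointer to the end of the buffer, while labels 7 and 8 first adjust x0 for the stores that address memory relative to x2 (the final byte at `[x2, #-1]` and the second word at `[x2, #-4]`), so the reported "bytes not cleared" stays consistent.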