#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/csr.h>

	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	.section __ex_table,"a"
	.balign RISCV_SZPTR
	RISCV_PTR 100b, \lbl
	.previous
	.endm
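
/*
 * Each "fixup" expansion records a (faulting-instruction, landing-pad)
 * pair in __ex_table.  If a load/store below faults while SUM is set,
 * the trap path searches that table and resumes at the landing pad
 * (10f/11f) instead of oopsing.  Illustrative sketch of the consumer
 * (the real handler lives in arch/riscv/mm/extable.c; names here are
 * from memory, not quoted verbatim):
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(regs->epc);
 *		if (!fixup)
 *			return 0;
 *		regs->epc = fixup->fixup;
 *		return 1;
 *	}
 */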

ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6
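
	/*
	 * The C side brackets user accesses the same way; the helpers in
	 * asm/uaccess.h look roughly like this (illustrative sketch, not
	 * quoted verbatim from the header):
	 *
	 *	#define __enable_user_access() \
	 *		__asm__ __volatile__ ("csrs sstatus, %0" \
	 *				      : : "r" (SR_SUM) : "memory")
	 *	#define __disable_user_access() \
	 *		__asm__ __volatile__ ("csrc sstatus, %0" \
	 *				      : : "r" (SR_SUM) : "memory")
	 */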

	/* Save for return value */
	mv	t5, a2

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2
	bgtu	a0, t0, 5f

	/*
	 * Use byte copy only if too small.
	 */
	li	a3, 9*SZREG /* size must be larger than size in word_copy */
	bltu	a2, a3, .Lbyte_copy_tail
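
	/*
	 * Why 9*SZREG (SZREG is 4 on RV32 and 8 on RV64): aligning dst
	 * below may consume up to SZREG-1 bytes, and the threshold must
	 * still leave a full 8*SZREG stride for the unrolled .Lword_copy.
	 */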

	/*
	 * Copy first bytes until dst is aligned to a word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
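	/*
	 * Worked example of the round-up (illustrative values): on RV64
	 * (SZREG = 8), a0 = 0x1003 gives t1 = (0x1003 + 7) & ~7 = 0x1008,
	 * so the loop at 1: copies five bytes.
	 */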
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_first_bytes

1:
	/* a5 - one byte for copying data */
	fixup lb	a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb	a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_first_bytes:
	/*
	 * Now dst is aligned.
	 * Use shift copy if src is misaligned (relative to the now-aligned
	 * dst); use word copy when both are aligned, since no shifting is
	 * needed in that case.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned, unrolled word copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* not to over run */
2:
	fixup REG_L	a4,        0(a1), 10f
	fixup REG_L	a5,    SZREG(a1), 10f
	fixup REG_L	a6,  2*SZREG(a1), 10f
	fixup REG_L	a7,  3*SZREG(a1), 10f
	fixup REG_L	t1,  4*SZREG(a1), 10f
	fixup REG_L	t2,  5*SZREG(a1), 10f
	fixup REG_L	t3,  6*SZREG(a1), 10f
	fixup REG_L	t4,  7*SZREG(a1), 10f
	fixup REG_S	a4,        0(a0), 10f
	fixup REG_S	a5,    SZREG(a0), 10f
	fixup REG_S	a6,  2*SZREG(a0), 10f
	fixup REG_S	a7,  3*SZREG(a0), 10f
	fixup REG_S	t1,  4*SZREG(a0), 10f
	fixup REG_S	t2,  5*SZREG(a0), 10f
	fixup REG_S	t3,  6*SZREG(a0), 10f
	fixup REG_S	t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b
	addi	t0, t0, 8*SZREG /* revert to original value */
	j	.Lbyte_copy_tail
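
	/*
	 * Rough C model of the unrolled loop at 2: above (illustrative
	 * only; src_w/dst_w are long pointers, dst_end matches t0 before
	 * the adjustment):
	 *
	 *	while ((char *)dst_w < dst_end - 8 * sizeof(long)) {
	 *		long t[8];
	 *		for (i = 0; i < 8; i++) t[i] = src_w[i]; // 8 loads
	 *		for (i = 0; i < 8; i++) dst_w[i] = t[i]; // 8 stores
	 *		src_w += 8;
	 *		dst_w += 8;
	 *	}
	 */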

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For a misaligned copy we still perform aligned word copy, but
	 * we need to use the value fetched from the previous iteration and
	 * do some shifts.
	 * This is safe because the extra bytes read this way are fewer than
	 * a word and stay inside an aligned word that also holds valid
	 * source bytes, hence inside the same page.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* Calculate the aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* Convert unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* converting bytes in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3
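	/*
	 * Worked example (illustrative): on RV64 with a3 = 2, t3 = 16 and
	 * t4 = 48, i.e. each stored word combines the top 48 bits of the
	 * previous source word with the bottom 16 bits of the next one.
	 */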
	/* Load the first word to combine with the second word */
	fixup REG_L	a5, 0(a1), 10f

3:
	/* Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L	a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S	a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b

	/* Revert src to its original unaligned value */
	add	a1, a1, a3
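
	/*
	 * Rough C model of the loop at 3: above (illustrative only; the
	 * assembly reuses a5 for both prev and next across iterations):
	 *
	 *	unsigned long prev = *src_word;	// the load before 3:
	 *	while (dst_word < dst_aligned_end) {
	 *		unsigned long next = *++src_word;
	 *		*dst_word++ = (prev >> t3) | (next << t4);
	 *		prev = next;
	 *	}
	 */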

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, 5f
4:
	fixup lb	a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb	a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

5:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)

ENTRY(__clear_user)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

	add	a3, a0, a1
	addi	t0, a0, SZREG-1
	andi	t1, a3, ~(SZREG-1)
	andi	t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of target region
	 * t0: lowest SZREG-aligned address in target region
	 * t1: highest SZREG-aligned address in target region
	 */
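	/*
	 * Shape of the routine in C (illustrative sketch; head_end and
	 * word_end correspond to t0 and t1):
	 *
	 *	char *p = to, *end = to + n;
	 *	while (p < head_end)	// 4: unaligned head bytes
	 *		*p++ = 0;
	 *	while (p < word_end) {	// 1: whole aligned words
	 *		*(long *)p = 0;
	 *		p += sizeof(long);
	 *	}
	 *	while (p < end)		// 5: tail bytes
	 *		*p++ = 0;
	 */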
	bgeu	t0, t1, 2f
	bltu	a0, t0, 4f
1:
	fixup REG_S, zero, (a0), 11f
	addi	a0, a0, SZREG
	bltu	a0, t1, 1b
2:
	bltu	a0, a3, 5f
3:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret
4: /* Edge case: unalignment */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, t0, 4b
	j	1b
5: /* Edge case: remainder */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, a3, 5b
	j	3b
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
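
/*
 * Error-return convention: on a fault, the handlers at 10:/11: below
 * report how much work remained.  This version pessimistically returns
 * the full original count (t5 holds the saved size for the copy
 * routines; a1 still holds it for __clear_user) rather than computing
 * the exact number of bytes left.
 */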

	.section .fixup,"ax"
	.balign 4
	/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	mv	a0, t5
	ret
11:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	mv	a0, a1
	ret
	.previous