arch/sh/lib/copy_page.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * copy_page and __copy_user implementations for SuperH
 *
 * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002  Toshinobu Sugioka
 * Copyright (C) 2006  Paul Mundt
 */
#include <linux/linkage.h>
#include <asm/page.h>

/*
 * copy_page
 * @to: P1 address
 * @from: P1 address
 *
 * void copy_page(void *to, void *from)
 */
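
/*
 * For reference, a rough C equivalent of the loop below (a sketch,
 * not what is built; it assumes PAGE_SIZE is a multiple of 32 bytes,
 * which holds for all sh page sizes):
 *
 *	void copy_page(void *to, void *from)
 *	{
 *		unsigned long *d = to, *s = from;
 *		unsigned long *end = s + PAGE_SIZE / sizeof(*s);
 *
 *		while (s != end) {
 *			d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
 *			d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
 *			s += 8;
 *			d += 8;
 *		}
 *	}
 */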

/*
 * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
 * r8 --- from + PAGE_SIZE
 * r9 --- not used
 * r10 --- to
 * r11 --- from
 */
ENTRY(copy_page)
	mov.l	r8,@-r15
	mov.l	r10,@-r15
	mov.l	r11,@-r15
	mov	r4,r10
	mov	r5,r11
	mov	r5,r8
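	! Build r8 = from + PAGE_SIZE.  PAGE_SIZE does not fit in the
	! 8-bit mov immediate, so load PAGE_SIZE >> 10 and shift it
	! back up by 10 bits (shll8 + shll2), which is exact because
	! the page size is a multiple of 1 KiB.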
	mov	#(PAGE_SIZE >> 10), r0
	shll8	r0
	shll2	r0
	add	r0,r8
	!
1:	mov.l	@r11+,r0
	mov.l	@r11+,r1
	mov.l	@r11+,r2
	mov.l	@r11+,r3
	mov.l	@r11+,r4
	mov.l	@r11+,r5
	mov.l	@r11+,r6
	mov.l	@r11+,r7
#if defined(CONFIG_CPU_SH4)
	movca.l	r0,@r10
#else
	mov.l	r0,@r10
#endif
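	! On SH-4, movca.l stores r0 while allocating the cache block
	! for @r10 without first fetching its old contents from memory;
	! the stores below fill the remaining 28 bytes of the line, so
	! the burst read is avoided at no cost.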
	add	#32,r10
	mov.l	r7,@-r10
	mov.l	r6,@-r10
	mov.l	r5,@-r10
	mov.l	r4,@-r10
	mov.l	r3,@-r10
	mov.l	r2,@-r10
	mov.l	r1,@-r10
	cmp/eq	r11,r8
	bf/s	1b
	 add	#28,r10
	!
	mov.l	@r15+,r11
	mov.l	@r15+,r10
	mov.l	@r15+,r8
	rts
	 nop

/*
 * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
 * Return the number of bytes NOT copied
 */
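/*
 * On sh, raw_copy_to_user() and raw_copy_from_user() are both built
 * on this routine, so either the source or the destination may be a
 * user pointer; that is why loads and stores alike are wrapped in
 * EX() below.
 */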
#define EX(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6000f	;	\
	.previous
#define EX_NO_POP(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6005f	;	\
	.previous
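/*
 * Each EX()/EX_NO_POP() use emits an __ex_table entry pairing the
 * possibly-faulting instruction (local label 9999) with a fixup
 * address: 6000f for the main body, which restores r8-r11 on the way
 * out, and 6005f for the small-copy path, which never saved them.
 * Both fixups return the number of bytes left uncopied.
 */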
ENTRY(__copy_user)
	! Check if small number of bytes
	mov	#11,r0
	mov	r4,r3
	cmp/gt	r0,r6		! r6 (len) > r0 (11)
	bf/s	.L_cleanup_loop_no_pop
	 add	r6,r3		! last destination address

	! Calculate bytes needed to longword-align src
	mov.l	r11,@-r15
	neg	r5,r0
	mov.l	r10,@-r15
	add	#4,r0
	mov.l	r9,@-r15
	and	#3,r0
	mov.l	r8,@-r15
	tst	r0,r0
	bt	2f

1:
	! Copy bytes until src is longword-aligned
EX(	mov.b	@r5+,r1		)
	dt	r0
	add	#-1,r6
EX(	mov.b	r1,@r4		)
	bf/s	1b
	 add	#1,r4

	! Jump to appropriate routine depending on dest
2:	mov	#3,r1
	mov	r6, r2
	and	r4,r1
	shlr2	r2
	shll2	r1
	mova	.L_jump_tbl,r0
	mov.l	@(r0,r1),r1
	jmp	@r1
	 nop
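	! r1 = (dest & 3) * 4 indexes .L_jump_tbl below, and r2 = len >> 2
	! is the longword count each .L_dest* routine consumes.  The
	! source is longword-aligned by this point, so only the
	! destination alignment still varies.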

	.align 2
.L_jump_tbl:
	.long	.L_dest00
	.long	.L_dest01
	.long	.L_dest10
	.long	.L_dest11

/*
 * Come here if there are fewer than 12 bytes to copy
 *
 * Keep the branch target close, so the bf/s displacement doesn't
 * overflow and force a more expensive branch sequence to be emitted.
 * This is the fast-path for small copies; the jump via the jump table
 * will hit the default slow-path cleanup. -PFM.
 */
.L_cleanup_loop_no_pop:
	tst	r6,r6		! Check explicitly for zero
	bt	1f

2:
EX_NO_POP(	mov.b	@r5+,r0		)
	dt	r6
EX_NO_POP(	mov.b	r0,@r4		)
	bf/s	2b
	 add	#1,r4

1:	mov	#0,r0		! normal return
5000:

# Exception handler:
.section .fixup, "ax"
6005:
	mov.l	8000f,r1
	mov	r3,r0
	jmp	@r1
	 sub	r4,r0
	.align	2
8000:	.long	5000b
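	! 8000: holds the address of 5000: above.  r3 was preloaded on
	! entry with the end of the destination (to + n), so r3 - r4 is
	! the number of bytes not yet copied, returned in r0 via the
	! common exit path.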

.previous
	rts
	 nop

! Destination = 00

.L_dest00:
	! Skip the large copy for small transfers
	mov	#(32+32-4), r0
	cmp/gt	r6, r0		! r0 (60) > r6 (len)
	bt	1f

	! Align dest to a 32 byte boundary
	neg	r4,r0
	add	#0x20, r0
	and	#0x1f, r0
	tst	r0, r0
	bt	2f

	sub	r0, r6
	shlr2	r0
3:
EX(	mov.l	@r5+,r1		)
	dt	r0
EX(	mov.l	r1,@r4		)
	bf/s	3b
	 add	#4,r4

2:
EX(	mov.l	@r5+,r0		)
EX(	mov.l	@r5+,r1		)
EX(	mov.l	@r5+,r2		)
EX(	mov.l	@r5+,r7		)
EX(	mov.l	@r5+,r8		)
EX(	mov.l	@r5+,r9		)
EX(	mov.l	@r5+,r10	)
EX(	mov.l	@r5+,r11	)
#ifdef CONFIG_CPU_SH4
EX(	movca.l	r0,@r4		)
#else
EX(	mov.l	r0,@r4		)
#endif
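	! As in copy_page above, movca.l on SH-4 allocates the cache
	! block without reading it back from memory; the destination was
	! aligned to 32 bytes beforehand, so each pass fills a full line.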
	add	#-32, r6
EX(	mov.l	r1,@(4,r4)	)
	mov	#32, r0
EX(	mov.l	r2,@(8,r4)	)
	cmp/gt	r6, r0		! r0 (32) > r6 (len)
EX(	mov.l	r7,@(12,r4)	)
EX(	mov.l	r8,@(16,r4)	)
EX(	mov.l	r9,@(20,r4)	)
EX(	mov.l	r10,@(24,r4)	)
EX(	mov.l	r11,@(28,r4)	)
	bf/s	2b
	 add	#32,r4

1:	mov	r6, r0
	shlr2	r0
	tst	r0, r0
	bt	.L_cleanup
1:
EX(	mov.l	@r5+,r1		)
	dt	r0
EX(	mov.l	r1,@r4		)
	bf/s	1b
	 add	#4,r4

	bra	.L_cleanup
	 nop

! Destination = 10

.L_dest10:
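	! Destination is 2 mod 4.  Aligned longwords are read from the
	! source and neighbouring halves are stitched together with
	! xtrct (xtrct Rm,Rn leaves the middle 32 bits of the pair
	! Rm:Rn in Rn, i.e. Rn = Rm << 16 | Rn >> 16), so every wide
	! store lands on its natural alignment.  r7 counts 32-byte
	! blocks; r2 keeps the leftover longwords for the loop at 1:.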
	mov	r2,r7
	shlr2	r7
	shlr	r7
	tst	r7,r7
	mov	#7,r0
	bt/s	1f
	 and	r0,r2
2:
	dt	r7
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX(	mov.l	@r5+,r0		)
EX(	mov.l	@r5+,r1		)
EX(	mov.l	@r5+,r8		)
EX(	mov.l	@r5+,r9		)
EX(	mov.l	@r5+,r10	)
EX(	mov.w	r0,@r4		)
	add	#2,r4
	xtrct	r1,r0
	xtrct	r8,r1
	xtrct	r9,r8
	xtrct	r10,r9

EX(	mov.l	r0,@r4		)
EX(	mov.l	r1,@(4,r4)	)
EX(	mov.l	r8,@(8,r4)	)
EX(	mov.l	r9,@(12,r4)	)

EX(	mov.l	@r5+,r1		)
EX(	mov.l	@r5+,r8		)
EX(	mov.l	@r5+,r0		)
	xtrct	r1,r10
	xtrct	r8,r1
	xtrct	r0,r8
	shlr16	r0
EX(	mov.l	r10,@(16,r4)	)
EX(	mov.l	r1,@(20,r4)	)
EX(	mov.l	r8,@(24,r4)	)
EX(	mov.w	r0,@(28,r4)	)
	bf/s	2b
	 add	#30,r4
#else
EX(	mov.l	@(28,r5),r0	)
EX(	mov.l	@(24,r5),r8	)
EX(	mov.l	@(20,r5),r9	)
EX(	mov.l	@(16,r5),r10	)
EX(	mov.w	r0,@(30,r4)	)
	add	#-2,r4
	xtrct	r8,r0
	xtrct	r9,r8
	xtrct	r10,r9
EX(	mov.l	r0,@(28,r4)	)
EX(	mov.l	r8,@(24,r4)	)
EX(	mov.l	r9,@(20,r4)	)

EX(	mov.l	@(12,r5),r0	)
EX(	mov.l	@(8,r5),r8	)
	xtrct	r0,r10
EX(	mov.l	@(4,r5),r9	)
EX(	mov.l	r10,@(16,r4)	)
EX(	mov.l	@r5,r10		)
	xtrct	r8,r0
	xtrct	r9,r8
	xtrct	r10,r9
EX(	mov.l	r0,@(12,r4)	)
EX(	mov.l	r8,@(8,r4)	)
	swap.w	r10,r0
EX(	mov.l	r9,@(4,r4)	)
EX(	mov.w	r0,@(2,r4)	)

	add	#32,r5
	bf/s	2b
	 add	#34,r4
#endif
	tst	r2,r2
	bt	.L_cleanup

1:	! Read longword, write two words per iteration
EX(	mov.l	@r5+,r0		)
	dt	r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX(	mov.w	r0,@r4		)
	shlr16	r0
EX(	mov.w	r0,@(2,r4)	)
#else
EX(	mov.w	r0,@(2,r4)	)
	shlr16	r0
EX(	mov.w	r0,@r4		)
#endif
	bf/s	1b
	 add	#4,r4

	bra	.L_cleanup
	 nop

! Destination = 01 or 11

.L_dest01:
.L_dest11:
	! Read longword, write byte, word, byte per iteration
EX(	mov.l	@r5+,r0		)
	dt	r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX(	mov.b	r0,@r4		)
	shlr8	r0
	add	#1,r4
EX(	mov.w	r0,@r4		)
	shlr16	r0
EX(	mov.b	r0,@(2,r4)	)
	bf/s	.L_dest01
	 add	#3,r4
#else
EX(	mov.b	r0,@(3,r4)	)
	shlr8	r0
	swap.w	r0,r7
EX(	mov.b	r7,@r4		)
	add	#1,r4
EX(	mov.w	r0,@r4		)
	bf/s	.L_dest01
	 add	#3,r4
#endif
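	! Falls through to .L_cleanup once r2 longwords are done.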

! Cleanup last few bytes
.L_cleanup:
	mov	r6,r0
	and	#3,r0
	tst	r0,r0
	bt	.L_exit
	mov	r0,r6

.L_cleanup_loop:
EX(	mov.b	@r5+,r0		)
	dt	r6
EX(	mov.b	r0,@r4		)
	bf/s	.L_cleanup_loop
	 add	#1,r4

.L_exit:
	mov	#0,r0		! normal return

5000:

# Exception handler:
.section .fixup, "ax"
6000:
	mov.l	8000f,r1
	mov	r3,r0
	jmp	@r1
	 sub	r4,r0
	.align	2
8000:	.long	5000b

.previous
	mov.l	@r15+,r8
	mov.l	@r15+,r9
	mov.l	@r15+,r10
	rts
	 mov.l	@r15+,r11