/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include "sysdep.h"

ENTRY(memcmp)
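	/*
	 * int memcmp(const void *s1, const void *s2, size_t len)
	 *
	 * Arguments arrive in r0 (s1), r1 (s2) and r2 (len); r0 is
	 * reused for the return value, so s1 is copied to r3 and the
	 * two buffers are walked through r3/r1.
	 *
	 * Strategy: align s1 to a word boundary, compare 16 bytes per
	 * iteration, then the remaining words, then the remaining
	 * bytes.  On a word mismatch, .L_byte_check locates the first
	 * differing byte in memory order.
	 */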
	/* Test if len is less than 4 bytes.  */
	mov	r3, r0
	movi	r0, 0
	/* r4 is clobbered as scratch below, so preserve it in r12.  */
	mov	r12, r4
	cmplti	r2, 4
	bt	.L_compare_by_byte

	/* r13 = s1 & 3.  s1 now lives in r3; testing r0 here would
	   always give zero, since r0 was just cleared above.  */
	andi	r13, r3, 3
	movi	r19, 4

	/* Test if s1 is not 4-byte aligned.  */
	bnez	r13, .L_s1_not_aligned

	LABLE_ALIGN
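	/* LABLE_ALIGN ("sysdep.h") pads out to align the loop entry.  */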
.L_s1_aligned:
	/* r18 = len / 16: number of 16-byte blocks to compare.  */
	zext	r18, r2, 31, 4
	/* If len is less than 16 bytes, fall back to word compares.  */
	bez	r18, .L_compare_by_word

.L_compare_by_4word:
	/* Compare 16 bytes (four words) per iteration.  */
	ldw	r20, (r3, 0)
	ldw	r21, (r1, 0)
	/* If s1[i] != s2[i], goto .L_byte_check.  */
	cmpne	r20, r21
	bt	.L_byte_check

	ldw	r20, (r3, 4)
	ldw	r21, (r1, 4)
	cmpne	r20, r21
	bt	.L_byte_check

	ldw	r20, (r3, 8)
	ldw	r21, (r1, 8)
	cmpne	r20, r21
	bt	.L_byte_check

	ldw	r20, (r3, 12)
	ldw	r21, (r1, 12)
	cmpne	r20, r21
	bt	.L_byte_check

	/* PRE_BNEZAD/BNEZAD ("sysdep.h" macros): decrement r18 and
	   branch back while it is still non-zero.  */
	PRE_BNEZAD (r18)
	addi	r3, 16
	addi	r1, 16

	BNEZAD (r18, .L_compare_by_4word)

.L_compare_by_word:
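	/* r18 = (len >> 2) & 3: remaining whole words.  */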
	zext	r18, r2, 3, 2
	bez	r18, .L_compare_by_byte
.L_compare_by_word_loop:
	ldw	r20, (r3, 0)
	ldw	r21, (r1, 0)
	addi	r3, 4
	PRE_BNEZAD (r18)
	cmpne	r20, r21
	addi	r1, 4
	bt	.L_byte_check
	BNEZAD (r18, .L_compare_by_word_loop)

.L_compare_by_byte:
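	/* r18 = len & 3: trailing bytes (the whole length if len < 4).  */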
	zext	r18, r2, 1, 0
	bez	r18, .L_return
.L_compare_by_byte_loop:
	ldb	r0, (r3, 0)
	ldb	r4, (r1, 0)
	addi	r3, 1
	subu	r0, r4
	PRE_BNEZAD (r18)
	addi	r1, 1
	bnez	r0, .L_return
	BNEZAD (r18, .L_compare_by_byte_loop)

.L_return:
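	/* r0 holds the result: zero, or the difference of the first
	   differing bytes.  Restore the saved r4 and return.  */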
	mov	r4, r12
	rts

# ifdef __CSKYBE__
/* s1[i] != s2[i] in word, so we check byte 0.  */
.L_byte_check:
	xtrb0	r0, r20
	xtrb0	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 1 */
	xtrb1	r0, r20
	xtrb1	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 2 */
	xtrb2	r0, r20
	xtrb2	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 3: branch back, matching the little-endian path;
	   falling through here would run .L_s1_not_aligned.  */
	xtrb3	r0, r20
	xtrb3	r2, r21
	subu	r0, r2
	br	.L_return
# else
/* s1[i] != s2[i] in word, so we check byte 3.  */
.L_byte_check:
	xtrb3	r0, r20
	xtrb3	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 2 */
	xtrb2	r0, r20
	xtrb2	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 1 */
	xtrb1	r0, r20
	xtrb1	r2, r21
	subu	r0, r2
	bnez	r0, .L_return

	/* check byte 0 */
	xtrb0	r0, r20
	xtrb0	r2, r21
	subu	r0, r2
	br	.L_return
# endif /* !__CSKYBE__ */

/* Compare when s1 is not aligned.  */
.L_s1_not_aligned:
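	/* Byte-compare the 4 - (s1 & 3) leading bytes until s1 hits a
	   word boundary, then redo the aligned comparison.  */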
	sub	r13, r19, r13
	sub	r2, r13
.L_s1_not_aligned_loop:
	ldb	r0, (r3, 0)
	ldb	r4, (r1, 0)
	addi	r3, 1
	subu	r0, r4
	PRE_BNEZAD (r13)
	addi	r1, 1
	bnez	r0, .L_return
	BNEZAD (r13, .L_s1_not_aligned_loop)
	br	.L_s1_aligned
ENDPROC(memcmp)