// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated CRC32 implementation with Zbc extension.
 *
 * Copyright (C) 2024 Intel Corporation
 */

#include <asm/hwcap.h>
#include <asm/alternative-macros.h>
#include <asm/byteorder.h>

#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/crc32poly.h>
#include <linux/crc32.h>
#include <linux/byteorder/generic.h>

/*
 * Refer to https://www.corsix.org/content/barrett-reduction-polynomials for
 * a better understanding of how this math works.
 *
 * let "+" denote polynomial add (XOR)
 * let "-" denote polynomial sub (XOR)
 * let "*" denote polynomial multiplication
 * let "/" denote polynomial floor division
 * let "S" denote source data, XLEN bits wide
 * let "P" denote the CRC32 polynomial
 * let "T" denote 2^(XLEN+32)
 * let "QT" denote the quotient of T/P, with the bit for 2^XLEN being implicit
 *
 * crc32(S, P)
 * => S * (2^32) - S * (2^32) / P * P
 * => lowest 32 bits of: S * (2^32) / P * P
 * => lowest 32 bits of: S * (2^32) * (T / P) / T * P
 * => lowest 32 bits of: S * (2^32) * quotient / T * P
 * => lowest 32 bits of: S * quotient / 2^XLEN * P
 * => lowest 32 bits of: (clmul_high_part(S, QT) + S) * P
 * => clmul_low_part(clmul_high_part(S, QT) + S, P)
 *
 * In the implementations below, the BE case is the more intuitive one, since
 * the higher-order bits sit at the more significant positions.
 */
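
/*
 * To see where the "+ S" term comes from: QT carries an implicit bit for
 * 2^XLEN, so quotient = 2^XLEN + QT and
 *
 *   S * quotient / 2^XLEN = S + S * QT / 2^XLEN
 *                         = S + clmul_high_part(S, QT)
 *
 * For example, on RV64, T = 2^96 and QT holds the low 64 bits of the
 * 65-bit quotient (2^96) / P.
 */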

#if __riscv_xlen == 64
/* Slide by XLEN bits per iteration */
# define STEP_ORDER 3

/* Each below polynomial quotient has an implicit bit for 2^XLEN */

/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in LE format */
# define CRC32_POLY_QT_LE 0x5a72d812fb808b20

/* Polynomial quotient of (2^(XLEN+32))/CRC32C_POLY, in LE format */
# define CRC32C_POLY_QT_LE 0xa434f61c6f5389f8

/*
 * Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in BE format; it should be
 * the same as the bit-reversed version of CRC32_POLY_QT_LE.
 */
# define CRC32_POLY_QT_BE 0x04d101df481b4e5a

static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr)
{
	return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr);
}

static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
{
	u32 crc;

	/* We don't have a "clmulrh" insn, so use clmul + slli instead. */
	asm volatile (".option push\n"
		      ".option arch,+zbc\n"
		      "clmul %0, %1, %2\n"
		      "slli %0, %0, 1\n"
		      "xor %0, %0, %1\n"	/* the "+ S" term for QT's implicit 2^XLEN bit */
		      "clmulr %0, %0, %3\n"	/* multiply by the (pre-shifted) polynomial */
		      "srli %0, %0, 32\n"	/* bring the 32 CRC bits down to bits 31:0 */
		      ".option pop\n"
		      : "=&r" (crc)
		      : "r" (s),
			"r" (poly_qt),
			"r" ((u64)poly << 32)
		      :);
	return crc;
}

static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr)
{
	return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr);
}

#elif __riscv_xlen == 32
# define STEP_ORDER 2

/* Each quotient should match the upper half of its analog in RV64 */
# define CRC32_POLY_QT_LE 0xfb808b20
# define CRC32C_POLY_QT_LE 0x6f5389f8
# define CRC32_POLY_QT_BE 0x04d101df

static inline u32 crc32_le_prep(u32 crc, unsigned long const *ptr)
{
	return crc ^ (__force u32)__cpu_to_le32(*ptr);
}

static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
{
	u32 crc;

	/* We don't have a "clmulrh" insn, so use clmul + slli instead. */
	asm volatile (".option push\n"
		      ".option arch,+zbc\n"
		      "clmul %0, %1, %2\n"
		      "slli %0, %0, 1\n"
		      "xor %0, %0, %1\n"	/* the "+ S" term for QT's implicit 2^XLEN bit */
		      "clmulr %0, %0, %3\n"	/* XLEN == 32, so no final shift is needed */
		      ".option pop\n"
		      : "=&r" (crc)
		      : "r" (s),
			"r" (poly_qt),
			"r" (poly)
		      :);
	return crc;
}

static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr)
{
	return crc ^ (__force u32)__cpu_to_be32(*ptr);
}
#else
# error "Unexpected __riscv_xlen"
#endif

static inline u32 crc32_be_zbc(unsigned long s)
{
	u32 crc;

	asm volatile (".option push\n"
		      ".option arch,+zbc\n"
		      "clmulh %0, %1, %2\n"	/* clmul_high_part(S, QT) */
		      "xor %0, %0, %1\n"	/* the "+ S" term for QT's implicit 2^XLEN bit */
		      "clmul %0, %0, %3\n"	/* lowest bits of the product with P */
		      ".option pop\n"
		      : "=&r" (crc)
		      : "r" (s),
			"r" (CRC32_POLY_QT_BE),
			"r" (CRC32_POLY_BE)
		      :);
	return crc;
}

#define STEP (1 << STEP_ORDER)
#define OFFSET_MASK (STEP - 1)
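/* e.g. RV64 (STEP_ORDER == 3): STEP == 8 bytes, OFFSET_MASK == 0x7;
 *      RV32 (STEP_ORDER == 2): STEP == 4 bytes, OFFSET_MASK == 0x3
 */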

typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len);

static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
				     size_t len, u32 poly,
				     unsigned long poly_qt)
{
	size_t bits = len * 8;
	unsigned long s = 0;
	u32 crc_low = 0;

	/* Gather the head bytes at the top of s, in LE order. */
	for (int i = 0; i < len; i++)
		s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8);

	s ^= (unsigned long)crc << (__riscv_xlen - bits);
	if (__riscv_xlen == 32 || len < sizeof(u32))
		crc_low = crc >> bits;

	crc = crc32_le_zbc(s, poly, poly_qt);
	crc ^= crc_low;

	return crc;
}
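
/*
 * For example, on RV64 with len == 2, the loop above leaves byte 0 at
 * bits 55:48 of s and byte 1 at bits 63:56; the XOR folds the low 16 bits
 * of crc into bits 63:48, and the remaining upper bits of crc are carried
 * forward through crc_low = crc >> 16.
 */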

static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
					  size_t len, u32 poly,
					  unsigned long poly_qt,
					  fallback crc_fb)
{
	size_t offset, head_len, tail_len;
	unsigned long const *p_ul;
	unsigned long s;

	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
			     RISCV_ISA_EXT_ZBC, 1)
		 : : : : legacy);

	/* Handle the unaligned head. */
	offset = (unsigned long)p & OFFSET_MASK;
	if (offset && len) {
		head_len = min(STEP - offset, len);
		crc = crc32_le_unaligned(crc, p, head_len, poly, poly_qt);
		p += head_len;
		len -= head_len;
	}

	tail_len = len & OFFSET_MASK;
	len = len >> STEP_ORDER;
	p_ul = (unsigned long const *)p;

	/* Process the aligned middle, one XLEN-sized word per iteration. */
	for (int i = 0; i < len; i++) {
		s = crc32_le_prep(crc, p_ul);
		crc = crc32_le_zbc(s, poly, poly_qt);
		p_ul++;
	}

	/* Handle the tail bytes. */
	p = (unsigned char const *)p_ul;
	if (tail_len)
		crc = crc32_le_unaligned(crc, p, tail_len, poly, poly_qt);

	return crc;

legacy:
	return crc_fb(crc, p, len);
}

u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE,
				crc32_le_base);
}

u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
				CRC32C_POLY_QT_LE, __crc32c_le_base);
}
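
/*
 * Note: these keep the kernel's seeding convention for crc32_le() and
 * __crc32c_le(); a conventional CRC-32 of a whole buffer is typically
 * computed as crc32_le(~0, buf, len) ^ ~0, with both inversions done by
 * the caller.
 */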

static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
				     size_t len)
{
	size_t bits = len * 8;
	unsigned long s = 0;
	u32 crc_low = 0;

	/* Gather the head bytes at the bottom of s, in BE order. */
	for (int i = 0; i < len; i++)
		s = *p++ | (s << 8);

	if (__riscv_xlen == 32 || len < sizeof(u32)) {
		s ^= crc >> (32 - bits);
		crc_low = crc << bits;
	} else {
		s ^= (unsigned long)crc << (bits - 32);
	}

	crc = crc32_be_zbc(s);
	crc ^= crc_low;

	return crc;
}

u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
	size_t offset, head_len, tail_len;
	unsigned long const *p_ul;
	unsigned long s;

	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
			     RISCV_ISA_EXT_ZBC, 1)
		 : : : : legacy);

	/* Handle the unaligned head. */
	offset = (unsigned long)p & OFFSET_MASK;
	if (offset && len) {
		head_len = min(STEP - offset, len);
		crc = crc32_be_unaligned(crc, p, head_len);
		p += head_len;
		len -= head_len;
	}

	tail_len = len & OFFSET_MASK;
	len = len >> STEP_ORDER;
	p_ul = (unsigned long const *)p;

	/* Process the aligned middle, one XLEN-sized word per iteration. */
	for (int i = 0; i < len; i++) {
		s = crc32_be_prep(crc, p_ul);
		crc = crc32_be_zbc(s);
		p_ul++;
	}

	/* Handle the tail bytes. */
	p = (unsigned char const *)p_ul;
	if (tail_len)
		crc = crc32_be_unaligned(crc, p, tail_len);

	return crc;

legacy:
	return crc32_be_base(crc, p, len);
}