// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/fpu.h>
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit).
*
* Returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic.
*
* This function must be called with even lengths, except
* for the last fragment, which may be odd.
*
* It's best to have buff aligned on a 64-bit boundary.
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
DECLARE_KERNEL_FPU_ONSTACK8(vxstate);
/* No vector facility: fall back to the scalar CKSM-based routine. */
if (!cpu_has_vx())
return cksm(buff, len, sum);
/*
 * Grab vector registers V16-V23 for kernel use: V16-V19 hold four
 * independent checksum accumulators, V20-V23 stage input data.
 */
kernel_fpu_begin(&vxstate, KERNEL_VXR_V16V23);
/* Seed accumulator V16 (element 1) with the incoming 32-bit sum. */
fpu_vlvgf(16, (__force u32)sum, 1);
fpu_vzero(17);
fpu_vzero(18);
fpu_vzero(19);
/*
 * Main loop: consume 64 bytes per iteration, folding each of the
 * four 16-byte chunks into its own accumulator so the VCKSM
 * operations are independent of each other.
 */
while (len >= 64) {
fpu_vlm(20, 23, buff);
fpu_vcksm(16, 20, 16);
fpu_vcksm(17, 21, 17);
fpu_vcksm(18, 22, 18);
fpu_vcksm(19, 23, 19);
buff += 64;
len -= 64;
}
/* Tail: at most one 32-byte chunk remains after the loop above. */
while (len >= 32) {
fpu_vlm(20, 21, buff);
fpu_vcksm(16, 20, 16);
fpu_vcksm(17, 21, 17);
buff += 32;
len -= 32;
}
/* Tail: at most one 16-byte chunk. */
while (len >= 16) {
fpu_vl(20, buff);
fpu_vcksm(16, 20, 16);
buff += 16;
len -= 16;
}
/*
 * Final 1-15 bytes: VLL loads exactly the remaining bytes (the
 * second operand is the highest byte index, hence len - 1) and
 * zeroes the rest of V20, so the partial chunk folds in cleanly.
 */
if (len) {
fpu_vll(20, len - 1, buff);
fpu_vcksm(16, 20, 16);
}
/* Reduce the four accumulators into V16: (16+17) + (18+19). */
fpu_vcksm(18, 19, 18);
fpu_vcksm(16, 17, 16);
fpu_vcksm(16, 18, 16);
/* Extract the 32-bit result from element 1, mirroring the seed above. */
sum = (__force __wsum)fpu_vlgvf(16, 1);
kernel_fpu_end(&vxstate, KERNEL_VXR_V16V23);
return sum;
}
EXPORT_SYMBOL(csum_partial);