/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <asm/byteorder.h>
#include "ip.h"
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
					  unsigned int ihl) {
	unsigned short int sum;

#ifdef __i386__
	/*
	 * Sum the header 32 bits at a time, then fold the carries into
	 * 16 bits and return the one's complement of the result.
	 */
	__asm__("
	    movl (%%esi), %%eax
	    andl $15, %%ecx
	    subl $4, %%ecx
	    jbe 2f
	    addl 4(%%esi), %%eax
	    adcl 8(%%esi), %%eax
	    adcl 12(%%esi), %%eax
1:	    adcl 16(%%esi), %%eax
	    lea 4(%%esi), %%esi
	    decl %%ecx
	    jne 1b
	    adcl $0, %%eax
	    movl %%eax, %%ecx
	    shrl $16, %%eax
	    addw %%cx, %%ax
	    adcl $0, %%eax
	    notl %%eax
	    andl $65535, %%eax
2:
	    "
	    : "=a" (sum)
	    : "S" (iph), "c"(ihl)
	    : "ax", "cx", "si");
#else
#error Not implemented for this CPU
#endif
	return(sum);
}
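
/*
 * Illustrative sketch (not part of the original header): filling in the
 * header checksum of an outgoing IP datagram, the way ip.c uses
 * ip_fast_csum() in its ip_send_check()-style code.  Assumes "struct iphdr"
 * (with its "ihl" and "check" fields) is visible through the "ip.h"
 * include above; the function name is an example only.
 */
static inline void example_ip_fill_csum(struct iphdr *iph)
{
	iph->check = 0;		/* the field must be zero while summing */
	iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
}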




/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */

static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum) {
#ifdef __i386__
	/*
	 * Add the pseudo-header fields (addresses, length, protocol) into
	 * the running sum, then fold to 16 bits and complement.
	 */
	__asm__("
	addl %2, %0
	adcl %3, %0
	adcl %4, %0
	adcl $0, %0
	movl %0, %2
	shrl $16, %2
	addw %w2, %w0
	adcl $0, %0
	notl %0
	andl $65535, %0
	"
	: "=r" (sum)
	: "0" (daddr), "S"(saddr), "r"((ntohs(len)<<16)+proto*256), "r"(sum)
	: "si" );
#else
#error Not implemented for this CPU
#endif
	return((unsigned short)sum);
}
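
/*
 * Illustrative sketch (not part of the original header): finishing a TCP
 * checksum with csum_tcpudp_magic().  It assumes the caller has already
 * accumulated the 32-bit partial sum over the TCP header and data, e.g.
 * with csum_partial() declared further down, which is roughly how tcp.c
 * builds its checksums.  The function name is an example only.
 */
static inline unsigned short example_tcp_finish_csum(unsigned long saddr,
						     unsigned long daddr,
						     unsigned short len,
						     unsigned int partial)
{
	/* 6 == IPPROTO_TCP; len is the TCP header + data length in bytes */
	return csum_tcpudp_magic(saddr, daddr, len, 6, partial);
}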



/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum);
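
/*
 * Illustrative sketch (not part of the original header): because the
 * 32-bit result of csum_partial() can be fed back in as "sum", a packet
 * that lives in two pieces can be checksummed in two calls.  Only the
 * final piece may have an odd length; buffer names are examples only.
 */
static inline unsigned int example_csum_two_pieces(unsigned char *part1, int len1,
						   unsigned char *part2, int len2)
{
	unsigned int sum;

	sum = csum_partial(part1, len1, 0);	/* len1 must be even */
	sum = csum_partial(part2, len2, sum);	/* last piece may be odd */
	return sum;	/* still a 32-bit partial sum, not yet folded */
}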



/*
 * the same as csum_partial, but copies from fs:src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary
 */

unsigned int csum_partial_copyffs(char *src, char *dst, int len, int sum);
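
/*
 * Illustrative sketch (not part of the original header): copying "len"
 * bytes from a user-space buffer into a kernel buffer while accumulating
 * the checksum in a single pass, instead of a separate copy followed by
 * csum_partial().  Pointer names are examples only; "user_src" is assumed
 * to point into the fs segment (user space).
 */
static inline unsigned int example_copy_and_csum(char *user_src, char *dst,
						 int len, unsigned int sum)
{
	return csum_partial_copyffs(user_src, dst, len, sum);
}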




/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
	unsigned short int sum;

#ifdef __i386__
	/*
	 * Fold the 32-bit sum from csum_partial() down to 16 bits and
	 * return its one's complement.
	 */
	__asm__("
	    movl %%eax, %%ecx
	    shrl $16, %%ecx
	    addw %%cx, %%ax
	    adcl $0, %%eax
	    notl %%eax
	    andl $65535, %%eax
	    "
	    : "=a"(sum)
	    : "a" (csum_partial(buff, len, 0))
	    : "cx");
#else
#error Not implemented for this CPU
#endif
	return(sum);
}
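
/*
 * Illustrative sketch (not part of the original header): checksumming a
 * complete outgoing ICMP message, roughly the way icmp.c uses
 * ip_compute_csum().  The ICMP checksum lives in bytes 2-3 of the header
 * and must be zero while the sum is computed; the function name is an
 * example only.
 */
static inline void example_icmp_fill_csum(unsigned char *icmp_msg, int len)
{
	icmp_msg[2] = 0;
	icmp_msg[3] = 0;
	*(unsigned short *) (icmp_msg + 2) = ip_compute_csum(icmp_msg, len);
}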

#endif