This source file includes the following definitions:
- ATOMIC_INCR (alpha)
- ATOMIC_DECR (alpha)
- ATOMIC_DECR_AND_CHECK (alpha)
- ATOMIC_INCR (i386)
- ATOMIC_DECR (i386)
- ATOMIC_DECR_AND_CHECK (i386)
- ATOMIC_INCR (generic)
- ATOMIC_DECR (generic)
- ATOMIC_DECR_AND_CHECK (generic)
- ip_rt_fast_lock
- ip_rt_fast_unlock
- ip_rt_unlock
- ip_rt_hash_code
- ip_rt_put
- ip_rt_route
- ip_check_route
#ifndef _ROUTE_H
#define _ROUTE_H

#include <linux/config.h>

/* Routing cache geometry and debug knobs. */
#define RT_CACHE_DEBUG		1

#define RT_HASH_DIVISOR		256
#define RT_CACHE_SIZE_MAX	256

#define RTZ_HASH_DIVISOR	256

/*
 * A routing zone switches to hashed lookup once it grows past this
 * many entries; with verbose cache debugging, hash from the start.
 */
#if RT_CACHE_DEBUG >= 2
#define RTZ_HASHING_LIMIT	0
#else
#define RTZ_HASHING_LIMIT	16
#endif

/* Maximum time an unused cache entry may live (300 seconds). */
#define RT_CACHE_TIMEOUT		(HZ*300)

/*
 * Entries whose last-use times differ by less than this are treated
 * as equally recent, which keeps LRU reordering from thrashing.
 */
#define RT_CACHE_BUBBLE_THRESHOULD	(HZ*5)

#include <linux/route.h>

#ifdef __KERNEL__
#define RTF_LOCAL 0x8000		/* route to a local address */
#endif

/*
 * Atomic counter helpers, selected per architecture.
 */
#if defined(__alpha__)

/* Alpha: load-locked/store-conditional loop; retry if the store fails. */
static __inline__ void ATOMIC_INCR(unsigned int * addr)
{
	unsigned tmp;

	__asm__ __volatile__(
		"1:\n\
		ldl_l %1,%2\n\
		addl %1,1,%1\n\
		stl_c %1,%0\n\
		beq %1,1b\n"
		: "=m" (*addr), "=&r" (tmp)
		: "m" (*addr));
}

static __inline__ void ATOMIC_DECR(unsigned int * addr)
{
	unsigned tmp;

	__asm__ __volatile__(
		"1:\n\
		ldl_l %1,%2\n\
		subl %1,1,%1\n\
		stl_c %1,%0\n\
		beq %1,1b\n"
		: "=m" (*addr), "=&r" (tmp)
		: "m" (*addr));
}

/* Decrement *addr and return the new value. */
static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
{
	unsigned tmp;
	int result;

	__asm__ __volatile__(
		"1:\n\
		ldl_l %1,%3\n\
		subl %1,1,%1\n\
		mov %1,%2\n\
		stl_c %1,%0\n\
		beq %1,1b\n"
		: "=m" (*addr), "=&r" (tmp), "=&r" (result)
		: "m" (*addr));
	return result;
}

#elif defined(__i386__)
#include <asm/bitops.h>		/* supplies the ADDR dereferencing macro used below */

extern __inline__ void ATOMIC_INCR(void * addr)
{
	__asm__ __volatile__(
		"incl %0"
		:"=m" (ADDR));
}

extern __inline__ void ATOMIC_DECR(void * addr)
{
	__asm__ __volatile__(
		"decl %0"
		:"=m" (ADDR));
}

/*
 * Decrement and read back the new value.  The read is a separate
 * instruction, so the pair is not atomic as a whole; each single
 * instruction is still safe against interrupts on the same CPU.
 */
extern __inline__ unsigned long ATOMIC_DECR_AND_CHECK(void * addr)
{
	unsigned long retval;
	__asm__ __volatile__(
		"decl %0\nmovl %0,%1"
		: "=m" (ADDR), "=r" (retval));
	return retval;
}

#else

/* Generic fallback: plain volatile accesses, not genuinely atomic. */
static __inline__ void ATOMIC_INCR(unsigned int * addr)
{
	(*(volatile unsigned int *)addr)++;
}

static __inline__ void ATOMIC_DECR(unsigned int * addr)
{
	(*(volatile unsigned int *)addr)--;
}

static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
{
	ATOMIC_DECR(addr);
	return *(volatile unsigned int *)addr;
}

#endif
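/*
 * Usage sketch (editorial illustration, not from the original source):
 * the helpers above give the usual reference-count idiom.  The names
 * my_refcnt and my_release are hypothetical.
 *
 *	static unsigned int my_refcnt = 1;
 *	extern void my_release(void);
 *
 *	static void my_put(void)
 *	{
 *		if (!ATOMIC_DECR_AND_CHECK(&my_refcnt))
 *			my_release();
 *	}
 */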

/*
 * One entry in the routing cache.  Entries hang off the buckets of
 * ip_rt_hash_table, chained through rt_next.
 */
struct rtable
{
	struct rtable	*rt_next;
	__u32		rt_dst;
	__u32		rt_src;
	__u32		rt_gateway;
	unsigned	rt_refcnt;
	unsigned	rt_use;
	unsigned long	rt_window;
	unsigned long	rt_lastuse;
	struct hh_cache	*rt_hh;
	struct device	*rt_dev;
	unsigned short	rt_flags;
	unsigned short	rt_mtu;
	unsigned short	rt_irtt;
	unsigned char	rt_tos;
};

extern void ip_rt_flush(struct device *dev);
extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
extern struct rtable *ip_rt_slow_route(__u32 daddr, int local);
extern int rt_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
extern int ip_rt_ioctl(unsigned int cmd, void *arg);
extern int ip_rt_new(struct rtentry *rt);
extern void ip_rt_check_expire(void);
extern void ip_rt_advice(struct rtable **rp, int advice);

extern void ip_rt_run_bh(void);
extern int ip_rt_lock;
extern unsigned ip_rt_bh_mask;
extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];

/*
 * The "fast" lock is a plain reference count on ip_rt_lock; while it
 * is held, deferred cache work is postponed.
 */
extern __inline__ void ip_rt_fast_lock(void)
{
	ATOMIC_INCR(&ip_rt_lock);
}

extern __inline__ void ip_rt_fast_unlock(void)
{
	ATOMIC_DECR(&ip_rt_lock);
}

/*
 * Drop the lock and, if we were the last holder and bottom-half work
 * is pending (ip_rt_bh_mask), run it now.
 */
extern __inline__ void ip_rt_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&ip_rt_lock) && ip_rt_bh_mask)
		ip_rt_run_bh();
}
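/*
 * Locking pattern sketch (editorial illustration): readers bracket
 * cache accesses with the fast lock, and the final ip_rt_unlock()
 * flushes any bottom-half work that was deferred meanwhile.
 *
 *	ip_rt_fast_lock();
 *	... walk ip_rt_hash_table ...
 *	ip_rt_unlock();
 *
 * ip_rt_route() below follows this pattern on a cache hit.
 */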

/*
 * Fold a 32-bit address into an 8-bit bucket index
 * (RT_HASH_DIVISOR is 256).
 */
extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
{
	unsigned tmp = addr + (addr>>16);
	return (tmp + (tmp>>8)) & 0xFF;
}
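/*
 * Worked example (editorial illustration): with addr = 0x0A010203,
 *
 *	tmp = 0x0A010203 + 0x00000A01 = 0x0A010C04
 *	tmp + (tmp>>8) = 0x0A010C04 + 0x000A010C = 0x0A0B0D10
 *
 * so (0x0A0B0D10 & 0xFF) = 0x10 and the entry lands in bucket 16.
 */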

/*
 * Drop a reference.  When built into the kernel this is inlined with
 * the body below; modules get only the declaration and link against
 * the kernel's copy.
 */
extern __inline__ void ip_rt_put(struct rtable * rt)
#ifndef MODULE
{
	if (rt)
		ATOMIC_DECR(&rt->rt_refcnt);
}
#else
;
#endif

#ifdef CONFIG_KERNELD
extern struct rtable * ip_rt_route(__u32 daddr, int local);
#else
/*
 * Fast-path lookup: scan one hash chain for daddr under the fast
 * lock.  XORing the bucket index with `local` keeps local and
 * non-local entries for the same address on different chains.  On a
 * hit, bump the use and reference counts and drop the lock; on a
 * miss, ip_rt_slow_route() is entered with the lock still held and
 * is responsible for releasing it.
 */
extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local)
#ifndef MODULE
{
	struct rtable * rth;

	ip_rt_fast_lock();

	for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
	{
		if (rth->rt_dst == daddr)
		{
			rth->rt_lastuse = jiffies;
			ATOMIC_INCR(&rth->rt_use);
			ATOMIC_INCR(&rth->rt_refcnt);
			ip_rt_unlock();
			return rth;
		}
	}
	return ip_rt_slow_route(daddr, local);
}
#else
;
#endif
#endif
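/*
 * Usage sketch (editorial illustration; my_transmit is hypothetical):
 * every route returned by ip_rt_route() carries a reference that the
 * caller must drop with ip_rt_put().
 *
 *	struct rtable *rt = ip_rt_route(daddr, 0);
 *	if (rt) {
 *		my_transmit(rt->rt_dev, rt->rt_gateway);
 *		ip_rt_put(rt);
 *	}
 */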

/*
 * Validate a caller-cached route *rp against daddr: keep it only if
 * it is still up, matches the destination, and agrees on local versus
 * non-local delivery (the XOR test on RTF_LOCAL); otherwise release
 * it and look up a fresh route.
 */
extern __inline__ struct rtable * ip_check_route(struct rtable ** rp,
						 __u32 daddr, int local)
{
	struct rtable * rt = *rp;

	if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP)
	    || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
	{
		ip_rt_put(rt);
		rt = ip_rt_route(daddr, local);
		*rp = rt;
	}
	return rt;
}
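/*
 * Usage sketch (editorial illustration; cached_rt stands for a
 * caller-held slot such as a per-socket route pointer):
 *
 *	struct rtable *cached_rt = NULL;
 *	...
 *	struct rtable *rt = ip_check_route(&cached_rt, daddr, 0);
 *	if (rt)
 *		... transmit via rt->rt_dev ...
 *
 * The reference stays in cached_rt across calls; a stale entry is
 * released inside ip_check_route() when it is replaced.
 */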

#endif /* _ROUTE_H */