This source file includes the following definitions:
- ATOMIC_INCR (Alpha)
- ATOMIC_DECR (Alpha)
- ATOMIC_DECR_AND_CHECK (Alpha)
- ATOMIC_INCR (i386)
- ATOMIC_DECR (i386)
- ATOMIC_DECR_AND_CHECK (i386)
- ATOMIC_INCR (generic fallback)
- ATOMIC_DECR (generic fallback)
- ATOMIC_DECR_AND_CHECK (generic fallback)
- ip_rt_fast_lock
- ip_rt_fast_unlock
- ip_rt_unlock
- ip_rt_hash_code
- ip_rt_put
- ip_rt_route
- ip_check_route
#ifndef _ROUTE_H
#define _ROUTE_H

#include <linux/config.h>

#define RT_CACHE_DEBUG 1

#define RT_HASH_DIVISOR 256
#define RT_CACHE_SIZE_MAX 256

#define RTZ_HASH_DIVISOR 256

#if RT_CACHE_DEBUG >= 2
#define RTZ_HASHING_LIMIT 0
#else
#define RTZ_HASHING_LIMIT 16
#endif

#define RT_CACHE_TIMEOUT (HZ*300)

#define RT_CACHE_BUBBLE_THRESHOLD (HZ*5)

#include <linux/route.h>

#ifdef __KERNEL__
#define RTF_LOCAL 0x8000
#endif

#if defined(__alpha__)

/*
 * Alpha: atomic counters built on load-locked/store-conditional.
 * stl_c writes 0 into its source register when the store fails, so
 * "beq %1,1b" retries the whole sequence until it succeeds.
 */
static __inline__ void ATOMIC_INCR(unsigned int * addr)
{
        unsigned tmp;

        __asm__ __volatile__(
                "1:\n\
                ldl_l %1,%2\n\
                addl %1,1,%1\n\
                stl_c %1,%0\n\
                beq %1,1b\n"
                : "=m" (*addr), "=&r" (tmp)
                : "m" (*addr));
}

static __inline__ void ATOMIC_DECR(unsigned int * addr)
{
        unsigned tmp;

        __asm__ __volatile__(
                "1:\n\
                ldl_l %1,%2\n\
                subl %1,1,%1\n\
                stl_c %1,%0\n\
                beq %1,1b\n"
                : "=m" (*addr), "=&r" (tmp)
                : "m" (*addr));
}

static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
{
        unsigned tmp;
        int result;

        /* The decremented value is copied into "result" before stl_c
           overwrites the working register with the store status. */
        __asm__ __volatile__(
                "1:\n\
                ldl_l %1,%3\n\
                subl %1,1,%1\n\
                mov %1,%2\n\
                stl_c %1,%0\n\
                beq %1,1b\n"
                : "=m" (*addr), "=&r" (tmp), "=&r" (result)
                : "m" (*addr));
        return result;
}

#elif defined(__i386__)
#include <asm/bitops.h>         /* supplies the ADDR macro used below */

extern __inline__ void ATOMIC_INCR(void * addr)
{
        __asm__ __volatile__(
                "incl %0"
                :"=m" (ADDR));
}

extern __inline__ void ATOMIC_DECR(void * addr)
{
        __asm__ __volatile__(
                "decl %0"
                :"=m" (ADDR));
}

extern __inline__ unsigned long ATOMIC_DECR_AND_CHECK(void * addr)
{
        unsigned long retval;
        __asm__ __volatile__(
                "decl %0\nmovl %0,%1"
                : "=m" (ADDR), "=r" (retval));
        return retval;
}

#else

/* Generic fallback: plain volatile increments; not SMP-safe by itself. */
static __inline__ void ATOMIC_INCR(unsigned int * addr)
{
        (*(__volatile__ unsigned int*)addr)++;
}

static __inline__ void ATOMIC_DECR(unsigned int * addr)
{
        (*(__volatile__ unsigned int*)addr)--;
}

static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
{
        ATOMIC_DECR(addr);
        return *(volatile unsigned int*)addr;
}

#endif
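
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * three primitives above implement a bare reference counter, e.g.
 *
 *      unsigned int refcnt = 1;
 *
 *      ATOMIC_INCR(&refcnt);                   // take a reference -> 2
 *      ATOMIC_DECR(&refcnt);                   // drop one         -> 1
 *      if (!ATOMIC_DECR_AND_CHECK(&refcnt))    // drop the last one
 *              free_the_object();              // hypothetical cleanup
 *
 * This is the pattern ip_rt_put() and ip_rt_unlock() below depend on.
 */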

struct rtable
{
        struct rtable   *rt_next;       /* next entry on this hash chain */
        __u32           rt_dst;         /* destination address */
        __u32           rt_src;         /* source address */
        __u32           rt_gateway;     /* next hop */
        unsigned        rt_refcnt;      /* references held by users */
        unsigned        rt_use;         /* lookup hits */
        unsigned long   rt_window;      /* window clamping */
        unsigned long   rt_lastuse;     /* jiffies of last use */
        struct hh_cache *rt_hh;         /* cached hardware header */
        struct device   *rt_dev;        /* output device */
        unsigned short  rt_flags;       /* RTF_* flags */
        unsigned short  rt_mtu;
        unsigned short  rt_irtt;        /* initial round trip time */
        unsigned char   rt_tos;         /* type of service */
};

extern void ip_rt_flush(struct device *dev);
extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
extern struct rtable *ip_rt_slow_route(__u32 daddr, int local);
extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
extern int ip_rt_ioctl(unsigned int cmd, void *arg);
extern int ip_rt_new(struct rtentry *rt);
extern int ip_rt_kill(struct rtentry *rt);
extern void ip_rt_check_expire(void);
extern void ip_rt_advice(struct rtable **rp, int advice);

extern void ip_rt_run_bh(void);
extern int ip_rt_lock;
extern unsigned ip_rt_bh_mask;
extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];

extern __inline__ void ip_rt_fast_lock(void)
{
        ATOMIC_INCR(&ip_rt_lock);
}

extern __inline__ void ip_rt_fast_unlock(void)
{
        ATOMIC_DECR(&ip_rt_lock);
}

/* Drop the lock; if this was the outermost holder and bottom-half work
   is pending in ip_rt_bh_mask, run it now. */
extern __inline__ void ip_rt_unlock(void)
{
        if (!ATOMIC_DECR_AND_CHECK(&ip_rt_lock) && ip_rt_bh_mask)
                ip_rt_run_bh();
}
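
/*
 * Note: ip_rt_lock is a nesting counter, not a sleeping lock.  Readers bump
 * it around cache lookups; a writer that finds it held records the pending
 * operation in ip_rt_bh_mask and lets the final ip_rt_unlock() run it via
 * ip_rt_run_bh().  A minimal sketch of the writer side (RT_BH_FLUSH is a
 * hypothetical bit name, used here only for illustration):
 *
 *      if (ip_rt_lock)
 *              ip_rt_bh_mask |= RT_BH_FLUSH;   // defer until unlock
 *      else {
 *              ip_rt_fast_lock();
 *              ...modify the hash table...
 *              ip_rt_unlock();                 // runs deferred work too
 *      }
 */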

extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
{
        unsigned tmp = addr + (addr>>16);
        return (tmp + (tmp>>8)) & 0xFF;
}
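
/*
 * Worked example (illustrative): the hash folds the 32-bit address down to
 * 8 bits, selecting one of the RT_HASH_DIVISOR (256) buckets.  For
 * addr = 0xc0a80101 (192.168.1.1, shown in host order for simplicity):
 *
 *      tmp  = 0xc0a80101 + 0xc0a8            = 0xc0a8c1a9
 *      hash = (0xc0a8c1a9 + 0xc0a8c1) & 0xFF = 0x6a      (bucket 106)
 */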

extern __inline__ void ip_rt_put(struct rtable * rt)
#ifndef MODULE
{
        if (rt)
                ATOMIC_DECR(&rt->rt_refcnt);
}
#else
;       /* modules get only the declaration; the body lives in the kernel proper */
#endif

#ifdef CONFIG_KERNELD
extern struct rtable * ip_rt_route(__u32 daddr, int local);
#else
extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local)
#ifndef MODULE
{
        struct rtable * rth;

        ip_rt_fast_lock();

        /* xor with "local" flips the bucket, keeping local-route lookups
           on a separate chain from ordinary host routes. */
        for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
        {
                if (rth->rt_dst == daddr)
                {
                        rth->rt_lastuse = jiffies;
                        ATOMIC_INCR(&rth->rt_use);
                        ATOMIC_INCR(&rth->rt_refcnt);
                        ip_rt_unlock();
                        return rth;
                }
        }
        /* Cache miss: ip_rt_slow_route() inherits the lock and releases it. */
        return ip_rt_slow_route (daddr, local);
}
#else
;
#endif
#endif
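
/*
 * Direct lookup example (illustrative): a caller without its own cached
 * pointer pairs every successful ip_rt_route() with ip_rt_put():
 *
 *      struct rtable *rt = ip_rt_route(daddr, 0);
 *      if (rt) {
 *              ...use rt->rt_dev, rt->rt_gateway...
 *              ip_rt_put(rt);
 *      }
 */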

/* Revalidate a caller-held route; replace it if stale or the wrong kind. */
extern __inline__ struct rtable * ip_check_route(struct rtable ** rp,
                                                 __u32 daddr, int local)
{
        struct rtable * rt = *rp;

        if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP)
            || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
        {
                ip_rt_put(rt);
                rt = ip_rt_route(daddr, local);
                *rp = rt;
        }
        return rt;
}
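
/*
 * Usage sketch (illustrative): a caller keeps a one-entry route cache and
 * lets ip_check_route() revalidate or refill it.  With a hypothetical
 * cached pointer "rt_cache":
 *
 *      static struct rtable *rt_cache = NULL;
 *
 *      struct rtable *rt = ip_check_route(&rt_cache, daddr, 0);
 *      if (!rt)
 *              return -ENETUNREACH;            // no route to host
 *      ...transmit via rt->rt_dev, next hop rt->rt_gateway...
 *
 * The reference stays attached to rt_cache; it is dropped with ip_rt_put()
 * when a different route replaces it.
 */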

#endif  /* _ROUTE_H */