This source file includes the following definitions.
- ATOMIC_INCR
- ATOMIC_DECR
- ATOMIC_DECR_AND_CHECK
- ATOMIC_INCR
- ATOMIC_DECR
- ATOMIC_DECR_AND_CHECK
- ip_rt_fast_lock
- ip_rt_fast_unlock
- ip_rt_unlock
- ip_rt_hash_code
- ip_rt_put
- ip_rt_route
- ip_check_route
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26 #ifndef _ROUTE_H
27 #define _ROUTE_H
28
29 #include <linux/config.h>
30
31
32
33
34
35
36 #define RT_CACHE_DEBUG 1
37
38 #define RT_HASH_DIVISOR 256
39 #define RT_CACHE_SIZE_MAX 256
40
41 #define RTZ_HASH_DIVISOR 256
42
43 #if RT_CACHE_DEBUG >= 2
44 #define RTZ_HASHING_LIMIT 0
45 #else
46 #define RTZ_HASHING_LIMIT 16
47 #endif
48
49
50
51
52 #define RT_CACHE_TIMEOUT (HZ*300)
53
54
55
56
57
58 #define RT_CACHE_BUBBLE_THRESHOULD (HZ*5)
59
60 #include <linux/route.h>
61
62 #ifdef __KERNEL__
63 #define RTF_LOCAL 0x8000
64 #endif
65
66
67
68
69
70 #ifdef __i386__
71 #include <asm/bitops.h>
72
/*
 * Atomically increment the unsigned long at 'addr' (i386 fast path).
 * NOTE(review): the asm operand is 'ADDR', not 'addr' -- presumably a
 * macro from <asm/bitops.h> (included above) that expands to a memory
 * reference through 'addr'; confirm against that header.
 * NOTE(review): a lone "incl" has no "lock" prefix, so this is atomic
 * only w.r.t. interrupts on a uniprocessor, not across CPUs -- verify
 * that matches the intended use.
 */
extern __inline__ void ATOMIC_INCR(void * addr)
{
	__asm__ __volatile__(
		"incl %0"
		:"=m" (ADDR));
}
79
/*
 * Atomically decrement the unsigned long at 'addr' (i386 fast path).
 * NOTE(review): 'ADDR' is presumably the <asm/bitops.h> macro wrapping
 * 'addr' -- confirm.  As with ATOMIC_INCR, no "lock" prefix: safe on a
 * uniprocessor only.
 * NOTE(review): "=m" declares the operand write-only although "decl"
 * also reads it; contemporary compilers accepted this, but by modern
 * GCC constraint semantics it should be "+m" -- flagging, not changing.
 */
extern __inline__ void ATOMIC_DECR(void * addr)
{
	__asm__ __volatile__(
		"decl %0"
		:"=m" (ADDR));
}
86
87
88
89
90
91
/*
 * Decrement the unsigned long at 'addr' and return the new value
 * (i386 fast path).
 * NOTE(review): "decl %0\nmovl %0,%1" reloads the memory cell after the
 * decrement rather than using the flags from "decl", i.e. this is two
 * memory accesses, not one indivisible read-modify-write; fine on a
 * uniprocessor where only interrupts intervene -- confirm intended use.
 * NOTE(review): this variant returns unsigned long while the generic
 * fallback below returns int; callers only test the result against
 * zero, but the asymmetry is worth tidying.
 */
extern __inline__ unsigned long ATOMIC_DECR_AND_CHECK(void * addr)
{
	unsigned long retval;
	__asm__ __volatile__(
		"decl %0\nmovl %0,%1"
		: "=m" (ADDR), "=r"(retval));
	return retval;
}
100
101
102 #else
103
/*
 * Generic fallback: increment the unsigned long at 'addr' through a
 * volatile access (ordinary read-modify-write; interrupt-safe only to
 * the extent a single C increment is on the target).
 */
static __inline__ void ATOMIC_INCR(void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	*counter += 1;
}
108
/*
 * Generic fallback: decrement the unsigned long at 'addr' through a
 * volatile access.  Mirror image of ATOMIC_INCR above.
 */
static __inline__ void ATOMIC_DECR(void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	*counter -= 1;
}
113
/*
 * Generic fallback: decrement the unsigned long at 'addr' and return
 * the post-decrement value (callers test it against zero).  The
 * decrement is done inline rather than via ATOMIC_DECR; the access
 * pattern -- volatile decrement, then volatile re-read -- is the same.
 */
static __inline__ int ATOMIC_DECR_AND_CHECK (void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	--*counter;
	return *counter;
}
119
120 #endif
121
122
123
/*
 * One entry in the IPv4 routing cache, chained per hash bucket through
 * rt_next (see ip_rt_hash_table below).
 */
struct rtable
{
	struct rtable *rt_next;		/* next entry on the same hash chain */
	__u32 rt_dst;			/* destination address; the cache lookup key
					   (compared in ip_rt_route/ip_check_route) */
	__u32 rt_src;			/* source address for this route */
	__u32 rt_gateway;		/* next-hop gateway address */
	unsigned long rt_refcnt;	/* reference count, adjusted with ATOMIC_*
					   (see ip_rt_put / ip_rt_route) */
	unsigned long rt_use;		/* number of cache hits (bumped on lookup) */
	unsigned long rt_window;	/* presumably a TCP window clamp for this
					   path -- TODO confirm against users */
	unsigned long rt_lastuse;	/* jiffies timestamp of the last hit */
	struct hh_cache *rt_hh;		/* cached hardware header, if any */
	struct device *rt_dev;		/* output network device */
	unsigned short rt_flags;	/* RTF_* bits, incl. kernel-only RTF_LOCAL */
	unsigned short rt_mtu;		/* MTU for this route */
	unsigned short rt_irtt;		/* initial round-trip time estimate --
					   NOTE(review): units presumed jiffies; verify */
	unsigned char rt_tos;		/* IP type-of-service this entry matches */
};
141
142 extern void ip_rt_flush(struct device *dev);
143 extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
144 extern struct rtable *ip_rt_slow_route(__u32 daddr, int local);
145 extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
146 extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
147 extern int ip_rt_ioctl(unsigned int cmd, void *arg);
148 extern int ip_rt_new(struct rtentry *rt);
149 extern void ip_rt_check_expire(void);
150 extern void ip_rt_advice(struct rtable **rp, int advice);
151
152 extern void ip_rt_run_bh(void);
153 extern int ip_rt_lock;
154 extern unsigned ip_rt_bh_mask;
155 extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
156
/*
 * Enter the routing-cache fast path: take one hold on ip_rt_lock.
 * While the count is non-zero, deferred cache work is postponed until
 * the matching ip_rt_unlock() (which runs ip_rt_run_bh when the count
 * drops back to zero).
 */
extern __inline__ void ip_rt_fast_lock(void)
{
	ATOMIC_INCR(&ip_rt_lock);
}
161
/*
 * Drop one hold on ip_rt_lock WITHOUT checking for pending deferred
 * work.  Use ip_rt_unlock() instead when pending bottom-half work
 * (ip_rt_bh_mask) should be allowed to run.
 */
extern __inline__ void ip_rt_fast_unlock(void)
{
	ATOMIC_DECR(&ip_rt_lock);
}
166
167 extern __inline__ void ip_rt_unlock(void)
168 {
169 if (!ATOMIC_DECR_AND_CHECK(&ip_rt_lock) && ip_rt_bh_mask)
170 ip_rt_run_bh();
171 }
172
173 extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
174 {
175 unsigned tmp = addr + (addr>>16);
176 return (tmp + (tmp>>8)) & 0xFF;
177 }
178
179
/*
 * Drop one reference on a routing-cache entry; safe to call with NULL.
 * When MODULE is defined only the prototype is emitted here --
 * presumably modules link against an out-of-line copy in the kernel
 * image rather than inlining it; confirm.
 */
extern __inline__ void ip_rt_put(struct rtable * rt)
#ifndef MODULE
{
	if (rt)
		ATOMIC_DECR(&rt->rt_refcnt);
}
#else
;
#endif
189
#ifdef CONFIG_KERNELD
/* With kerneld support the lookup is out-of-line (defined in the .c). */
extern struct rtable * ip_rt_route(__u32 daddr, int local);
#else
/*
 * Look up 'daddr' in the routing cache.  'local' is XORed into the
 * hash index, so local routes live on separate chains from forwarded
 * ones.  On a hit: refresh rt_lastuse, bump rt_use and rt_refcnt, and
 * return the entry with the cache lock released.  On a miss, fall
 * through to the slow path.
 *
 * NOTE(review): the fast lock taken here is NOT released on the miss
 * path -- presumably ip_rt_slow_route() expects it held and drops it
 * itself; confirm in the implementation file.
 *
 * When MODULE is defined only the prototype is emitted (see ip_rt_put).
 */
extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local)
#ifndef MODULE
{
	struct rtable * rth;

	ip_rt_fast_lock();

	for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
	{
		if (rth->rt_dst == daddr)
		{
			rth->rt_lastuse = jiffies;
			ATOMIC_INCR(&rth->rt_use);
			ATOMIC_INCR(&rth->rt_refcnt);
			ip_rt_unlock();
			return rth;
		}
	}
	return ip_rt_slow_route (daddr, local);
}
#else
;
#endif
#endif
217
218 extern __inline__ struct rtable * ip_check_route(struct rtable ** rp,
219 __u32 daddr, int local)
220 {
221 struct rtable * rt = *rp;
222
223 if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP)
224 || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
225 {
226 ip_rt_put(rt);
227 rt = ip_rt_route(daddr, local);
228 *rp = rt;
229 }
230 return rt;
231 }
232
233
234 #endif