This source file includes the following definitions.
- ATOMIC_INCR
- ATOMIC_DECR
- ATOMIC_DECR_AND_CHECK
- ATOMIC_INCR
- ATOMIC_DECR
- ATOMIC_DECR_AND_CHECK
- ip_rt_fast_lock
- ip_rt_fast_unlock
- ip_rt_unlock
- ip_rt_hash_code
- ip_rt_put
- ip_rt_route
- ip_check_route
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26 #ifndef _ROUTE_H
27 #define _ROUTE_H
28
29
30
31
32
33
34 #define RT_CACHE_DEBUG 1
35
36 #define RT_HASH_DIVISOR 256
37 #define RT_CACHE_SIZE_MAX 256
38
39 #define RTZ_HASH_DIVISOR 256
40
41 #if RT_CACHE_DEBUG >= 2
42 #define RTZ_HASHING_LIMIT 0
43 #else
44 #define RTZ_HASHING_LIMIT 16
45 #endif
46
47
48
49
50 #define RT_CACHE_TIMEOUT (HZ*300)
51
52
53
54
55
56 #define RT_CACHE_BUBBLE_THRESHOULD (HZ*5)
57
58 #include <linux/route.h>
59
60 #ifdef __KERNEL__
61 #define RTF_LOCAL 0x8000
62 #endif
63
64
65
66
67
68 #ifdef __i386__
69 #include <asm/bitops.h>
70
71 extern __inline__ void ATOMIC_INCR(void * addr)
72 {
73 __asm__ __volatile__(
74 "incl %0"
75 :"=m" (ADDR));
76 }
77
78 extern __inline__ void ATOMIC_DECR(void * addr)
79 {
80 __asm__ __volatile__(
81 "decl %0"
82 :"=m" (ADDR));
83 }
84
85
86
87
88
89
90 extern __inline__ unsigned long ATOMIC_DECR_AND_CHECK(void * addr)
91 {
92 unsigned long retval;
93 __asm__ __volatile__(
94 "decl %0\nmovl %0,%1"
95 : "=m" (ADDR), "=r"(retval));
96 return retval;
97 }
98
99
100 #else
101
/*
 * Portable fallback: bump the unsigned long counter at *addr.
 * NOTE(review): a volatile read-modify-write is not truly atomic; this
 * presumably relies on uniprocessor/interrupt discipline -- confirm.
 */
static __inline__ void ATOMIC_INCR(void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	*counter = *counter + 1;
}
106
/*
 * Portable fallback: drop the unsigned long counter at *addr by one.
 * Same atomicity caveat as the fallback ATOMIC_INCR.
 */
static __inline__ void ATOMIC_DECR(void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	*counter = *counter - 1;
}
111
/*
 * Portable fallback: decrement the counter at *addr and return the new
 * value (zero means the last reference is gone).  The decrement is done
 * inline rather than via ATOMIC_DECR; the volatile access sequence
 * (read, write, re-read) is identical.
 */
static __inline__ int ATOMIC_DECR_AND_CHECK (void * addr)
{
	volatile unsigned long *counter = (volatile unsigned long *) addr;

	*counter = *counter - 1;
	return *counter;
}
117
118 #endif
119
120
121
/*
 * One entry in the IPv4 routing cache; entries are chained per hash
 * bucket through rt_next (see ip_rt_hash_table / ip_rt_route below).
 */
struct rtable
{
	struct rtable		*rt_next;	/* next entry on the same hash chain */
	__u32			rt_dst;		/* destination address (cache key) */
	__u32			rt_src;		/* source address for this route */
	__u32			rt_gateway;	/* next-hop gateway address */
	unsigned long		rt_refcnt;	/* references held by users (ip_rt_put drops) */
	unsigned long		rt_use;		/* lookup hit counter (bumped in ip_rt_route) */
	unsigned long		rt_window;	/* NOTE(review): presumably a TCP window clamp -- confirm */
	unsigned long		rt_lastuse;	/* jiffies of last lookup hit */
	struct hh_cache		*rt_hh;		/* cached hardware header, if any */
	struct device		*rt_dev;	/* output device */
	unsigned short		rt_flags;	/* RTF_* bits (RTF_UP, RTF_LOCAL, ...) */
	unsigned short		rt_mtu;		/* path MTU */
	unsigned short		rt_irtt;	/* initial round-trip time estimate */
	unsigned char		rt_tos;		/* IP type-of-service for this route */
};
139
140 extern void ip_rt_flush(struct device *dev);
141 extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
142 extern struct rtable *ip_rt_slow_route(__u32 daddr, int local);
143 extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
144 extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
145 extern int ip_rt_ioctl(unsigned int cmd, void *arg);
146 extern int ip_rt_new(struct rtentry *rt);
147 extern void ip_rt_check_expire(void);
148 extern void ip_rt_advice(struct rtable **rp, int advice);
149
150 extern void ip_rt_run_bh(void);
151 extern int ip_rt_lock;
152 extern unsigned ip_rt_bh_mask;
153 extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
154
/*
 * Enter a routing-cache read-side critical section by bumping the global
 * ip_rt_lock count.  Paired with ip_rt_fast_unlock / ip_rt_unlock.
 * NOTE(review): ip_rt_lock is declared int, but the generic ATOMIC_INCR
 * accesses it as unsigned long -- confirm the sizes match on all targets.
 */
extern __inline__ void ip_rt_fast_lock(void)
{
	ATOMIC_INCR(&ip_rt_lock);
}
159
/*
 * Leave the critical section without checking for deferred bottom-half
 * work; use ip_rt_unlock when pending ip_rt_bh_mask work should run.
 */
extern __inline__ void ip_rt_fast_unlock(void)
{
	ATOMIC_DECR(&ip_rt_lock);
}
164
/*
 * Leave the critical section; if this was the last holder (count reached
 * zero) and bottom-half work was deferred while the cache was locked
 * (ip_rt_bh_mask non-zero), run that work now via ip_rt_run_bh().
 */
extern __inline__ void ip_rt_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&ip_rt_lock) && ip_rt_bh_mask)
		ip_rt_run_bh();
}
170
/*
 * Fold a 32-bit address down to an 8-bit hash bucket index
 * (RT_HASH_DIVISOR is 256): sum the two halfwords, then fold the
 * next byte in and mask to the low 8 bits.
 */
extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
{
	unsigned folded = addr + (addr >> 16);

	folded += folded >> 8;
	return folded & 0xFF;
}
176
177
/*
 * Drop one reference on a cached route; NULL is tolerated as a no-op.
 * In-kernel builds get the inline body; module builds (#ifdef MODULE)
 * see only the prototype and link against the kernel's copy.
 */
extern __inline__ void ip_rt_put(struct rtable * rt)
#ifndef MODULE
{
	if (rt)
		ATOMIC_DECR(&rt->rt_refcnt);
}
#else
;
#endif
187
#ifdef CONFIG_KERNELD
/* With kerneld, route resolution may need to load modules, so the
 * lookup lives out of line in the core kernel. */
extern struct rtable * ip_rt_route(__u32 daddr, int local);
#else
/*
 * Fast-path route lookup: hash daddr (the bucket index is XORed with
 * `local' so host routes and forwarded routes hash separately), walk the
 * chain, and on a hit bump the use/ref counts, refresh rt_lastuse, and
 * return the entry with a reference held for the caller.
 * NOTE(review): on a miss the fast lock taken above is NOT released
 * here; presumably ip_rt_slow_route() drops it -- confirm.
 */
extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local)
#ifndef MODULE
{
	struct rtable * rth;

	ip_rt_fast_lock();

	for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
	{
		if (rth->rt_dst == daddr)
		{
			rth->rt_lastuse = jiffies;
			ATOMIC_INCR(&rth->rt_use);
			ATOMIC_INCR(&rth->rt_refcnt);
			ip_rt_unlock();
			return rth;
		}
	}
	return ip_rt_slow_route (daddr, local);
}
#else
;
#endif
#endif
215
216 extern __inline__ struct rtable * ip_check_route(struct rtable ** rp,
217 __u32 daddr, int local)
218 {
219 struct rtable * rt = *rp;
220
221 if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP)
222 || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
223 {
224 ip_rt_put(rt);
225 rt = ip_rt_route(daddr, local);
226 *rp = rt;
227 }
228 return rt;
229 }
230
231
232 #endif