This source file includes following definitions.
- skb_check
- skb_queue_head
- skb_queue_tail
- skb_dequeue
- skb_insert
- skb_append
- skb_unlink
- skb_new_list_head
- skb_peek
- skb_peek_copy
- kfree_skb
- alloc_skb
- kfree_skbmem
- skb_kept_by_device
- skb_device_release
- skb_device_locked
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 #include <linux/config.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <asm/segment.h>
22 #include <asm/system.h>
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/in.h>
26 #include "inet.h"
27 #include "dev.h"
28 #include "ip.h"
29 #include "protocol.h"
30 #include "arp.h"
31 #include "route.h"
32 #include "tcp.h"
33 #include "udp.h"
34 #include "skbuff.h"
35 #include "sock.h"
36
37
38
39
40
41
42
43
44
45
46
47
/*
 *	Global debugging statistics: total bytes currently allocated to
 *	sk_buffs and the number of live sk_buffs.  Maintained by
 *	alloc_skb()/kfree_skbmem(); volatile because they are updated
 *	from interrupt context with no locking.
 */
volatile unsigned long net_memory=0;
volatile unsigned long net_skbcount=0;
50
51
52
53
54
55
56
/*
 *	Debug sanity check on an sk_buff (normally invoked through the
 *	IS_SKB() macro, which passes the caller's file/line).  Reports,
 *	via printk, three suspicious conditions:
 *	 - the buffer carries the "already freed" magic cookie;
 *	 - the buffer does not carry the "good skb" magic cookie at all
 *	   (note: a freed skb also trips this second report);
 *	 - the claimed length (mem_len) disagrees with the real
 *	   allocation size (truesize).
 *	Diagnostic only: it never repairs or frees anything.
 */
void skb_check(struct sk_buff *skb, int line, char *file)
{
	if(skb->magic_debug_cookie==SK_FREED_SKB)
	{
		printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
			file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, magic=%d, list=%p, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->magic,skb->list,skb->free);
	}
	if(skb->magic_debug_cookie!=SK_GOOD_SKB)
	{
		printk("File: %s Line %d, passed a non skb!\n", file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, magic=%d, list=%p, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->magic,skb->list,skb->free);
	}
	if(skb->mem_len!=skb->truesize)
	{
		printk("File: %s Line %d, Dubious size setting!\n",file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, magic=%d, list=%p\n",
			skb,skb->truesize,skb->mem_len,skb->magic,skb->list);
	}
}
80
81
82
83
84
85 void skb_queue_head(struct sk_buff *volatile* list,struct sk_buff *newsk)
86 {
87 unsigned long flags;
88
89 IS_SKB(newsk);
90 if(newsk->list)
91 printk("Suspicious queue head: sk_buff on list!\n");
92 save_flags(flags);
93 cli();
94 newsk->list=list;
95
96 newsk->next=*list;
97
98 if(*list)
99 newsk->prev=(*list)->prev;
100 else
101 newsk->prev=newsk;
102 newsk->prev->next=newsk;
103 newsk->next->prev=newsk;
104 IS_SKB(newsk->prev);
105 IS_SKB(newsk->next);
106 *list=newsk;
107 restore_flags(flags);
108 }
109
110
111
112
113
114 void skb_queue_tail(struct sk_buff *volatile* list, struct sk_buff *newsk)
115 {
116 unsigned long flags;
117
118 if(newsk->list)
119 printk("Suspicious queue tail: sk_buff on list!\n");
120
121 IS_SKB(newsk);
122 save_flags(flags);
123 cli();
124
125 newsk->list=list;
126 if(*list)
127 {
128 (*list)->prev->next=newsk;
129 newsk->prev=(*list)->prev;
130 newsk->next=*list;
131 (*list)->prev=newsk;
132 }
133 else
134 {
135 newsk->next=newsk;
136 newsk->prev=newsk;
137 *list=newsk;
138 }
139 IS_SKB(newsk->prev);
140 IS_SKB(newsk->next);
141 restore_flags(flags);
142
143 }
144
145
146
147
148
149
150 struct sk_buff *skb_dequeue(struct sk_buff *volatile* list)
151 {
152 long flags;
153 struct sk_buff *result;
154
155 save_flags(flags);
156 cli();
157
158 if(*list==NULL)
159 {
160 restore_flags(flags);
161 return(NULL);
162 }
163
164 result=*list;
165 if(result->next==result)
166 *list=NULL;
167 else
168 {
169 result->next->prev=result->prev;
170 result->prev->next=result->next;
171 *list=result->next;
172 }
173
174 IS_SKB(result);
175 restore_flags(flags);
176
177 if(result->list!=list)
178 printk("Dequeued packet has invalid list pointer\n");
179
180 result->list=0;
181 result->next=0;
182 result->prev=0;
183 return(result);
184 }
185
186
187
188
189
190 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
191 {
192 unsigned long flags;
193
194 IS_SKB(old);
195 IS_SKB(newsk);
196
197 if(!old->list)
198 printk("insert before unlisted item!\n");
199 if(newsk->list)
200 printk("inserted item is already on a list.\n");
201
202 save_flags(flags);
203 cli();
204 newsk->list=old->list;
205 newsk->next=old;
206 newsk->prev=old->prev;
207 newsk->next->prev=newsk;
208 newsk->prev->next=newsk;
209
210 restore_flags(flags);
211 }
212
213
214
215
216
217 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
218 {
219 unsigned long flags;
220
221 IS_SKB(old);
222 IS_SKB(newsk);
223
224 if(!old->list)
225 printk("append before unlisted item!\n");
226 if(newsk->list)
227 printk("append item is already on a list.\n");
228
229 save_flags(flags);
230 cli();
231 newsk->list=old->list;
232 newsk->prev=old;
233 newsk->next=old->next;
234 newsk->next->prev=newsk;
235 newsk->prev->next=newsk;
236
237 restore_flags(flags);
238 }
239
240
241
242
243
244
245
246
247 void skb_unlink(struct sk_buff *skb)
248 {
249 unsigned long flags;
250 save_flags(flags);
251 cli();
252
253 IS_SKB(skb);
254
255 if(skb->list)
256 {
257 skb->next->prev=skb->prev;
258 skb->prev->next=skb->next;
259 if(*skb->list==skb)
260 {
261 if(skb->next==skb)
262 *skb->list=NULL;
263 else
264 *skb->list=skb->next;
265 }
266 skb->next=0;
267 skb->prev=0;
268 skb->list=0;
269 }
270 restore_flags(flags);
271 }
272
273
274
275
276
277
278
279 void skb_new_list_head(struct sk_buff *volatile* list)
280 {
281 struct sk_buff *skb=skb_peek(list);
282 if(skb!=NULL)
283 {
284 do
285 {
286 IS_SKB(skb);
287 skb->list=list;
288 skb=skb->next;
289 }
290 while(skb!=*list);
291 }
292 }
293
294
295
296
297
298
299
300
/*
 *	Return, without dequeueing, the buffer at the head of a queue;
 *	NULL when the queue is empty.
 */
struct sk_buff *skb_peek(struct sk_buff *volatile* list)
{
	struct sk_buff *head = *list;
	return head;
}
305
306
307
308
309
310
311
312
313
314 struct sk_buff *skb_peek_copy(struct sk_buff *volatile* list)
315 {
316 struct sk_buff *orig,*newsk;
317 unsigned long flags;
318 unsigned int len;
319
320
321 do
322 {
323 save_flags(flags);
324 cli();
325 orig=skb_peek(list);
326 if(orig==NULL)
327 {
328 restore_flags(flags);
329 return NULL;
330 }
331 IS_SKB(orig);
332 len=orig->truesize;
333 restore_flags(flags);
334
335 newsk=alloc_skb(len,GFP_KERNEL);
336
337 if(newsk==NULL)
338 return NULL;
339
340 save_flags(flags);
341 cli();
342 if(skb_peek(list)!=orig)
343 {
344 restore_flags(flags);
345 newsk->sk=NULL;
346 newsk->free=1;
347 newsk->mem_addr=newsk;
348 newsk->mem_len=len;
349 kfree_skb(newsk, FREE_WRITE);
350 continue;
351 }
352
353 IS_SKB(orig);
354 IS_SKB(newsk);
355 memcpy(newsk,orig,len);
356 newsk->list=NULL;
357 newsk->magic=0;
358 newsk->next=NULL;
359 newsk->prev=NULL;
360 newsk->mem_addr=newsk;
361 newsk->h.raw+=((char *)newsk-(char *)orig);
362 newsk->link3=NULL;
363 newsk->sk=NULL;
364 newsk->free=1;
365 }
366 while(0);
367
368 restore_flags(flags);
369 return(newsk);
370 }
371
372
373
374
375
376
377 void kfree_skb(struct sk_buff *skb, int rw)
378 {
379 if (skb == NULL) {
380 printk("kfree_skb: skb = NULL\n");
381 return;
382 }
383 IS_SKB(skb);
384 if(skb->lock)
385 {
386 skb->free=1;
387 return;
388 }
389
390 if(skb->free == 2)
391 printk("Warning: kfree_skb passed an skb that nobody set the free flag on!\n");
392 if(skb->list)
393 printk("Warning: kfree_skb passed an skb still on a list.\n");
394 skb->magic = 0;
395 if (skb->sk)
396 {
397 if(skb->sk->prot!=NULL)
398 {
399 if (rw)
400 skb->sk->prot->rfree(skb->sk, skb->mem_addr, skb->mem_len);
401 else
402 skb->sk->prot->wfree(skb->sk, skb->mem_addr, skb->mem_len);
403
404 }
405 else
406 {
407
408 if (rw)
409 skb->sk->rmem_alloc-=skb->mem_len;
410 else
411 skb->sk->wmem_alloc-=skb->mem_len;
412 if(!skb->sk->dead)
413 wake_up_interruptible(skb->sk->sleep);
414 kfree_skbmem(skb->mem_addr,skb->mem_len);
415 }
416 }
417 else
418 kfree_skbmem(skb->mem_addr, skb->mem_len);
419 }
420
421
422
423
424
425
/*
 *	Allocate an sk_buff of 'size' bytes (header and data together;
 *	the struct lives at the start of its own allocation, so
 *	mem_addr == skb).  Returns NULL on allocation failure.
 *
 *	'priority' is the kmalloc priority; if we are called from
 *	interrupt context without GFP_ATOMIC the call is downgraded to
 *	atomic and (for the first few occurrences) the offender's return
 *	address is printed.
 *
 *	Note: free is set to 2 ("nobody has claimed responsibility yet");
 *	kfree_skb() warns if it is still 2 at free time.  Fields such as
 *	sk/next/prev are left uninitialised for the caller to fill in.
 */
struct sk_buff *alloc_skb(unsigned int size,int priority)
{
	struct sk_buff *skb;
	extern unsigned long intr_count;

	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			/* ((unsigned long *)&size)[-1] digs the caller's return
			   address out of the stack frame - arch/ABI specific
			   debug hack, not portable. */
			printk("alloc_skb called nonatomically from interrupt %08lx\n",
				((unsigned long *)&size)[-1]);
			priority = GFP_ATOMIC;
		}
	}
	skb=(struct sk_buff *)kmalloc(size,priority);
	if(skb==NULL)
		return NULL;
	skb->free= 2;	/* "unclaimed" - see kfree_skb() warning */
	skb->list= 0;
	skb->lock= 0;
	skb->truesize=size;
	skb->mem_len=size;
	skb->mem_addr=skb;
	skb->fraglist=NULL;
	net_memory+=size;	/* update the debug statistics */
	net_skbcount++;
	skb->magic_debug_cookie=SK_GOOD_SKB;
	skb->users=0;
	return skb;
}
455
456
457
458
459
460 void kfree_skbmem(void *mem,unsigned size)
461 {
462 struct sk_buff *x=mem;
463 IS_SKB(x);
464 if(x->magic_debug_cookie==SK_GOOD_SKB)
465 {
466 x->magic_debug_cookie=SK_FREED_SKB;
467 kfree_s(mem,size);
468 net_skbcount--;
469 net_memory-=size;
470 }
471 }
472
473
474
475
476
/*
 *	A device driver has kept hold of this buffer: take a lock count
 *	so kfree_skb() defers the actual free until
 *	skb_device_release() drops the count back to zero.
 */
void skb_kept_by_device(struct sk_buff *skb)
{
	skb->lock++;
}
481
482 void skb_device_release(struct sk_buff *skb, int mode)
483 {
484 unsigned long flags;
485
486 save_flags(flags);
487 cli();
488 if (!--skb->lock) {
489 if (skb->free==1)
490 kfree_skb(skb,mode);
491 }
492 restore_flags(flags);
493 }
494
495 int skb_device_locked(struct sk_buff *skb)
496 {
497 if(skb->lock)
498 return 1;
499 return 0;
500 }