This source file includes the following definitions:
- show_net_buffers
- skb_check
- skb_queue_head_init
- skb_queue_head
- skb_queue_tail
- skb_dequeue
- skb_insert
- skb_append
- skb_unlink
- skb_put
- skb_push
- skb_pull
- skb_headroom
- skb_tailroom
- skb_reserve
- skb_trim
- kfree_skb
- alloc_skb
- kfree_skbmem
- skb_clone
- skb_clone
- skb_device_lock
- skb_device_unlock
- dev_kfree_skb
- dev_alloc_skb
- skb_device_locked
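
Taken together, these routines form the sk_buff allocation, queueing, and data-pointer interface used by the rest of the networking code. The sketch below is illustrative only: the queue name, the producer/consumer functions, and the frame variables are hypothetical, and locking and error handling are cut down to the minimum; only dev_alloc_skb(), skb_put(), skb_queue_tail(), skb_dequeue(), kfree_skb() and FREE_READ come from the real interface.

static struct sk_buff_head rx_queue;    /* call skb_queue_head_init(&rx_queue) once before use */

static void example_rx(unsigned char *frame, unsigned int length)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(length);            /* GFP_ATOMIC allocation with 16 bytes of headroom */
        if (skb == NULL)
                return;                         /* out of memory: drop the frame */
        memcpy(skb_put(skb, length), frame, length);    /* append the payload */
        skb_queue_tail(&rx_queue, skb);         /* hand it to the consumer */
}

static void example_consume(void)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&rx_queue)) != NULL)
        {
                /* ... inspect skb->data / skb->len ... */
                skb->free = 1;                  /* mark it freeable so kfree_skb() does not warn */
                kfree_skb(skb, FREE_READ);      /* release the buffer */
        }
}
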
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/string.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Global accounting of network buffer usage. */
volatile unsigned long net_skbcount = 0;
volatile unsigned long net_locked = 0;
volatile unsigned long net_allocs = 0;
volatile unsigned long net_fails = 0;
volatile unsigned long net_free_locked = 0;

extern unsigned long ip_frag_mem;

/* Print the buffer accounting counters to the kernel log. */
void show_net_buffers(void)
{
        printk("Networking buffers in use : %lu\n",net_skbcount);
        printk("Network buffers locked by drivers : %lu\n",net_locked);
        printk("Total network buffer allocations : %lu\n",net_allocs);
        printk("Total failed network buffer allocs : %lu\n",net_fails);
        printk("Total free while locked events : %lu\n",net_free_locked);
#ifdef CONFIG_INET
        printk("IP fragment buffer size : %lu\n",ip_frag_mem);
#endif
}

#if CONFIG_SKB_CHECK

/*
 * Debug-build sanity check of an sk_buff (or, with head != 0, of a queue
 * head).  Returns 0 if the buffer looks consistent, otherwise logs the
 * caller's file and line and returns -1.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
        if (head) {
                if (skb->magic_debug_cookie != SK_HEAD_SKB) {
                        printk("File: %s Line %d, found a bad skb-head\n",
                                file,line);
                        return -1;
                }
                if (!skb->next || !skb->prev) {
                        printk("skb_check: head without next or prev\n");
                        return -1;
                }
                if (skb->next->magic_debug_cookie != SK_HEAD_SKB
                    && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad next head-skb member\n",
                                file,line);
                        return -1;
                }
                if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
                    && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad prev head-skb member\n",
                                file,line);
                        return -1;
                }
#if 0
                {
                        struct sk_buff *skb2 = skb->next;
                        int i = 0;
                        while (skb2 != skb && i < 5) {
                                if (skb_check(skb2, 0, line, file) < 0) {
                                        printk("bad queue element in whole queue\n");
                                        return -1;
                                }
                                i++;
                                skb2 = skb2->next;
                        }
                }
#endif
                return 0;
        }
        if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
            && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad next skb member\n",
                        file,line);
                return -1;
        }
        if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
            && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad prev skb member\n",
                        file,line);
                return -1;
        }

        if (skb->magic_debug_cookie == SK_FREED_SKB)
        {
                printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
                        file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        if (skb->magic_debug_cookie != SK_GOOD_SKB)
        {
                printk("File: %s Line %d, passed a non skb!\n", file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        if (skb->head > skb->data)
        {
                printk("File: %s Line %d, head > data !\n", file,line);
                printk("skb=%p, head=%p, data=%p\n",
                        skb,skb->head,skb->data);
                return -1;
        }
        if (skb->tail > skb->end)
        {
                printk("File: %s Line %d, tail > end!\n", file,line);
                printk("skb=%p, tail=%p, end=%p\n",
                        skb,skb->tail,skb->end);
                return -1;
        }
        if (skb->data > skb->tail)
        {
                printk("File: %s Line %d, data > tail!\n", file,line);
                printk("skb=%p, data=%p, tail=%p\n",
                        skb,skb->data,skb->tail);
                return -1;
        }
        if (skb->tail - skb->data != skb->len)
        {
                printk("File: %s Line %d, wrong length\n", file,line);
                printk("skb=%p, data=%p, end=%p len=%ld\n",
                        skb,skb->data,skb->end,skb->len);
                return -1;
        }
        if ((unsigned long) skb->end > (unsigned long) skb)
        {
                printk("File: %s Line %d, control overrun\n", file,line);
                printk("skb=%p, end=%p\n",
                        skb,skb->end);
                return -1;
        }

        return 0;
}
#endif

#if CONFIG_SKB_CHECK
/* Initialise an (empty) circular queue head. */
void skb_queue_head_init(struct sk_buff_head *list)
{
        list->prev = (struct sk_buff *)list;
        list->next = (struct sk_buff *)list;
        list->magic_debug_cookie = SK_HEAD_SKB;
}

/*
 * Insert a buffer at the front of a queue.  Interrupts are disabled so the
 * list can be manipulated from both driver and protocol context.
 */
void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
        unsigned long flags;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        IS_SKB(newsk);
        IS_SKB_HEAD(list);
        if (newsk->next || newsk->prev)
                printk("Suspicious queue head: sk_buff on list!\n");

        newsk->next = list->next;
        newsk->prev = list;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;

        restore_flags(flags);
}

/* Insert a buffer at the tail of a queue, again with interrupts disabled. */
void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
        unsigned long flags;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        if (newsk->next || newsk->prev)
                printk("Suspicious queue tail: sk_buff on list!\n");
        IS_SKB(newsk);
        IS_SKB_HEAD(list);

        newsk->next = list;
        newsk->prev = list->prev;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;

        restore_flags(flags);
}

/*
 * Remove and return the buffer at the head of a queue, or NULL if the
 * queue is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
{
        long flags;
        struct sk_buff *result;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        IS_SKB_HEAD(list);

        result = list->next;
        if (result == list) {
                restore_flags(flags);
                return NULL;
        }

        result->next->prev = list;
        list->next = result->next;

        result->next = NULL;
        result->prev = NULL;

        restore_flags(flags);

        IS_SKB(result);
        return result;
}

/* Insert a new buffer immediately before 'old', which must be on a list. */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        IS_SKB(old);
        IS_SKB(newsk);

        if (!old->next || !old->prev)
                printk("insert before unlisted item!\n");
        if (newsk->next || newsk->prev)
                printk("inserted item is already on a list.\n");

        save_flags(flags);
        cli();
        newsk->next = old;
        newsk->prev = old->prev;
        old->prev = newsk;
        newsk->prev->next = newsk;

        restore_flags(flags);
}

/* Insert a new buffer immediately after 'old', which must be on a list. */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        IS_SKB(old);
        IS_SKB(newsk);

        if (!old->next || !old->prev)
                printk("append before unlisted item!\n");
        if (newsk->next || newsk->prev)
                printk("append item is already on a list.\n");

        save_flags(flags);
        cli();

        newsk->prev = old;
        newsk->next = old->next;
        newsk->next->prev = newsk;
        old->next = newsk;

        restore_flags(flags);
}

/*
 * Remove a buffer from whatever list it is on.  Safe to call on a buffer
 * that is not currently queued.
 */
void skb_unlink(struct sk_buff *skb)
{
        unsigned long flags;

        save_flags(flags);
        cli();

        IS_SKB(skb);

        if (skb->prev && skb->next)
        {
                skb->next->prev = skb->prev;
                skb->prev->next = skb->next;
                skb->next = NULL;
                skb->prev = NULL;
        }
#ifdef PARANOID_BUGHUNT_MODE
        else
                printk("skb_unlink: not a linked element\n");
#endif
        restore_flags(flags);
}

/* Extend the data area by 'len' bytes at the tail and return the old tail. */
unsigned char *skb_put(struct sk_buff *skb, int len)
{
        unsigned char *tmp = skb->tail;
        IS_SKB(skb);
        skb->tail += len;
        skb->len += len;
        IS_SKB(skb);
        if (skb->tail > skb->end)
                panic("skput:over: %p:%d", __builtin_return_address(0), len);
        return tmp;
}

/* Extend the data area by 'len' bytes at the head and return the new start. */
unsigned char *skb_push(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        skb->data -= len;
        skb->len += len;
        IS_SKB(skb);
        if (skb->data < skb->head)
                panic("skpush:under: %p:%d", __builtin_return_address(0), len);
        return skb->data;
}

/* Remove 'len' bytes from the head of the data area; NULL if too short. */
unsigned char *skb_pull(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        if (len > skb->len)
                return 0;
        skb->data += len;
        skb->len -= len;
        return skb->data;
}

/* Bytes of free space before the data area. */
int skb_headroom(struct sk_buff *skb)
{
        IS_SKB(skb);
        return skb->data - skb->head;
}

/* Bytes of free space after the data area. */
int skb_tailroom(struct sk_buff *skb)
{
        IS_SKB(skb);
        return skb->end - skb->tail;
}

/* Reserve 'len' bytes of headroom in an empty buffer. */
void skb_reserve(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        skb->data += len;
        skb->tail += len;
        if (skb->tail > skb->end)
                panic("sk_res: over");
        if (skb->data < skb->head)
                panic("sk_res: under");
        IS_SKB(skb);
}

/* Trim the buffer down to 'len' bytes if it is currently longer. */
void skb_trim(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        if (skb->len > len)
        {
                skb->len = len;
                skb->tail = skb->data + len;
        }
}
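
/*
 * Illustrative use of the pointer helpers above (a sketch, not code from
 * this file; 'hdr_room', 'payload', 'payload_len' and 'struct exhdr' are
 * hypothetical):
 *
 *      skb = alloc_skb(hdr_room + payload_len, GFP_KERNEL);
 *      skb_reserve(skb, hdr_room);                       leave headroom for later headers
 *      memcpy(skb_put(skb, payload_len), payload, payload_len);
 *      skb_push(skb, sizeof(struct exhdr));              prepend a header in that headroom
 *
 * After this, skb_headroom() and skb_tailroom() report the space still
 * unused on either side of the data, and skb_pull()/skb_trim() can remove
 * bytes from the front or the back again.
 */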

#endif

/*
 * Free an sk_buff.  'rw' says whether it was charged against the owning
 * socket's read or write allocation.  Buffers still locked by a driver are
 * only marked for freeing and released later by dev_kfree_skb().
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
        if (skb == NULL)
        {
                printk("kfree_skb: skb = NULL (from %p)\n",
                        __builtin_return_address(0));
                return;
        }
#if CONFIG_SKB_CHECK
        IS_SKB(skb);
#endif
        if (skb->lock)
        {
                skb->free = 3;          /* Free it once the device unlocks it */
                net_free_locked++;
                return;
        }
        if (skb->free == 2)
                printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
                        __builtin_return_address(0));
        if (skb->next)
                printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
                        __builtin_return_address(0));

        if (skb->destructor)
                skb->destructor(skb);
        if (skb->sk)
        {
                if (skb->sk->prot != NULL)
                {
                        if (rw)
                                sock_rfree(skb->sk, skb);
                        else
                                sock_wfree(skb->sk, skb);
                }
                else
                {
                        unsigned long flags;

                        /* Non-protocol sockets: adjust the accounting by hand. */
                        save_flags(flags);
                        cli();
                        if (rw)
                                skb->sk->rmem_alloc -= skb->truesize;
                        else
                                skb->sk->wmem_alloc -= skb->truesize;
                        restore_flags(flags);
                        if (!skb->sk->dead)
                                skb->sk->write_space(skb->sk);
                        kfree_skbmem(skb);
                }
        }
        else
                kfree_skbmem(skb);
}

/*
 * Allocate a new sk_buff.  The data area and the control structure are
 * allocated in one block; the struct sk_buff lives at the end of it.
 */
struct sk_buff *alloc_skb(unsigned int size,int priority)
{
        struct sk_buff *skb;
        unsigned long flags;
        int len = size;
        unsigned char *bptr;

        if (intr_count && priority != GFP_ATOMIC)
        {
                static int count = 0;
                if (++count < 5) {
                        printk("alloc_skb called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }

        size = (size + 15) & ~15;       /* Round the data area up to 16 bytes */
        size += sizeof(struct sk_buff); /* ...plus room for the control block */

        bptr = (unsigned char *)kmalloc(size,priority);
        if (bptr == NULL)
        {
                net_fails++;
                return NULL;
        }
#ifdef PARANOID_BUGHUNT_MODE
        /* The control block will sit at the end of the allocation; make sure
           kmalloc did not hand back a block still marked as a live skb. */
        skb = (struct sk_buff *)(bptr + size) - 1;
        if (skb->magic_debug_cookie == SK_GOOD_SKB)
                printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
#endif

        net_allocs++;

        skb = (struct sk_buff *)(bptr + size) - 1;

        skb->count = 1;                 /* One reference on the data */
        skb->data_skb = NULL;           /* ...and we own that data   */

        skb->free = 2;                  /* The caller is expected to set the free flag */
        skb->lock = 0;
        skb->pkt_type = PACKET_HOST;    /* Default packet type */
        skb->prev = skb->next = NULL;
        skb->link3 = NULL;
        skb->sk = NULL;
        skb->truesize = size;
        skb->localroute = 0;
        skb->stamp.tv_sec = 0;          /* No timestamp yet */
        skb->localroute = 0;
        skb->ip_summed = 0;
        memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
        save_flags(flags);
        cli();
        net_skbcount++;
        restore_flags(flags);
#if CONFIG_SKB_CHECK
        skb->magic_debug_cookie = SK_GOOD_SKB;
#endif
        skb->users = 0;

        /* Load the data pointers. */
        skb->head = bptr;
        skb->data = bptr;
        skb->tail = bptr;
        skb->end = bptr + len;
        skb->len = 0;
        skb->destructor = NULL;
        return skb;
}

/*
 * Release the memory behind an sk_buff once its reference count drops to
 * zero.  Clones point at the buffer that really owns the data via
 * data_skb, so freeing a clone may drop a reference on its parent too.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        unsigned long flags;
        void *addr = skb->head;

        save_flags(flags);
        cli();

        if (--skb->count <= 0) {
                /* A clone frees only its own control block. */
                if (skb->data_skb) {
                        addr = skb;
                        kfree_skbmem(skb->data_skb);
                }
                kfree(addr);
                net_skbcount--;
        }
        restore_flags(flags);
}

#if 1
/*
 * Duplicate an sk_buff without copying the data: the clone gets its own
 * control block and takes an extra reference on the buffer that owns the
 * data.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;
        unsigned long flags;

        IS_SKB(skb);
        n = kmalloc(sizeof(*n), priority);
        if (!n)
                return NULL;
        memcpy(n, skb, sizeof(*n));
        n->count = 1;
        if (skb->data_skb)
                skb = skb->data_skb;    /* Share the real data owner */
        save_flags(flags);
        cli();
        skb->count++;
        net_allocs++;
        net_skbcount++;
        restore_flags(flags);
        n->data_skb = skb;
        n->next = n->prev = n->link3 = NULL;
        n->sk = NULL;
        n->truesize = sizeof(*n);
        n->free = 1;
        n->tries = 0;
        n->lock = 0;
        n->users = 0;
        return n;
}
#else

/*
 * Fallback version of skb_clone(): allocate a completely new buffer and
 * copy both the data and the relevant header fields across.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;
        unsigned long offset;

        IS_SKB(skb);

        n = alloc_skb(skb->truesize - sizeof(struct sk_buff), priority);
        if (n == NULL)
                return NULL;

        /* Shift the header pointers by the distance between the two buffers. */
        offset = n->head - skb->head;

        skb_reserve(n, skb->data - skb->head);
        skb_put(n, skb->len);

        memcpy(n->head, skb->head, skb->end - skb->head);
        n->link3 = NULL;
        n->sk = NULL;
        n->when = skb->when;
        n->dev = skb->dev;
        n->h.raw = skb->h.raw + offset;
        n->mac.raw = skb->mac.raw + offset;
        n->ip_hdr = (struct iphdr *)(((char *)skb->ip_hdr) + offset);
        n->saddr = skb->saddr;
        n->daddr = skb->daddr;
        n->raddr = skb->raddr;
        n->seq = skb->seq;
        n->end_seq = skb->end_seq;
        n->ack_seq = skb->ack_seq;
        n->acked = skb->acked;
        memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
        n->used = skb->used;
        n->free = 1;
        n->arp = skb->arp;
        n->tries = 0;
        n->lock = 0;
        n->users = 0;
        n->pkt_type = skb->pkt_type;
        n->stamp = skb->stamp;

        IS_SKB(n);
        return n;
}
#endif

/*
 * Device lock handling: a driver holds the lock while it owns the buffer
 * for transmission, so the protocol layers cannot free it underneath it.
 */
void skb_device_lock(struct sk_buff *skb)
{
        if (skb->lock)
                printk("double lock on device queue!\n");
        else
                net_locked++;
        skb->lock++;
}

void skb_device_unlock(struct sk_buff *skb)
{
        if (skb->lock == 0)
                printk("double unlock on device queue!\n");
        skb->lock--;
        if (skb->lock == 0)
                net_locked--;
}

/* Unlock a buffer from driver use and free it if it was marked for freeing. */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        if (skb->lock == 1)
                net_locked--;

        if (!--skb->lock && (skb->free == 1 || skb->free == 3))
        {
                restore_flags(flags);
                kfree_skb(skb,mode);
        }
        else
                restore_flags(flags);
}

/*
 * Allocate a receive buffer for a device driver, with 16 bytes reserved
 * at the head so that following headers stay aligned.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
        struct sk_buff *skb;

        skb = alloc_skb(length + 16, GFP_ATOMIC);
        if (skb)
                skb_reserve(skb,16);
        return skb;
}
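
/*
 * Illustrative sketch (hypothetical driver variables, not from this file):
 * an Ethernet receive routine would typically use dev_alloc_skb() so that
 * the 16 reserved bytes keep the IP header word-aligned behind the 14 byte
 * Ethernet header:
 *
 *      skb = dev_alloc_skb(pkt_len);
 *      if (skb != NULL)
 *              memcpy(skb_put(skb, pkt_len), ring_buffer, pkt_len);
 */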

/* Report whether a buffer is currently locked by a device driver. */
int skb_device_locked(struct sk_buff *skb)
{
        return skb->lock ? 1 : 0;
}