This source file includes the following definitions.
- show_net_buffers
- skb_check
- skb_queue_head_init
- skb_queue_head
- __skb_queue_head
- skb_queue_tail
- __skb_queue_tail
- skb_dequeue
- __skb_dequeue
- skb_insert
- __skb_insert
- skb_append
- skb_unlink
- __skb_unlink
- skb_put
- skb_push
- skb_pull
- skb_headroom
- skb_tailroom
- skb_reserve
- skb_trim
- kfree_skb
- alloc_skb
- __kfree_skbmem
- kfree_skbmem
- skb_clone
- skb_copy
- skb_device_lock
- skb_device_unlock
- dev_kfree_skb
- dev_alloc_skb
- skb_device_locked
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35 #include <linux/config.h>
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/interrupt.h>
41 #include <linux/in.h>
42 #include <linux/inet.h>
43 #include <linux/netdevice.h>
44 #include <linux/malloc.h>
45 #include <linux/string.h>
46 #include <linux/skbuff.h>
47
48 #include <net/ip.h>
49 #include <net/protocol.h>
50 #include <net/route.h>
51 #include <net/tcp.h>
52 #include <net/udp.h>
53 #include <net/sock.h>
54
55 #include <asm/segment.h>
56 #include <asm/system.h>
57
58
59
60
61
62 atomic_t net_skbcount = 0;
63 atomic_t net_locked = 0;
64 atomic_t net_allocs = 0;
65 atomic_t net_fails = 0;
66 atomic_t net_free_locked = 0;
67
68 extern atomic_t ip_frag_mem;
69
/*
 *	Dump the global sk_buff accounting counters to the kernel log.
 *	Purely diagnostic: reads the atomic counters without any locking,
 *	so the numbers are a best-effort snapshot.
 */
void show_net_buffers(void)
{
	printk("Networking buffers in use : %u\n",net_skbcount);
	printk("Network buffers locked by drivers : %u\n",net_locked);
	printk("Total network buffer allocations : %u\n",net_allocs);
	printk("Total failed network buffer allocs : %u\n",net_fails);
	printk("Total free while locked events : %u\n",net_free_locked);
#ifdef CONFIG_INET
	/* Memory currently held by the IP reassembly queues. */
	printk("IP fragment buffer size : %u\n",ip_frag_mem);
#endif
}
81
82 #if CONFIG_SKB_CHECK
83
84
85
86
87
/*
 *	Debug-build sanity check for an sk_buff (or, when 'head' is
 *	non-zero, for a queue head).  'line' and 'file' identify the call
 *	site for the diagnostics.  Returns 0 if the buffer looks sane,
 *	-1 after logging the first inconsistency found.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
	if (head) {
		/* Queue-head checks: the head cookie, both links present,
		   and each neighbour carrying a valid cookie. */
		if (skb->magic_debug_cookie != SK_HEAD_SKB) {
			printk("File: %s Line %d, found a bad skb-head\n",
				file,line);
			return -1;
		}
		if (!skb->next || !skb->prev) {
			printk("skb_check: head without next or prev\n");
			return -1;
		}
		if (skb->next->magic_debug_cookie != SK_HEAD_SKB
			&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad next head-skb member\n",
				file,line);
			return -1;
		}
		if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
			&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad prev head-skb member\n",
				file,line);
			return -1;
		}
#if 0
		/* Disabled: walk the first few queue members and check each. */
		{
		struct sk_buff *skb2 = skb->next;
		int i = 0;
		while (skb2 != skb && i < 5) {
			if (skb_check(skb2, 0, line, file) < 0) {
				printk("bad queue element in whole queue\n");
				return -1;
			}
			i++;
			skb2 = skb2->next;
		}
		}
#endif
		return 0;
	}

	/* Ordinary buffer: neighbours, if linked, must carry valid cookies. */
	if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
		&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad next skb member\n",
			file,line);
		return -1;
	}
	if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
		&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad prev skb member\n",
			file,line);
		return -1;
	}

	/* Catch use-after-free and non-skb pointers via the magic cookie. */
	if(skb->magic_debug_cookie==SK_FREED_SKB)
	{
		printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
			file,line);
		printk("skb=%p, real size=%d, free=%d\n",
			skb,skb->truesize,skb->free);
		return -1;
	}
	if(skb->magic_debug_cookie!=SK_GOOD_SKB)
	{
		printk("File: %s Line %d, passed a non skb!\n", file,line);
		printk("skb=%p, real size=%d, free=%d\n",
			skb,skb->truesize,skb->free);
		return -1;
	}

	/* Pointer geometry: head <= data <= tail <= end must always hold. */
	if(skb->head>skb->data)
	{
		printk("File: %s Line %d, head > data !\n", file,line);
		printk("skb=%p, head=%p, data=%p\n",
			skb,skb->head,skb->data);
		return -1;
	}
	if(skb->tail>skb->end)
	{
		printk("File: %s Line %d, tail > end!\n", file,line);
		printk("skb=%p, tail=%p, end=%p\n",
			skb,skb->tail,skb->end);
		return -1;
	}
	if(skb->data>skb->tail)
	{
		printk("File: %s Line %d, data > tail!\n", file,line);
		printk("skb=%p, data=%p, tail=%p\n",
			skb,skb->data,skb->tail);
		return -1;
	}

	/* The cached length must agree with the data pointers. */
	if(skb->tail-skb->data!=skb->len)
	{
		printk("File: %s Line %d, wrong length\n", file,line);
		printk("skb=%p, data=%p, end=%p len=%ld\n",
			skb,skb->data,skb->end,skb->len);
		return -1;
	}

	/* The control structure lives at the end of the allocation, after
	   the data area, so end must not point past the skb itself. */
	if((unsigned long) skb->end > (unsigned long) skb)
	{
		printk("File: %s Line %d, control overrun\n", file,line);
		printk("skb=%p, end=%p\n",
			skb,skb->end);
		return -1;
	}

	return 0;
}
196 #endif
197
198
199 #if CONFIG_SKB_CHECK
200 void skb_queue_head_init(struct sk_buff_head *list)
201 {
202 list->prev = (struct sk_buff *)list;
203 list->next = (struct sk_buff *)list;
204 list->qlen = 0;
205 list->magic_debug_cookie = SK_HEAD_SKB;
206 }
207
208
209
210
211
/*
 *	Insert a buffer at the front of a queue.  Interrupt-safe: the
 *	list surgery is done with interrupts disabled via cli().
 */
void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
	unsigned long flags;
	struct sk_buff *list = (struct sk_buff *)list_;

	save_flags(flags);
	cli();

	IS_SKB(newsk);
	IS_SKB_HEAD(list);
	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue head: sk_buff on list!\n");

	/* Splice newsk in between the head and the old first element. */
	newsk->next = list->next;
	newsk->prev = list;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = list_;
	list_->qlen++;

	restore_flags(flags);
}
235
/*
 *	Insert a buffer at the front of a queue without disabling
 *	interrupts.  The caller must guarantee the queue cannot be
 *	touched concurrently (e.g. interrupts already off).
 */
void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
	struct sk_buff *list = (struct sk_buff *)list_;


	IS_SKB(newsk);
	IS_SKB_HEAD(list);
	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue head: sk_buff on list!\n");

	/* Splice newsk in between the head and the old first element. */
	newsk->next = list->next;
	newsk->prev = list;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = list_;
	list_->qlen++;

}
255
256
257
258
/*
 *	Append a buffer to the end of a queue.  Interrupt-safe: the list
 *	surgery is done with interrupts disabled via cli().
 */
void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
	unsigned long flags;
	struct sk_buff *list = (struct sk_buff *)list_;

	save_flags(flags);
	cli();

	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue tail: sk_buff on list!\n");
	IS_SKB(newsk);
	IS_SKB_HEAD(list);

	/* Splice newsk in between the old last element and the head. */
	newsk->next = list;
	newsk->prev = list->prev;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;

	newsk->list = list_;
	list_->qlen++;

	restore_flags(flags);
}
283
284 void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
285 {
286 unsigned long flags;
287 struct sk_buff *list = (struct sk_buff *)list_;
288
289 if (newsk->next || newsk->prev)
290 printk("Suspicious queue tail: sk_buff on list!\n");
291 IS_SKB(newsk);
292 IS_SKB_HEAD(list);
293
294 newsk->next = list;
295 newsk->prev = list->prev;
296
297 newsk->next->prev = newsk;
298 newsk->prev->next = newsk;
299
300 newsk->list = list_;
301 list_->qlen++;
302 }
303
304
305
306
307
308
309 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
310 {
311 long flags;
312 struct sk_buff *result;
313 struct sk_buff *list = (struct sk_buff *)list_;
314
315 save_flags(flags);
316 cli();
317
318 IS_SKB_HEAD(list);
319
320 result = list->next;
321 if (result == list) {
322 restore_flags(flags);
323 return NULL;
324 }
325
326 result->next->prev = list;
327 list->next = result->next;
328
329 result->next = NULL;
330 result->prev = NULL;
331 list_->qlen--;
332 result->list = NULL;
333
334 restore_flags(flags);
335
336 IS_SKB(result);
337 return result;
338 }
339
340 struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
341 {
342 struct sk_buff *result;
343 struct sk_buff *list = (struct sk_buff *)list_;
344
345 IS_SKB_HEAD(list);
346
347 result = list->next;
348 if (result == list) {
349 return NULL;
350 }
351
352 result->next->prev = list;
353 list->next = result->next;
354
355 result->next = NULL;
356 result->prev = NULL;
357 list_->qlen--;
358 result->list = NULL;
359
360 IS_SKB(result);
361 return result;
362 }
363
364
365
366
/*
 *	Insert 'newsk' immediately before 'old' on whatever list 'old'
 *	is on.  Interrupt-safe: the pointer surgery runs under cli().
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	/* 'old' must be linked; 'newsk' must not be. */
	if(!old->next || !old->prev)
		printk("insert before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("inserted item is already on a list.\n");

	save_flags(flags);
	cli();
	newsk->next = old;
	newsk->prev = old->prev;
	old->prev = newsk;
	newsk->prev->next = newsk;
	/* The new element joins old's list and bumps its length. */
	newsk->list = old->list;
	newsk->list->qlen++;

	restore_flags(flags);
}
390
391
392
393
394
395 void __skb_insert(struct sk_buff *newsk,
396 struct sk_buff * prev, struct sk_buff *next,
397 struct sk_buff_head * list)
398 {
399 IS_SKB(prev);
400 IS_SKB(newsk);
401 IS_SKB(next);
402
403 if(!prev->next || !prev->prev)
404 printk("insert after unlisted item!\n");
405 if(!next->next || !next->prev)
406 printk("insert before unlisted item!\n");
407 if(newsk->next || newsk->prev)
408 printk("inserted item is already on a list.\n");
409
410 newsk->next = next;
411 newsk->prev = prev;
412 next->prev = newsk;
413 prev->next = newsk;
414 newsk->list = list;
415 list->qlen++;
416
417 }
418
419
420
421
/*
 *	Insert 'newsk' immediately after 'old' on whatever list 'old' is
 *	on.  Interrupt-safe: the pointer surgery runs under cli().
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	/* 'old' must be linked; 'newsk' must not be. */
	if(!old->next || !old->prev)
		printk("append before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("append item is already on a list.\n");

	save_flags(flags);
	cli();

	newsk->prev = old;
	newsk->next = old->next;
	newsk->next->prev = newsk;
	old->next = newsk;
	/* The new element joins old's list and bumps its length. */
	newsk->list = old->list;
	newsk->list->qlen++;

	restore_flags(flags);
}
446
447
448
449
450
451
452
/*
 *	Remove a buffer from whatever list it is on.  Safe to call on an
 *	unlisted buffer (then a no-op, unless PARANOID_BUGHUNT_MODE logs
 *	it).  Interrupt-safe via cli().
 */
void skb_unlink(struct sk_buff *skb)
{
	unsigned long flags;

	save_flags(flags);
	cli();

	IS_SKB(skb);

	if(skb->list)
	{
		skb->list->qlen--;
		/* Bridge the neighbours around skb, then clear its links. */
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
		skb->next = NULL;
		skb->prev = NULL;
		skb->list = NULL;
	}
#ifdef PARANOID_BUGHUNT_MODE
	else
		printk("skb_unlink: not a linked element\n");
#endif
	restore_flags(flags);
}
477
478 void __skb_unlink(struct sk_buff *skb)
479 {
480 IS_SKB(skb);
481
482 if(skb->list)
483 {
484 skb->list->qlen--;
485 skb->next->prev = skb->prev;
486 skb->prev->next = skb->next;
487 skb->next = NULL;
488 skb->prev = NULL;
489 skb->list = NULL;
490 }
491 #ifdef PARANOID_BUGHUNT_MODE
492 else
493 printk("skb_unlink: not a linked element\n");
494 #endif
495 }
496
497
498
499
500
501 unsigned char *skb_put(struct sk_buff *skb, int len)
502 {
503 unsigned char *tmp=skb->tail;
504 IS_SKB(skb);
505 skb->tail+=len;
506 skb->len+=len;
507 IS_SKB(skb);
508 if(skb->tail>skb->end)
509 panic("skput:over: %p:%d", __builtin_return_address(0),len);
510 return tmp;
511 }
512
513 unsigned char *skb_push(struct sk_buff *skb, int len)
514 {
515 IS_SKB(skb);
516 skb->data-=len;
517 skb->len+=len;
518 IS_SKB(skb);
519 if(skb->data<skb->head)
520 panic("skpush:under: %p:%d", __builtin_return_address(0),len);
521 return skb->data;
522 }
523
524 unsigned char * skb_pull(struct sk_buff *skb, int len)
525 {
526 IS_SKB(skb);
527 if(len>skb->len)
528 return 0;
529 skb->data+=len;
530 skb->len-=len;
531 return skb->data;
532 }
533
534 int skb_headroom(struct sk_buff *skb)
535 {
536 IS_SKB(skb);
537 return skb->data-skb->head;
538 }
539
540 int skb_tailroom(struct sk_buff *skb)
541 {
542 IS_SKB(skb);
543 return skb->end-skb->tail;
544 }
545
546 void skb_reserve(struct sk_buff *skb, int len)
547 {
548 IS_SKB(skb);
549 skb->data+=len;
550 skb->tail+=len;
551 if(skb->tail>skb->end)
552 panic("sk_res: over");
553 if(skb->data<skb->head)
554 panic("sk_res: under");
555 IS_SKB(skb);
556 }
557
558 void skb_trim(struct sk_buff *skb, int len)
559 {
560 IS_SKB(skb);
561 if(skb->len>len)
562 {
563 skb->len=len;
564 skb->tail=skb->data+len;
565 }
566 }
567
568
569
570 #endif
571
572
573
574
575
576
/*
 *	Free an sk_buff.  'rw' selects the socket memory pool the buffer
 *	was charged to: non-zero for the receive side, zero for the
 *	write (send) side.  A buffer still locked by a driver is only
 *	marked for freeing; the actual release happens on unlock.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	/* Driver still owns the buffer: defer the free to unlock time. */
	if (skb->lock)
	{
		skb->free = 3;
		net_free_locked++;
		return;
	}
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->list)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));

	if(skb->destructor)
		skb->destructor(skb);
	if (skb->sk)
	{
		struct sock * sk = skb->sk;
		if(sk->prot!=NULL)
		{
			/* Protocol-owned socket: sock_rfree/sock_wfree
			   adjust the accounting and release the buffer. */
			if (rw)
				sock_rfree(sk, skb);
			else
				sock_wfree(sk, skb);

		}
		else
		{
			/* No protocol ops: undo the memory charge by hand,
			   wake the writer if needed, then free directly. */
			if (rw)
				atomic_sub(skb->truesize, &sk->rmem_alloc);
			else {
				if(!sk->dead)
					sk->write_space(sk);
				atomic_sub(skb->truesize, &sk->wmem_alloc);
			}
			kfree_skbmem(skb);
		}
	}
	else
		kfree_skbmem(skb);
}
629
630
631
632
633
634 struct sk_buff *alloc_skb(unsigned int size,int priority)
635 {
636 struct sk_buff *skb;
637 int len=size;
638 unsigned char *bptr;
639
640 if (intr_count && priority!=GFP_ATOMIC)
641 {
642 static int count = 0;
643 if (++count < 5) {
644 printk("alloc_skb called nonatomically from interrupt %p\n",
645 __builtin_return_address(0));
646 priority = GFP_ATOMIC;
647 }
648 }
649
650 size=(size+15)&~15;
651 size+=sizeof(struct sk_buff);
652
653
654
655
656
657 bptr=(unsigned char *)kmalloc(size,priority);
658 if (bptr == NULL)
659 {
660 net_fails++;
661 return NULL;
662 }
663 #ifdef PARANOID_BUGHUNT_MODE
664 if(skb->magic_debug_cookie == SK_GOOD_SKB)
665 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
666 #endif
667
668
669
670
671
672
673
674 net_allocs++;
675
676 skb=(struct sk_buff *)(bptr+size)-1;
677
678 skb->count = 1;
679 skb->data_skb = NULL;
680
681 skb->free = 2;
682 skb->lock = 0;
683 skb->pkt_type = PACKET_HOST;
684 skb->pkt_bridged = 0;
685 skb->prev = skb->next = skb->link3 = NULL;
686 skb->list = NULL;
687 skb->sk = NULL;
688 skb->truesize=size;
689 skb->localroute=0;
690 skb->stamp.tv_sec=0;
691 skb->localroute = 0;
692 skb->ip_summed = 0;
693 memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
694 net_skbcount++;
695 #if CONFIG_SKB_CHECK
696 skb->magic_debug_cookie = SK_GOOD_SKB;
697 #endif
698 skb->users = 0;
699
700 skb->head=bptr;
701 skb->data=bptr;
702 skb->tail=bptr;
703 skb->end=bptr+len;
704 skb->len=0;
705 skb->destructor=NULL;
706 return skb;
707 }
708
709
710
711
712
/*
 *	Drop one reference on an sk_buff that owns its own data area and
 *	free the whole allocation (head points at the start of the
 *	kmalloc block) when the count reaches zero.
 */
static inline void __kfree_skbmem(struct sk_buff *skb)
{

	if (atomic_dec_and_test(&skb->count)) {
		kfree(skb->head);
		atomic_dec(&net_skbcount);
	}
}
721
/*
 *	Release an sk_buff's memory when its reference count drops to
 *	zero.  For a clone (data_skb set) only the separately kmalloc'ed
 *	header is freed here, and the reference on the data-owning
 *	buffer is dropped via __kfree_skbmem.  For an ordinary buffer
 *	the whole allocation starting at skb->head is freed.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	void * addr = skb->head;


	if (atomic_dec_and_test(&skb->count)) {
		/* Clones free only their own header; the shared data is
		   released when its owner's count reaches zero. */
		if (skb->data_skb) {
			addr = skb;
			__kfree_skbmem(skb->data_skb);
		}
		kfree(addr);
		atomic_dec(&net_skbcount);
	}
}
737
738
739
740
741
742
/*
 *	Duplicate an sk_buff header, sharing the data area with the
 *	original.  The clone's data_skb points at the buffer that really
 *	owns the data, whose reference count is bumped so the data stays
 *	alive until both headers are freed.  Returns NULL on allocation
 *	failure.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;

	IS_SKB(skb);
	n = kmalloc(sizeof(*n), priority);
	if (!n)
		return NULL;
	memcpy(n, skb, sizeof(*n));
	n->count = 1;
	/* If skb is itself a clone, reference the real data owner. */
	if (skb->data_skb)
		skb = skb->data_skb;
	atomic_inc(&skb->count);
	atomic_inc(&net_allocs);
	atomic_inc(&net_skbcount);
	n->data_skb = skb;
	/* The clone starts life unlisted, unowned, and unlocked. */
	n->next = n->prev = n->link3 = NULL;
	n->list = NULL;
	n->sk = NULL;
	n->free = 1;
	n->tries = 0;
	n->lock = 0;
	n->users = 0;
	return n;
}
768
769
770
771
772
/*
 *	Make a fully independent copy of an sk_buff, duplicating the data
 *	area and relocating the header/payload pointers (h, mac, ip_hdr)
 *	into the new buffer.  Returns NULL on allocation failure.
 */
struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;
	unsigned long offset;

	IS_SKB(skb);

	/* truesize includes the control structure; subtract it to get
	   back the data-area size alloc_skb expects. */
	n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
	if(n==NULL)
		return NULL;

	/* Byte distance between the two data areas, used to rebase every
	   pointer that referred into the old buffer. */
	offset=n->head-skb->head;

	/* Reproduce the original headroom and data length. */
	skb_reserve(n,skb->data-skb->head);

	skb_put(n,skb->len);

	memcpy(n->head,skb->head,skb->end-skb->head);
	n->link3=NULL;
	n->list=NULL;
	n->sk=NULL;
	n->when=skb->when;
	n->dev=skb->dev;
	/* Rebase the protocol header pointers into the copy. */
	n->h.raw=skb->h.raw+offset;
	n->mac.raw=skb->mac.raw+offset;
	n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
	n->saddr=skb->saddr;
	n->daddr=skb->daddr;
	n->raddr=skb->raddr;
	n->seq=skb->seq;
	n->end_seq=skb->end_seq;
	n->ack_seq=skb->ack_seq;
	n->acked=skb->acked;
	memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
	n->used=skb->used;
	n->free=1;
	n->arp=skb->arp;
	n->tries=0;
	n->lock=0;
	n->users=0;
	n->pkt_type=skb->pkt_type;
	n->stamp=skb->stamp;

	IS_SKB(n);
	return n;
}
828
829
830
831
832
833 void skb_device_lock(struct sk_buff *skb)
834 {
835 if(skb->lock)
836 printk("double lock on device queue!\n");
837 else
838 net_locked++;
839 skb->lock++;
840 }
841
842 void skb_device_unlock(struct sk_buff *skb)
843 {
844 if(skb->lock==0)
845 printk("double unlock on device queue!\n");
846 skb->lock--;
847 if(skb->lock==0)
848 net_locked--;
849 }
850
/*
 *	Driver-side free: drop the device lock (if held) and free the
 *	buffer when nothing else holds it and it is marked freeable
 *	(free == 1, or free == 3 meaning a kfree_skb was deferred while
 *	the driver held the lock).  The lock manipulation runs under
 *	cli() to stay interrupt-safe.
 */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	if(skb->lock)
	{
		net_locked--;
		skb->lock--;
	}
	if (!skb->lock && (skb->free == 1 || skb->free == 3))
	{
		/* Re-enable interrupts before the potentially longer
		   free path. */
		restore_flags(flags);
		kfree_skb(skb,mode);
	}
	else
		restore_flags(flags);
}
870
871 struct sk_buff *dev_alloc_skb(unsigned int length)
872 {
873 struct sk_buff *skb;
874
875 skb = alloc_skb(length+16, GFP_ATOMIC);
876 if (skb)
877 skb_reserve(skb,16);
878 return skb;
879 }
880
881 int skb_device_locked(struct sk_buff *skb)
882 {
883 return skb->lock? 1 : 0;
884 }