This source file includes following definitions.
- show_net_buffers
- skb_check
- skb_queue_head_init
- skb_queue_head
- __skb_queue_head
- skb_queue_tail
- __skb_queue_tail
- skb_dequeue
- __skb_dequeue
- skb_insert
- __skb_insert
- skb_append
- skb_unlink
- __skb_unlink
- skb_put
- skb_push
- skb_pull
- skb_headroom
- skb_tailroom
- skb_reserve
- skb_trim
- kfree_skb
- alloc_skb
- __kfree_skbmem
- kfree_skbmem
- skb_clone
- skb_copy
- skb_device_lock
- skb_device_unlock
- dev_kfree_skb
- dev_alloc_skb
- skb_device_locked
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35 #include <linux/config.h>
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <asm/segment.h>
40 #include <asm/system.h>
41 #include <linux/mm.h>
42 #include <linux/interrupt.h>
43 #include <linux/in.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <net/ip.h>
47 #include <net/protocol.h>
48 #include <linux/string.h>
49 #include <net/route.h>
50 #include <net/tcp.h>
51 #include <net/udp.h>
52 #include <linux/skbuff.h>
53 #include <net/sock.h>
54
55
56
57
58
59
/* Global sk_buff accounting counters, reported by show_net_buffers(). */
atomic_t net_skbcount = 0;	/* sk_buffs currently allocated */
atomic_t net_locked = 0;	/* sk_buffs currently locked by device drivers */
atomic_t net_allocs = 0;	/* total successful buffer allocations */
atomic_t net_fails = 0;		/* total failed buffer allocations */
atomic_t net_free_locked = 0;	/* frees deferred because the driver held a lock */

extern atomic_t ip_frag_mem;	/* memory used by IP fragment queues (IP layer) */
67
/*
 *	Dump the networking-buffer accounting counters to the kernel log.
 *	Purely informational debug output; no state is modified.
 */
void show_net_buffers(void)
{
	printk("Networking buffers in use : %u\n",net_skbcount);
	printk("Network buffers locked by drivers : %u\n",net_locked);
	printk("Total network buffer allocations : %u\n",net_allocs);
	printk("Total failed network buffer allocs : %u\n",net_fails);
	printk("Total free while locked events : %u\n",net_free_locked);
#ifdef CONFIG_INET
	printk("IP fragment buffer size : %u\n",ip_frag_mem);
#endif
}
79
#if CONFIG_SKB_CHECK

/*
 *	Debug-build sanity check of an sk_buff, or (when 'head' is non-zero)
 *	of an sk_buff_head.  Validates the magic debug cookies, the doubly
 *	linked list pointers and the head <= data <= tail <= end buffer
 *	invariants.
 *
 *	Returns 0 if the buffer looks sane, -1 after printing the calling
 *	file/line on the first inconsistency found.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
	if (head) {
		/* Queue-head checks: its own cookie and both neighbours
		   must carry a valid head or good-skb cookie. */
		if (skb->magic_debug_cookie != SK_HEAD_SKB) {
			printk("File: %s Line %d, found a bad skb-head\n",
				file,line);
			return -1;
		}
		if (!skb->next || !skb->prev) {
			printk("skb_check: head without next or prev\n");
			return -1;
		}
		if (skb->next->magic_debug_cookie != SK_HEAD_SKB
			&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad next head-skb member\n",
				file,line);
			return -1;
		}
		if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
			&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad prev head-skb member\n",
				file,line);
			return -1;
		}
#if 0
		/* Disabled: walk (part of) the queue and check each element. */
		{
		struct sk_buff *skb2 = skb->next;
		int i = 0;
		while (skb2 != skb && i < 5) {
			if (skb_check(skb2, 0, line, file) < 0) {
				printk("bad queue element in whole queue\n");
				return -1;
			}
			i++;
			skb2 = skb2->next;
		}
		}
#endif
		return 0;
	}

	/* Ordinary skb: neighbours (if linked) must carry valid cookies. */
	if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
		&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad next skb member\n",
			file,line);
		return -1;
	}
	if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
		&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad prev skb member\n",
			file,line);
		return -1;
	}

	/* Use-after-free detection via the freed-cookie marker. */
	if(skb->magic_debug_cookie==SK_FREED_SKB)
	{
		printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
			file,line);
		printk("skb=%p, real size=%d, free=%d\n",
			skb,skb->truesize,skb->free);
		return -1;
	}
	if(skb->magic_debug_cookie!=SK_GOOD_SKB)
	{
		printk("File: %s Line %d, passed a non skb!\n", file,line);
		printk("skb=%p, real size=%d, free=%d\n",
			skb,skb->truesize,skb->free);
		return -1;
	}

	/* Buffer-pointer invariants: head <= data <= tail <= end. */
	if(skb->head>skb->data)
	{
		printk("File: %s Line %d, head > data !\n", file,line);
		printk("skb=%p, head=%p, data=%p\n",
			skb,skb->head,skb->data);
		return -1;
	}
	if(skb->tail>skb->end)
	{
		printk("File: %s Line %d, tail > end!\n", file,line);
		printk("skb=%p, tail=%p, end=%p\n",
			skb,skb->tail,skb->end);
		return -1;
	}
	if(skb->data>skb->tail)
	{
		printk("File: %s Line %d, data > tail!\n", file,line);
		printk("skb=%p, data=%p, tail=%p\n",
			skb,skb->data,skb->tail);
		return -1;
	}
	/* len must equal the occupied span between data and tail. */
	if(skb->tail-skb->data!=skb->len)
	{
		printk("File: %s Line %d, wrong length\n", file,line);
		printk("skb=%p, data=%p, end=%p len=%ld\n",
			skb,skb->data,skb->end,skb->len);
		return -1;
	}
	/* The sk_buff header lives at the end of the allocation, so the
	   data area must end at or before the header itself. */
	if((unsigned long) skb->end > (unsigned long) skb)
	{
		printk("File: %s Line %d, control overrun\n", file,line);
		printk("skb=%p, end=%p\n",
			skb,skb->end);
		return -1;
	}

	/* All checks passed. */
	return 0;
}
#endif
195
196
197 #if CONFIG_SKB_CHECK
/*
 *	Initialise an sk_buff_head to the empty state.  The queue is a
 *	circular doubly linked list, so an empty head points at itself.
 */
void skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
	list->magic_debug_cookie = SK_HEAD_SKB;
}
205
206
207
208
209
/*
 *	Insert 'newsk' at the front of the queue.  Interrupt-safe: the
 *	list manipulation runs with interrupts disabled.
 */
void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
	unsigned long flags;
	struct sk_buff *list = (struct sk_buff *)list_;

	save_flags(flags);
	cli();

	IS_SKB(newsk);
	IS_SKB_HEAD(list);
	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue head: sk_buff on list!\n");

	/* Link newsk between the head and the current first element. */
	newsk->next = list->next;
	newsk->prev = list;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = list_;
	list_->qlen++;

	restore_flags(flags);
}
233
/*
 *	Insert 'newsk' at the front of the queue.  Lock-free variant of
 *	skb_queue_head(): the caller must already have interrupts disabled
 *	or otherwise guarantee exclusive access to the list.
 */
void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
	struct sk_buff *list = (struct sk_buff *)list_;


	IS_SKB(newsk);
	IS_SKB_HEAD(list);
	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue head: sk_buff on list!\n");

	/* Link newsk between the head and the current first element. */
	newsk->next = list->next;
	newsk->prev = list;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = list_;
	list_->qlen++;

}
253
254
255
256
/*
 *	Append 'newsk' at the tail of the queue.  Interrupt-safe: the
 *	list manipulation runs with interrupts disabled.
 */
void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
	unsigned long flags;
	struct sk_buff *list = (struct sk_buff *)list_;

	save_flags(flags);
	cli();

	/* A buffer being queued must not already be on a list. */
	if (newsk->next || newsk->prev)
		printk("Suspicious queue tail: sk_buff on list!\n");
	IS_SKB(newsk);
	IS_SKB_HEAD(list);

	/* Link newsk between the current last element and the head. */
	newsk->next = list;
	newsk->prev = list->prev;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;

	newsk->list = list_;
	list_->qlen++;

	restore_flags(flags);
}
281
282 void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
283 {
284 unsigned long flags;
285 struct sk_buff *list = (struct sk_buff *)list_;
286
287 if (newsk->next || newsk->prev)
288 printk("Suspicious queue tail: sk_buff on list!\n");
289 IS_SKB(newsk);
290 IS_SKB_HEAD(list);
291
292 newsk->next = list;
293 newsk->prev = list->prev;
294
295 newsk->next->prev = newsk;
296 newsk->prev->next = newsk;
297
298 newsk->list = list_;
299 list_->qlen++;
300 }
301
302
303
304
305
306
307 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
308 {
309 long flags;
310 struct sk_buff *result;
311 struct sk_buff *list = (struct sk_buff *)list_;
312
313 save_flags(flags);
314 cli();
315
316 IS_SKB_HEAD(list);
317
318 result = list->next;
319 if (result == list) {
320 restore_flags(flags);
321 return NULL;
322 }
323
324 result->next->prev = list;
325 list->next = result->next;
326
327 result->next = NULL;
328 result->prev = NULL;
329 list_->qlen--;
330 result->list = NULL;
331
332 restore_flags(flags);
333
334 IS_SKB(result);
335 return result;
336 }
337
/*
 *	Remove and return the first buffer on the queue, or NULL if the
 *	queue is empty.  Lock-free variant of skb_dequeue(): caller must
 *	guarantee exclusive access to the list.
 */
struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
{
	struct sk_buff *result;
	struct sk_buff *list = (struct sk_buff *)list_;

	IS_SKB_HEAD(list);

	result = list->next;
	if (result == list) {
		/* Queue empty: head points at itself. */
		return NULL;
	}

	/* Unlink the first element from the circular list. */
	result->next->prev = list;
	list->next = result->next;

	result->next = NULL;
	result->prev = NULL;
	list_->qlen--;
	result->list = NULL;

	IS_SKB(result);
	return result;
}
361
362
363
364
/*
 *	Insert 'newsk' immediately before 'old' on whatever queue 'old'
 *	is currently on.  Interrupt-safe.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	if(!old->next || !old->prev)
		printk("insert before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("inserted item is already on a list.\n");

	save_flags(flags);
	cli();
	/* Splice newsk in between old->prev and old. */
	newsk->next = old;
	newsk->prev = old->prev;
	old->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = old->list;	/* inherit old's queue */
	newsk->list->qlen++;

	restore_flags(flags);
}
388
389
390
391
392
/*
 *	Insert 'newsk' between two adjacent list elements 'prev' and
 *	'next' on queue 'list'.  Lock-free: caller must guarantee
 *	exclusive access to the list.
 */
void __skb_insert(struct sk_buff *newsk,
	struct sk_buff * prev, struct sk_buff *next,
	struct sk_buff_head * list)
{
	IS_SKB(prev);
	IS_SKB(newsk);
	IS_SKB(next);

	if(!prev->next || !prev->prev)
		printk("insert after unlisted item!\n");
	if(!next->next || !next->prev)
		printk("insert before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("inserted item is already on a list.\n");

	/* Splice newsk in between prev and next. */
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;

}
416
417
418
419
/*
 *	Insert 'newsk' immediately after 'old' on whatever queue 'old'
 *	is currently on.  Interrupt-safe.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	if(!old->next || !old->prev)
		printk("append before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("append item is already on a list.\n");

	save_flags(flags);
	cli();

	/* Splice newsk in between old and old->next. */
	newsk->prev = old;
	newsk->next = old->next;
	newsk->next->prev = newsk;
	old->next = newsk;
	newsk->list = old->list;	/* inherit old's queue */
	newsk->list->qlen++;

	restore_flags(flags);
}
444
445
446
447
448
449
450
/*
 *	Remove an skb from whatever queue it is on.  Safe to call on an
 *	unqueued buffer (silently a no-op unless PARANOID_BUGHUNT_MODE).
 *	Interrupt-safe.
 */
void skb_unlink(struct sk_buff *skb)
{
	unsigned long flags;

	save_flags(flags);
	cli();

	IS_SKB(skb);

	if(skb->list)
	{
		skb->list->qlen--;
		/* Close the gap in the circular list, then clear links. */
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
		skb->next = NULL;
		skb->prev = NULL;
		skb->list = NULL;
	}
#ifdef PARANOID_BUGHUNT_MODE
	else
		printk("skb_unlink: not a linked element\n");
#endif
	restore_flags(flags);
}
475
/*
 *	Remove an skb from whatever queue it is on.  Lock-free variant of
 *	skb_unlink(): caller must guarantee exclusive access to the list.
 */
void __skb_unlink(struct sk_buff *skb)
{
	IS_SKB(skb);

	if(skb->list)
	{
		skb->list->qlen--;
		/* Close the gap in the circular list, then clear links. */
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
		skb->next = NULL;
		skb->prev = NULL;
		skb->list = NULL;
	}
#ifdef PARANOID_BUGHUNT_MODE
	else
		printk("skb_unlink: not a linked element\n");
#endif
}
494
495
496
497
498
/*
 *	Extend the used data area by 'len' bytes at the tail and return a
 *	pointer to the first byte of the new area.  Panics if the buffer
 *	would overrun past skb->end.
 */
unsigned char *skb_put(struct sk_buff *skb, int len)
{
	unsigned char *tmp=skb->tail;	/* old tail = start of new area */
	IS_SKB(skb);
	skb->tail+=len;
	skb->len+=len;
	IS_SKB(skb);
	if(skb->tail>skb->end)
		panic("skput:over: %p:%d", __builtin_return_address(0),len);
	return tmp;
}
510
/*
 *	Extend the used data area by 'len' bytes at the front (into the
 *	headroom) and return the new data pointer.  Panics if the buffer
 *	would underrun below skb->head.
 */
unsigned char *skb_push(struct sk_buff *skb, int len)
{
	IS_SKB(skb);
	skb->data-=len;
	skb->len+=len;
	IS_SKB(skb);
	if(skb->data<skb->head)
		panic("skpush:under: %p:%d", __builtin_return_address(0),len);
	return skb->data;
}
521
522 unsigned char * skb_pull(struct sk_buff *skb, int len)
523 {
524 IS_SKB(skb);
525 if(len>skb->len)
526 return 0;
527 skb->data+=len;
528 skb->len-=len;
529 return skb->data;
530 }
531
532 int skb_headroom(struct sk_buff *skb)
533 {
534 IS_SKB(skb);
535 return skb->data-skb->head;
536 }
537
538 int skb_tailroom(struct sk_buff *skb)
539 {
540 IS_SKB(skb);
541 return skb->end-skb->tail;
542 }
543
/*
 *	Reserve 'len' bytes of headroom on an empty buffer by moving both
 *	data and tail forward.  Panics on overrun past end or (with a
 *	negative len) underrun below head.
 */
void skb_reserve(struct sk_buff *skb, int len)
{
	IS_SKB(skb);
	skb->data+=len;
	skb->tail+=len;
	if(skb->tail>skb->end)
		panic("sk_res: over");
	if(skb->data<skb->head)
		panic("sk_res: under");
	IS_SKB(skb);
}
555
556 void skb_trim(struct sk_buff *skb, int len)
557 {
558 IS_SKB(skb);
559 if(skb->len>len)
560 {
561 skb->len=len;
562 skb->tail=skb->data+len;
563 }
564 }
565
566
567
568 #endif
569
570
571
572
573
574
/*
 *	Free an skb.  'rw' selects the accounting pool it came from:
 *	non-zero = read (receive) memory, zero = write (send) memory.
 *
 *	A buffer still locked by a device driver is not freed here: it is
 *	marked free==3 and released later when the driver unlocks it
 *	(see dev_kfree_skb()).
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock)
	{
		skb->free = 3;    /* defer: free when the driver unlocks */
		net_free_locked++;
		return;
	}
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->list)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));

	if(skb->destructor)
		skb->destructor(skb);
	if (skb->sk)
	{
		struct sock * sk = skb->sk;
		if(sk->prot!=NULL)
		{
			/* Protocol-owned buffer: the socket layer frees it
			   and updates its own memory accounting. */
			if (rw)
				sock_rfree(sk, skb);
			else
				sock_wfree(sk, skb);

		}
		else
		{
			/* Socket without a protocol: adjust the raw memory
			   counters here, then release the buffer memory. */
			if (rw)
				atomic_sub(skb->truesize, &sk->rmem_alloc);
			else {
				if(!sk->dead)
					sk->write_space(sk);	/* wake any writer */
				atomic_sub(skb->truesize, &sk->wmem_alloc);
			}
			kfree_skbmem(skb);
		}
	}
	else
		kfree_skbmem(skb);
}
627
628
629
630
631
632 struct sk_buff *alloc_skb(unsigned int size,int priority)
633 {
634 struct sk_buff *skb;
635 int len=size;
636 unsigned char *bptr;
637
638 if (intr_count && priority!=GFP_ATOMIC)
639 {
640 static int count = 0;
641 if (++count < 5) {
642 printk("alloc_skb called nonatomically from interrupt %p\n",
643 __builtin_return_address(0));
644 priority = GFP_ATOMIC;
645 }
646 }
647
648 size=(size+15)&~15;
649 size+=sizeof(struct sk_buff);
650
651
652
653
654
655 bptr=(unsigned char *)kmalloc(size,priority);
656 if (bptr == NULL)
657 {
658 net_fails++;
659 return NULL;
660 }
661 #ifdef PARANOID_BUGHUNT_MODE
662 if(skb->magic_debug_cookie == SK_GOOD_SKB)
663 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
664 #endif
665
666
667
668
669
670
671
672 net_allocs++;
673
674 skb=(struct sk_buff *)(bptr+size)-1;
675
676 skb->count = 1;
677 skb->data_skb = NULL;
678
679 skb->free = 2;
680 skb->lock = 0;
681 skb->pkt_type = PACKET_HOST;
682 skb->prev = skb->next = skb->link3 = NULL;
683 skb->list = NULL;
684 skb->sk = NULL;
685 skb->truesize=size;
686 skb->localroute=0;
687 skb->stamp.tv_sec=0;
688 skb->localroute = 0;
689 skb->ip_summed = 0;
690 memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
691 net_skbcount++;
692 #if CONFIG_SKB_CHECK
693 skb->magic_debug_cookie = SK_GOOD_SKB;
694 #endif
695 skb->users = 0;
696
697 skb->head=bptr;
698 skb->data=bptr;
699 skb->tail=bptr;
700 skb->end=bptr+len;
701 skb->len=0;
702 skb->destructor=NULL;
703 return skb;
704 }
705
706
707
708
709
/*
 *	Drop one reference on a data-owning skb and free its combined
 *	data+header allocation (which starts at skb->head) when the
 *	reference count reaches zero.
 */
static inline void __kfree_skbmem(struct sk_buff *skb)
{

	if (atomic_dec_and_test(&skb->count)) {
		kfree(skb->head);
		atomic_dec(&net_skbcount);
	}
}
718
/*
 *	Release an skb's storage once its reference count drops to zero.
 *	A clone (data_skb set) has a separately kmalloc'd header: free the
 *	header itself and drop one reference on the data owner via
 *	__kfree_skbmem().  A data owner is freed from skb->head, the start
 *	of its combined allocation.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	void * addr = skb->head;	/* default: combined allocation */


	if (atomic_dec_and_test(&skb->count)) {
		/* Clone: free only the header, unref the real owner. */
		if (skb->data_skb) {
			addr = skb;
			__kfree_skbmem(skb->data_skb);
		}
		kfree(addr);
		atomic_dec(&net_skbcount);
	}
}
734
735
736
737
738
739
/*
 *	Duplicate an skb header while sharing the underlying data buffer.
 *	The clone's data_skb points at the real data owner (never at
 *	another clone), whose refcount is bumped.  The clone starts
 *	unowned, unlocked and off all lists.  Returns NULL on allocation
 *	failure.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;

	IS_SKB(skb);
	n = kmalloc(sizeof(*n), priority);
	if (!n)
		return NULL;
	memcpy(n, skb, sizeof(*n));
	n->count = 1;
	/* Always reference the true data owner, not a prior clone. */
	if (skb->data_skb)
		skb = skb->data_skb;
	atomic_inc(&skb->count);
	atomic_inc(&net_allocs);
	atomic_inc(&net_skbcount);
	n->data_skb = skb;
	n->next = n->prev = n->link3 = NULL;
	n->list = NULL;
	n->sk = NULL;
	n->free = 1;
	n->tries = 0;
	n->lock = 0;
	n->users = 0;
	return n;
}
765
766
767
768
769
/*
 *	Make a completely private copy of an skb: a fresh buffer of the
 *	same size, with the data duplicated and all header pointers
 *	relocated into the new buffer.  The copy starts unowned
 *	(sk == NULL), off-list and with free == 1.  Returns NULL on
 *	allocation failure.
 */
struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;
	unsigned long offset;

	IS_SKB(skb);

	/* Allocate a buffer of the same total data capacity. */
	n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
	if(n==NULL)
		return NULL;

	/* Delta for relocating pointers into the new buffer. */
	offset=n->head-skb->head;

	/* Reproduce the original's headroom... */
	skb_reserve(n,skb->data-skb->head);
	/* ...and data length. */
	skb_put(n,skb->len);

	memcpy(n->head,skb->head,skb->end-skb->head);
	n->link3=NULL;
	n->list=NULL;
	n->sk=NULL;
	n->when=skb->when;
	n->dev=skb->dev;
	/* Protocol header pointers, shifted into the new buffer. */
	n->h.raw=skb->h.raw+offset;
	n->mac.raw=skb->mac.raw+offset;
	n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
	n->saddr=skb->saddr;
	n->daddr=skb->daddr;
	n->raddr=skb->raddr;
	n->seq=skb->seq;
	n->end_seq=skb->end_seq;
	n->ack_seq=skb->ack_seq;
	n->acked=skb->acked;
	memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
	n->used=skb->used;
	n->free=1;
	n->arp=skb->arp;
	n->tries=0;
	n->lock=0;
	n->users=0;
	n->pkt_type=skb->pkt_type;
	n->stamp=skb->stamp;

	IS_SKB(n);
	return n;
}
825
826
827
828
829
830 void skb_device_lock(struct sk_buff *skb)
831 {
832 if(skb->lock)
833 printk("double lock on device queue!\n");
834 else
835 net_locked++;
836 skb->lock++;
837 }
838
839 void skb_device_unlock(struct sk_buff *skb)
840 {
841 if(skb->lock==0)
842 printk("double unlock on device queue!\n");
843 skb->lock--;
844 if(skb->lock==0)
845 net_locked--;
846 }
847
/*
 *	Driver-side free: drop one device lock (if held), then free the
 *	buffer iff it is now unlocked and flagged freeable (free == 1) or
 *	free-pending (free == 3, set by kfree_skb() while locked).
 *	'mode' is passed through to kfree_skb() as its rw argument.
 */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	if(skb->lock)
	{
		net_locked--;
		skb->lock--;
	}
	if (!skb->lock && (skb->free == 1 || skb->free == 3))
	{
		restore_flags(flags);
		kfree_skb(skb,mode);
	}
	else
		restore_flags(flags);
}
867
868 struct sk_buff *dev_alloc_skb(unsigned int length)
869 {
870 struct sk_buff *skb;
871
872 skb = alloc_skb(length+16, GFP_ATOMIC);
873 if (skb)
874 skb_reserve(skb,16);
875 return skb;
876 }
877
878 int skb_device_locked(struct sk_buff *skb)
879 {
880 return skb->lock? 1 : 0;
881 }