root/net/core/skbuff.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. show_net_buffers
  2. skb_check
  3. skb_queue_head_init
  4. skb_queue_head
  5. __skb_queue_head
  6. skb_queue_tail
  7. __skb_queue_tail
  8. skb_dequeue
  9. __skb_dequeue
  10. skb_insert
  11. __skb_insert
  12. skb_append
  13. skb_unlink
  14. __skb_unlink
  15. skb_put
  16. skb_push
  17. skb_pull
  18. skb_headroom
  19. skb_tailroom
  20. skb_reserve
  21. skb_trim
  22. kfree_skb
  23. alloc_skb
  24. __kfree_skbmem
  25. kfree_skbmem
  26. skb_clone
  27. skb_copy
  28. skb_device_lock
  29. skb_device_unlock
  30. dev_kfree_skb
  31. dev_alloc_skb
  32. skb_device_locked

   1 /*
   2  *      Routines having to do with the 'struct sk_buff' memory handlers.
   3  *
   4  *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
   5  *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   6  *
   7  *      Fixes:  
   8  *              Alan Cox        :       Fixed the worst of the load balancer bugs.
   9  *              Dave Platt      :       Interrupt stacking fix.
  10  *              Richard Kooijman:       Timestamp fixes.
  11  *              Alan Cox        :       Changed buffer format.
  12  *              Alan Cox        :       destructor hook for AF_UNIX etc.
  13  *              Linus Torvalds  :       Better skb_clone.
  14  *              Alan Cox        :       Added skb_copy.
  15  *              Alan Cox        :       Added all the changed routines Linus
  16  *                                      only put in the headers
  17  *              Ray VanTassle   :       Fixed --skb->lock in free
  18  *
  19  *      TO FIX:
  20  *              The __skb_ routines ought to check interrupts are disabled
  21  *      when called, and bitch like crazy if not. Unfortunately I don't think
  22  *      we currently have a portable way to check if interrupts are off - 
  23  *      Linus ???
  24  *
  25  *      This program is free software; you can redistribute it and/or
  26  *      modify it under the terms of the GNU General Public License
  27  *      as published by the Free Software Foundation; either version
  28  *      2 of the License, or (at your option) any later version.
  29  */
  30 
  31 /*
  32  *      The functions in this file will not compile correctly with gcc 2.4.x
  33  */
  34 
  35 #include <linux/config.h>
  36 #include <linux/types.h>
  37 #include <linux/kernel.h>
  38 #include <linux/sched.h>
  39 #include <asm/segment.h>
  40 #include <asm/system.h>
  41 #include <linux/mm.h>
  42 #include <linux/interrupt.h>
  43 #include <linux/in.h>
  44 #include <linux/inet.h>
  45 #include <linux/netdevice.h>
  46 #include <net/ip.h>
  47 #include <net/protocol.h>
  48 #include <linux/string.h>
  49 #include <net/route.h>
  50 #include <net/tcp.h>
  51 #include <net/udp.h>
  52 #include <linux/skbuff.h>
  53 #include <net/sock.h>
  54 
  55 
  56 /*
  57  *      Resource tracking variables
  58  */
  59 
atomic_t net_skbcount = 0;	/* sk_buffs currently in existence */
atomic_t net_locked = 0;	/* sk_buffs currently locked by a device driver */
atomic_t net_allocs = 0;	/* total successful allocations (incl. clones) */
atomic_t net_fails  = 0;	/* total failed allocations */
atomic_t net_free_locked = 0;	/* frees deferred because the skb was device-locked */

/* Maintained by the IP fragmentation code; reported by show_net_buffers(). */
extern atomic_t ip_frag_mem;
  67 
/*
 *	Dump the resource-tracking counters above to the kernel log.
 *	Purely diagnostic; has no side effects beyond printk.
 */
void show_net_buffers(void)
{
        printk("Networking buffers in use          : %u\n",net_skbcount);
        printk("Network buffers locked by drivers  : %u\n",net_locked);
        printk("Total network buffer allocations   : %u\n",net_allocs);
        printk("Total failed network buffer allocs : %u\n",net_fails);
        printk("Total free while locked events     : %u\n",net_free_locked);
#ifdef CONFIG_INET
        printk("IP fragment buffer size            : %u\n",ip_frag_mem);
#endif
}
  79 
  80 #if CONFIG_SKB_CHECK
  81 
  82 /*
  83  *      Debugging paranoia. Can go later when this crud stack works
  84  */
  85 
/*
 *	Sanity-check an sk_buff (or, with head != 0, a queue head).
 *
 *	skb:	buffer (or casted sk_buff_head) to check
 *	head:	non-zero when 'skb' is really a queue head
 *	line/file: caller location, printed in every diagnostic
 *
 *	Returns 0 if the buffer looks acceptable, -1 (after a printk
 *	describing the problem) otherwise.  Only compiled when
 *	CONFIG_SKB_CHECK is set; normally reached via IS_SKB/IS_SKB_HEAD.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
        if (head) {
                /* Queue heads carry their own magic cookie... */
                if (skb->magic_debug_cookie != SK_HEAD_SKB) {
                        printk("File: %s Line %d, found a bad skb-head\n",
                                file,line);
                        return -1;
                }
                /* ...and must always be on a (possibly empty, circular) ring. */
                if (!skb->next || !skb->prev) {
                        printk("skb_check: head without next or prev\n");
                        return -1;
                }
                /* Neighbours must be either another head or a good skb. */
                if (skb->next->magic_debug_cookie != SK_HEAD_SKB
                        && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad next head-skb member\n",
                                file,line);
                        return -1;
                }
                if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
                        && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad prev head-skb member\n",
                                file,line);
                        return -1;
                }
#if 0
                /* Disabled: walk the first few queue elements as well. */
                {
                struct sk_buff *skb2 = skb->next;
                int i = 0;
                while (skb2 != skb && i < 5) {
                        if (skb_check(skb2, 0, line, file) < 0) {
                                printk("bad queue element in whole queue\n");
                                return -1;
                        }
                        i++;
                        skb2 = skb2->next;
                }
                }
#endif
                return 0;
        }
        /* Ordinary skb: links, when present, must point at valid objects. */
        if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
                && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad next skb member\n",
                        file,line);
                return -1;
        }
        if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
                && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad prev skb member\n",
                        file,line);
                return -1;
        }


        /* Catch use-after-free: the free path stamps SK_FREED_SKB. */
        if(skb->magic_debug_cookie==SK_FREED_SKB)
        {
                printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
                        file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        if(skb->magic_debug_cookie!=SK_GOOD_SKB)
        {
                printk("File: %s Line %d, passed a non skb!\n", file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        /* Data-pointer invariants: head <= data <= tail <= end. */
        if(skb->head>skb->data)
        {
                printk("File: %s Line %d, head > data !\n", file,line);
                printk("skb=%p, head=%p, data=%p\n",
                        skb,skb->head,skb->data);
                return -1;
        }
        if(skb->tail>skb->end)
        {
                printk("File: %s Line %d, tail > end!\n", file,line);
                printk("skb=%p, tail=%p, end=%p\n",
                        skb,skb->tail,skb->end);
                return -1;
        }
        if(skb->data>skb->tail)
        {
                printk("File: %s Line %d, data > tail!\n", file,line);
                printk("skb=%p, data=%p, tail=%p\n",
                        skb,skb->data,skb->tail);
                return -1;
        }
        /* len must equal the data..tail span. */
        if(skb->tail-skb->data!=skb->len)
        {
                printk("File: %s Line %d, wrong length\n", file,line);
                printk("skb=%p, data=%p, end=%p len=%ld\n",
                        skb,skb->data,skb->end,skb->len);
                return -1;
        }
        /* The control structure lives at the end of the buffer (see
           alloc_skb); data running past it would have clobbered it. */
        if((unsigned long) skb->end > (unsigned long) skb)
        {
                printk("File: %s Line %d, control overrun\n", file,line);
                printk("skb=%p, end=%p\n",
                        skb,skb->end);
                return -1;
        }

        /* Guess it might be acceptable then */
        return 0;
}
 194 #endif
 195 
 196 
 197 #if CONFIG_SKB_CHECK
 198 void skb_queue_head_init(struct sk_buff_head *list)
     /* [previous][next][first][last][top][bottom][index][help] */
 199 {
 200         list->prev = (struct sk_buff *)list;
 201         list->next = (struct sk_buff *)list;
 202         list->qlen = 0;
 203         list->magic_debug_cookie = SK_HEAD_SKB;
 204 }
 205 
 206 
 207 /*
 208  *      Insert an sk_buff at the start of a list.
 209  */
 210 void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 211 {
 212         unsigned long flags;
 213         struct sk_buff *list = (struct sk_buff *)list_;
 214 
 215         save_flags(flags);
 216         cli();
 217 
 218         IS_SKB(newsk);
 219         IS_SKB_HEAD(list);
 220         if (newsk->next || newsk->prev)
 221                 printk("Suspicious queue head: sk_buff on list!\n");
 222 
 223         newsk->next = list->next;
 224         newsk->prev = list;
 225 
 226         newsk->next->prev = newsk;
 227         newsk->prev->next = newsk;
 228         newsk->list = list_;
 229         list_->qlen++;
 230 
 231         restore_flags(flags);
 232 }
 233 
 234 void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 235 {
 236         struct sk_buff *list = (struct sk_buff *)list_;
 237 
 238 
 239         IS_SKB(newsk);
 240         IS_SKB_HEAD(list);
 241         if (newsk->next || newsk->prev)
 242                 printk("Suspicious queue head: sk_buff on list!\n");
 243 
 244         newsk->next = list->next;
 245         newsk->prev = list;
 246 
 247         newsk->next->prev = newsk;
 248         newsk->prev->next = newsk;
 249         newsk->list = list_;
 250         list_->qlen++;
 251 
 252 }
 253 
 254 /*
 255  *      Insert an sk_buff at the end of a list.
 256  */
 257 void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 258 {
 259         unsigned long flags;
 260         struct sk_buff *list = (struct sk_buff *)list_;
 261 
 262         save_flags(flags);
 263         cli();
 264 
 265         if (newsk->next || newsk->prev)
 266                 printk("Suspicious queue tail: sk_buff on list!\n");
 267         IS_SKB(newsk);
 268         IS_SKB_HEAD(list);
 269 
 270         newsk->next = list;
 271         newsk->prev = list->prev;
 272 
 273         newsk->next->prev = newsk;
 274         newsk->prev->next = newsk;
 275         
 276         newsk->list = list_;
 277         list_->qlen++;
 278 
 279         restore_flags(flags);
 280 }
 281 
 282 void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 283 {
 284         unsigned long flags;
 285         struct sk_buff *list = (struct sk_buff *)list_;
 286 
 287         if (newsk->next || newsk->prev)
 288                 printk("Suspicious queue tail: sk_buff on list!\n");
 289         IS_SKB(newsk);
 290         IS_SKB_HEAD(list);
 291 
 292         newsk->next = list;
 293         newsk->prev = list->prev;
 294 
 295         newsk->next->prev = newsk;
 296         newsk->prev->next = newsk;
 297         
 298         newsk->list = list_;
 299         list_->qlen++;
 300 }
 301 
 302 /*
 303  *      Remove an sk_buff from a list. This routine is also interrupt safe
 304  *      so you can grab read and free buffers as another process adds them.
 305  */
 306 
 307 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
     /* [previous][next][first][last][top][bottom][index][help] */
 308 {
 309         long flags;
 310         struct sk_buff *result;
 311         struct sk_buff *list = (struct sk_buff *)list_;
 312 
 313         save_flags(flags);
 314         cli();
 315 
 316         IS_SKB_HEAD(list);
 317 
 318         result = list->next;
 319         if (result == list) {
 320                 restore_flags(flags);
 321                 return NULL;
 322         }
 323 
 324         result->next->prev = list;
 325         list->next = result->next;
 326 
 327         result->next = NULL;
 328         result->prev = NULL;
 329         list_->qlen--;
 330         result->list = NULL;
 331         
 332         restore_flags(flags);
 333 
 334         IS_SKB(result);
 335         return result;
 336 }
 337 
 338 struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
     /* [previous][next][first][last][top][bottom][index][help] */
 339 {
 340         struct sk_buff *result;
 341         struct sk_buff *list = (struct sk_buff *)list_;
 342 
 343         IS_SKB_HEAD(list);
 344 
 345         result = list->next;
 346         if (result == list) {
 347                 return NULL;
 348         }
 349 
 350         result->next->prev = list;
 351         list->next = result->next;
 352 
 353         result->next = NULL;
 354         result->prev = NULL;
 355         list_->qlen--;
 356         result->list = NULL;
 357         
 358         IS_SKB(result);
 359         return result;
 360 }
 361 
 362 /*
 363  *      Insert a packet before another one in a list.
 364  */
 365 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 366 {
 367         unsigned long flags;
 368 
 369         IS_SKB(old);
 370         IS_SKB(newsk);
 371 
 372         if(!old->next || !old->prev)
 373                 printk("insert before unlisted item!\n");
 374         if(newsk->next || newsk->prev)
 375                 printk("inserted item is already on a list.\n");
 376 
 377         save_flags(flags);
 378         cli();
 379         newsk->next = old;
 380         newsk->prev = old->prev;
 381         old->prev = newsk;
 382         newsk->prev->next = newsk;
 383         newsk->list = old->list;
 384         newsk->list->qlen++;
 385 
 386         restore_flags(flags);
 387 }
 388 
 389 /*
 390  *      Insert a packet before another one in a list.
 391  */
 392 
 393 void __skb_insert(struct sk_buff *newsk,
     /* [previous][next][first][last][top][bottom][index][help] */
 394         struct sk_buff * prev, struct sk_buff *next,
 395         struct sk_buff_head * list)
 396 {
 397         IS_SKB(prev);
 398         IS_SKB(newsk);
 399         IS_SKB(next);
 400 
 401         if(!prev->next || !prev->prev)
 402                 printk("insert after unlisted item!\n");
 403         if(!next->next || !next->prev)
 404                 printk("insert before unlisted item!\n");
 405         if(newsk->next || newsk->prev)
 406                 printk("inserted item is already on a list.\n");
 407 
 408         newsk->next = next;
 409         newsk->prev = prev;
 410         next->prev = newsk;
 411         prev->next = newsk;
 412         newsk->list = list;
 413         list->qlen++;
 414 
 415 }
 416 
 417 /*
 418  *      Place a packet after a given packet in a list.
 419  */
 420 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 421 {
 422         unsigned long flags;
 423 
 424         IS_SKB(old);
 425         IS_SKB(newsk);
 426 
 427         if(!old->next || !old->prev)
 428                 printk("append before unlisted item!\n");
 429         if(newsk->next || newsk->prev)
 430                 printk("append item is already on a list.\n");
 431 
 432         save_flags(flags);
 433         cli();
 434 
 435         newsk->prev = old;
 436         newsk->next = old->next;
 437         newsk->next->prev = newsk;
 438         old->next = newsk;
 439         newsk->list = old->list;
 440         newsk->list->qlen++;
 441 
 442         restore_flags(flags);
 443 }
 444 
 445 /*
 446  *      Remove an sk_buff from its list. Works even without knowing the list it
 447  *      is sitting on, which can be handy at times. It also means that THE LIST
 448  *      MUST EXIST when you unlink. Thus a list must have its contents unlinked
 449  *      _FIRST_.
 450  */
 451 void skb_unlink(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 452 {
 453         unsigned long flags;
 454 
 455         save_flags(flags);
 456         cli();
 457 
 458         IS_SKB(skb);
 459 
 460         if(skb->list)
 461         {
 462                 skb->list->qlen--;
 463                 skb->next->prev = skb->prev;
 464                 skb->prev->next = skb->next;
 465                 skb->next = NULL;
 466                 skb->prev = NULL;
 467                 skb->list = NULL;
 468         }
 469 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
 470         else
 471                 printk("skb_unlink: not a linked element\n");
 472 #endif
 473         restore_flags(flags);
 474 }
 475 
 476 void __skb_unlink(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 477 {
 478         IS_SKB(skb);
 479 
 480         if(skb->list)
 481         {
 482                 skb->list->qlen--;
 483                 skb->next->prev = skb->prev;
 484                 skb->prev->next = skb->next;
 485                 skb->next = NULL;
 486                 skb->prev = NULL;
 487                 skb->list = NULL;
 488         }
 489 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
 490         else
 491                 printk("skb_unlink: not a linked element\n");
 492 #endif
 493 }
 494 
 495 /*
 496  *      Add data to an sk_buff
 497  */
 498  
 499 unsigned char *skb_put(struct sk_buff *skb, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 500 {
 501         unsigned char *tmp=skb->tail;
 502         IS_SKB(skb);
 503         skb->tail+=len;
 504         skb->len+=len;
 505         IS_SKB(skb);
 506         if(skb->tail>skb->end)
 507                 panic("skput:over: %p:%d", __builtin_return_address(0),len);
 508         return tmp;
 509 }
 510 
 511 unsigned char *skb_push(struct sk_buff *skb, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 512 {
 513         IS_SKB(skb);
 514         skb->data-=len;
 515         skb->len+=len;
 516         IS_SKB(skb);
 517         if(skb->data<skb->head)
 518                 panic("skpush:under: %p:%d", __builtin_return_address(0),len);
 519         return skb->data;
 520 }
 521 
 522 unsigned char * skb_pull(struct sk_buff *skb, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 523 {
 524         IS_SKB(skb);
 525         if(len>skb->len)
 526                 return 0;
 527         skb->data+=len;
 528         skb->len-=len;
 529         return skb->data;
 530 }
 531 
 532 int skb_headroom(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 533 {
 534         IS_SKB(skb);
 535         return skb->data-skb->head;
 536 }
 537 
 538 int skb_tailroom(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 539 {
 540         IS_SKB(skb);
 541         return skb->end-skb->tail;
 542 }
 543 
 544 void skb_reserve(struct sk_buff *skb, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 545 {
 546         IS_SKB(skb);
 547         skb->data+=len;
 548         skb->tail+=len;
 549         if(skb->tail>skb->end)
 550                 panic("sk_res: over");
 551         if(skb->data<skb->head)
 552                 panic("sk_res: under");
 553         IS_SKB(skb);
 554 }
 555 
 556 void skb_trim(struct sk_buff *skb, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 557 {
 558         IS_SKB(skb);
 559         if(skb->len>len)
 560         {
 561                 skb->len=len;
 562                 skb->tail=skb->data+len;
 563         }
 564 }
 565 
 566 
 567 
 568 #endif
 569 
 570 /*
 571  *      Free an sk_buff. This still knows about things it should
 572  *      not need to like protocols and sockets.
 573  */
 574 
/*
 *	Free an sk_buff. This still knows about things it should
 *	not need to like protocols and sockets.
 *
 *	skb: buffer to release (NULL is diagnosed and ignored)
 *	rw:  non-zero means the buffer was charged to the owning socket's
 *	     read side (rmem), zero means the write side (wmem).
 *
 *	A buffer still locked by a device driver is not freed here; it is
 *	marked free==3 and released later by dev_kfree_skb().
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
        if (skb == NULL)
        {
                printk("kfree_skb: skb = NULL (from %p)\n",
                        __builtin_return_address(0));
                return;
        }
#if CONFIG_SKB_CHECK
        IS_SKB(skb);
#endif
        if (skb->lock)
        {
                skb->free = 3;    /* Free when unlocked */
                net_free_locked++;
                return;
        }
        /* free==2 is the "forgot to set the free flag" value from alloc_skb. */
        if (skb->free == 2)
                printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
                        __builtin_return_address(0));
        /* Freeing a queued buffer would corrupt its queue. */
        if (skb->list)
                printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
                        __builtin_return_address(0));

        if(skb->destructor)
                skb->destructor(skb);
        if (skb->sk)
        {
                struct sock * sk = skb->sk;
                if(sk->prot!=NULL)
                {
                        /* Protocol-owned socket: let the sock layer uncharge
                           the memory and free the buffer. */
                        if (rw)
                                sock_rfree(sk, skb);
                        else
                                sock_wfree(sk, skb);

                }
                else
                {
                        /* No protocol ops: uncharge the socket manually and
                           free the memory ourselves. */
                        if (rw)
                                atomic_sub(skb->truesize, &sk->rmem_alloc);
                        else {
                                if(!sk->dead)
                                        sk->write_space(sk);
                                atomic_sub(skb->truesize, &sk->wmem_alloc);
                        }
                        kfree_skbmem(skb);
                }
        }
        else
                kfree_skbmem(skb);
}
 627 
 628 /*
 629  *      Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
 630  *      fields and also do memory statistics to find all the [BEEP] leaks.
 631  */
 632 struct sk_buff *alloc_skb(unsigned int size,int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 633 {
 634         struct sk_buff *skb;
 635         int len=size;
 636         unsigned char *bptr;
 637 
 638         if (intr_count && priority!=GFP_ATOMIC) 
 639         {
 640                 static int count = 0;
 641                 if (++count < 5) {
 642                         printk("alloc_skb called nonatomically from interrupt %p\n",
 643                                 __builtin_return_address(0));
 644                         priority = GFP_ATOMIC;
 645                 }
 646         }
 647 
 648         size=(size+15)&~15;             /* Allow for alignments. Make a multiple of 16 bytes */
 649         size+=sizeof(struct sk_buff);   /* And stick the control itself on the end */
 650         
 651         /*
 652          *      Allocate some space
 653          */
 654          
 655         bptr=(unsigned char *)kmalloc(size,priority);
 656         if (bptr == NULL)
 657         {
 658                 net_fails++;
 659                 return NULL;
 660         }
 661 #ifdef PARANOID_BUGHUNT_MODE
 662         if(skb->magic_debug_cookie == SK_GOOD_SKB)
 663                 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
 664 #endif
 665         /*
 666          *      Now we play a little game with the caches. Linux kmalloc is
 667          *      a bit cache dumb, in fact its just about maximally non 
 668          *      optimal for typical kernel buffers. We actually run faster
 669          *      by doing the following. Which is to deliberately put the
 670          *      skb at the _end_ not the start of the memory block.
 671          */
 672         net_allocs++;
 673         
 674         skb=(struct sk_buff *)(bptr+size)-1;
 675 
 676         skb->count = 1;         /* only one reference to this */
 677         skb->data_skb = NULL;   /* and we're our own data skb */
 678 
 679         skb->free = 2;  /* Invalid so we pick up forgetful users */
 680         skb->lock = 0;
 681         skb->pkt_type = PACKET_HOST;    /* Default type */
 682         skb->pkt_bridged = 0;           /* Not bridged */
 683         skb->prev = skb->next = skb->link3 = NULL;
 684         skb->list = NULL;
 685         skb->sk = NULL;
 686         skb->truesize=size;
 687         skb->localroute=0;
 688         skb->stamp.tv_sec=0;    /* No idea about time */
 689         skb->localroute = 0;
 690         skb->ip_summed = 0;
 691         memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
 692         net_skbcount++;
 693 #if CONFIG_SKB_CHECK
 694         skb->magic_debug_cookie = SK_GOOD_SKB;
 695 #endif
 696         skb->users = 0;
 697         /* Load the data pointers */
 698         skb->head=bptr;
 699         skb->data=bptr;
 700         skb->tail=bptr;
 701         skb->end=bptr+len;
 702         skb->len=0;
 703         skb->destructor=NULL;
 704         return skb;
 705 }
 706 
 707 /*
 708  *      Free an skbuff by memory
 709  */
 710 
/*
 *	Drop a reference on the data-owning skb and, when it hits zero,
 *	release its storage.  kfree(skb->head) frees the whole allocation
 *	because alloc_skb() places the control structure at the end of the
 *	same kmalloc block.
 */
static inline void __kfree_skbmem(struct sk_buff *skb)
{
        /* don't do anything if somebody still uses us */
        if (atomic_dec_and_test(&skb->count)) {
                kfree(skb->head);
                atomic_dec(&net_skbcount);
        }
}
 719 
/*
 *	Release an skb's memory, clone-aware.  For an ordinary skb the
 *	control structure and data share one allocation starting at
 *	skb->head.  For a clone (data_skb set) the control structure was
 *	kmalloc'd separately by skb_clone(): free it on its own and drop
 *	a reference on the skb that owns the data.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        void * addr = skb->head;

        /* don't do anything if somebody still uses us */
        if (atomic_dec_and_test(&skb->count)) {
                /* free the skb that contains the actual data if we've clone()'d */
                if (skb->data_skb) {
                        addr = skb;
                        __kfree_skbmem(skb->data_skb);
                }
                kfree(addr);
                atomic_dec(&net_skbcount);
        }
}
 735 
 736 /*
 737  *      Duplicate an sk_buff. The new one is not owned by a socket or locked
 738  *      and will be freed on deletion.
 739  */
 740 
/*
 *	Duplicate an sk_buff. The new one is not owned by a socket or locked
 *	and will be freed on deletion.
 *
 *	The clone shares the original's data: only the control structure is
 *	allocated (and memcpy'd), and n->data_skb points at the skb that
 *	owns the data, whose refcount is bumped.  Returns NULL if the
 *	kmalloc fails.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;

        IS_SKB(skb);
        n = kmalloc(sizeof(*n), priority);
        if (!n)
                return NULL;
        memcpy(n, skb, sizeof(*n));
        n->count = 1;
        /* If the original is itself a clone, share its data owner instead. */
        if (skb->data_skb)
                skb = skb->data_skb;
        atomic_inc(&skb->count);
        atomic_inc(&net_allocs);
        atomic_inc(&net_skbcount);
        n->data_skb = skb;
        /* The clone starts life unqueued, unowned and unlocked. */
        n->next = n->prev = n->link3 = NULL;
        n->list = NULL;
        n->sk = NULL;
        n->free = 1;
        n->tries = 0;
        n->lock = 0;
        n->users = 0;
        return n;
}
 766 
 767 /*
 768  *      This is slower, and copies the whole data area 
 769  */
 770  
/*
 *	This is slower, and copies the whole data area
 *
 *	Unlike skb_clone(), the copy gets its own buffer: the entire
 *	head..end region is duplicated and the header pointers (h, mac,
 *	ip_hdr) are relocated into it.  Returns NULL on allocation failure.
 */
struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;
        unsigned long offset;

        /*
         *      Allocate the copy buffer
         */

        IS_SKB(skb);

        /* truesize includes the control struct; alloc_skb adds it back. */
        n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
        if(n==NULL)
                return NULL;

        /*
         *      Shift between the two data areas in bytes
         */

        offset=n->head-skb->head;

        /* Set the data pointer */
        skb_reserve(n,skb->data-skb->head);
        /* Set the tail pointer and length */
        skb_put(n,skb->len);
        /* Copy the bytes */
        memcpy(n->head,skb->head,skb->end-skb->head);
        n->link3=NULL;
        n->list=NULL;
        n->sk=NULL;
        n->when=skb->when;
        n->dev=skb->dev;
        /* Relocate the protocol header pointers into the new buffer. */
        n->h.raw=skb->h.raw+offset;
        n->mac.raw=skb->mac.raw+offset;
        n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
        n->saddr=skb->saddr;
        n->daddr=skb->daddr;
        n->raddr=skb->raddr;
        n->seq=skb->seq;
        n->end_seq=skb->end_seq;
        n->ack_seq=skb->ack_seq;
        n->acked=skb->acked;
        memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
        n->used=skb->used;
        n->free=1;
        n->arp=skb->arp;
        n->tries=0;
        n->lock=0;
        n->users=0;
        n->pkt_type=skb->pkt_type;
        n->stamp=skb->stamp;

        IS_SKB(n);
        return n;
}
 826 
 827 /*
 828  *     Skbuff device locking
 829  */
 830 
 831 void skb_device_lock(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 832 {
 833         if(skb->lock)
 834                 printk("double lock on device queue!\n");
 835         else
 836                 net_locked++;
 837         skb->lock++;
 838 }
 839 
 840 void skb_device_unlock(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 841 {
 842         if(skb->lock==0)
 843                 printk("double unlock on device queue!\n");
 844         skb->lock--;
 845         if(skb->lock==0)
 846                 net_locked--;
 847 }
 848 
/*
 *	Device-side free: drop the driver's lock on the buffer and, if it
 *	is now unlocked and flagged freeable, release it.
 *
 *	skb:  buffer the driver has finished with
 *	mode: passed through to kfree_skb() as its rw argument
 *
 *	free==1 is a normally freeable buffer; free==3 means kfree_skb()
 *	already tried to free it while it was locked (see kfree_skb).
 */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        if(skb->lock)
        {
                net_locked--;
                skb->lock--;
        }
        if (!skb->lock && (skb->free == 1 || skb->free == 3))
        {
                /* Interrupts back on before the (potentially long) free. */
                restore_flags(flags);
                kfree_skb(skb,mode);
        }
        else
                restore_flags(flags);
}
 868 
 869 struct sk_buff *dev_alloc_skb(unsigned int length)
     /* [previous][next][first][last][top][bottom][index][help] */
 870 {
 871         struct sk_buff *skb;
 872 
 873         skb = alloc_skb(length+16, GFP_ATOMIC);
 874         if (skb)
 875                 skb_reserve(skb,16);
 876         return skb;
 877 }
 878 
 879 int skb_device_locked(struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
 880 {
 881         return skb->lock? 1 : 0;
 882 }

/* [previous][next][first][last][top][bottom][index][help] */