root/net/core/skbuff.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. show_net_buffers
  2. skb_check
  3. skb_queue_head_init
  4. skb_queue_head
  5. __skb_queue_head
  6. skb_queue_tail
  7. __skb_queue_tail
  8. skb_dequeue
  9. __skb_dequeue
  10. skb_insert
  11. __skb_insert
  12. skb_append
  13. skb_unlink
  14. __skb_unlink
  15. skb_put
  16. skb_push
  17. skb_pull
  18. skb_headroom
  19. skb_tailroom
  20. skb_reserve
  21. skb_trim
  22. kfree_skb
  23. alloc_skb
  24. __kfree_skbmem
  25. kfree_skbmem
  26. skb_clone
  27. skb_copy
  28. skb_device_lock
  29. skb_device_unlock
  30. dev_kfree_skb
  31. dev_alloc_skb
  32. skb_device_locked

   1 /*
   2  *      Routines having to do with the 'struct sk_buff' memory handlers.
   3  *
   4  *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
   5  *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   6  *
   7  *      Fixes:  
   8  *              Alan Cox        :       Fixed the worst of the load balancer bugs.
   9  *              Dave Platt      :       Interrupt stacking fix.
  10  *              Richard Kooijman:       Timestamp fixes.
  11  *              Alan Cox        :       Changed buffer format.
  12  *              Alan Cox        :       destructor hook for AF_UNIX etc.
  13  *              Linus Torvalds  :       Better skb_clone.
  14  *              Alan Cox        :       Added skb_copy.
  15  *              Alan Cox        :       Added all the changed routines Linus
  16  *                                      only put in the headers
  17  *              Ray VanTassle   :       Fixed --skb->lock in free
  18  *
  19  *      TO FIX:
  20  *              The __skb_ routines ought to check interrupts are disabled
  21  *      when called, and bitch like crazy if not. Unfortunately I don't think
  22  *      we currently have a portable way to check if interrupts are off - 
  23  *      Linus ???
  24  *
  25  *      This program is free software; you can redistribute it and/or
  26  *      modify it under the terms of the GNU General Public License
  27  *      as published by the Free Software Foundation; either version
  28  *      2 of the License, or (at your option) any later version.
  29  */
  30 
  31 /*
  32  *      The functions in this file will not compile correctly with gcc 2.4.x
  33  */
  34 
  35 #include <linux/config.h>
  36 #include <linux/types.h>
  37 #include <linux/kernel.h>
  38 #include <linux/sched.h>
  39 #include <linux/mm.h>
  40 #include <linux/interrupt.h>
  41 #include <linux/in.h>
  42 #include <linux/inet.h>
  43 #include <linux/netdevice.h>
  44 #include <linux/malloc.h>
  45 #include <linux/string.h>
  46 #include <linux/skbuff.h>
  47 
  48 #include <net/ip.h>
  49 #include <net/protocol.h>
  50 #include <net/route.h>
  51 #include <net/tcp.h>
  52 #include <net/udp.h>
  53 #include <net/sock.h>
  54 
  55 #include <asm/segment.h>
  56 #include <asm/system.h>
  57 
  58 /*
  59  *      Resource tracking variables
  60  */
  61 
  62 atomic_t net_skbcount = 0;
  63 atomic_t net_locked = 0;
  64 atomic_t net_allocs = 0;
  65 atomic_t net_fails  = 0;
  66 atomic_t net_free_locked = 0;
  67 
  68 extern atomic_t ip_frag_mem;
  69 
/*
 *      Dump the network-buffer accounting counters to the kernel log.
 *      Purely diagnostic: reads the file-scope resource counters above
 *      (and the IP fragment total when CONFIG_INET is enabled).
 */
   70 void show_net_buffers(void)
      /* [previous][next][first][last][top][bottom][index][help] */
   71 {
   72         printk("Networking buffers in use          : %u\n",net_skbcount);
   73         printk("Network buffers locked by drivers  : %u\n",net_locked);
   74         printk("Total network buffer allocations   : %u\n",net_allocs);
   75         printk("Total failed network buffer allocs : %u\n",net_fails);
   76         printk("Total free while locked events     : %u\n",net_free_locked);
   77 #ifdef CONFIG_INET
   78         printk("IP fragment buffer size            : %u\n",ip_frag_mem);
   79 #endif  
   80 }
  81 
  82 #if CONFIG_SKB_CHECK
  83 
  84 /*
  85  *      Debugging paranoia. Can go later when this crud stack works
  86  */
  87 
/*
 *      Debug-build sanity check for one sk_buff.  With head != 0 the
 *      buffer is validated as a queue head; otherwise as an ordinary
 *      buffer (magic cookie, neighbour links, pointer ordering and
 *      length consistency).  Prints the caller's file/line and returns
 *      -1 on the first inconsistency found, 0 if it looks acceptable.
 */
   88 int skb_check(struct sk_buff *skb, int head, int line, char *file)
      /* [previous][next][first][last][top][bottom][index][help] */
   89 {
   90         if (head) {
   91                 if (skb->magic_debug_cookie != SK_HEAD_SKB) {
   92                         printk("File: %s Line %d, found a bad skb-head\n",
   93                                 file,line);
   94                         return -1;
   95                 }
   96                 if (!skb->next || !skb->prev) {
   97                         printk("skb_check: head without next or prev\n");
   98                         return -1;
   99                 }
  100                 if (skb->next->magic_debug_cookie != SK_HEAD_SKB
  101                         && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
  102                         printk("File: %s Line %d, bad next head-skb member\n",
  103                                 file,line);
  104                         return -1;
  105                 }
  106                 if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
  107                         && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
  108                         printk("File: %s Line %d, bad prev head-skb member\n",
  109                                 file,line);
  110                         return -1;
  111                 }
  112 #if 0
                /* Disabled: walk the first few queue elements and recurse.
                   Left here for bug-hunting sessions. */
  113                 {
  114                 struct sk_buff *skb2 = skb->next;
  115                 int i = 0;
  116                 while (skb2 != skb && i < 5) {
  117                         if (skb_check(skb2, 0, line, file) < 0) {
  118                                 printk("bad queue element in whole queue\n");
  119                                 return -1;
  120                         }
  121                         i++;
  122                         skb2 = skb2->next;
  123                 }
  124                 }
  125 #endif
  126                 return 0;
  127         }
  128         if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
  129                 && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
  130                 printk("File: %s Line %d, bad next skb member\n",
  131                         file,line);
  132                 return -1;
  133         }
  134         if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
  135                 && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
  136                 printk("File: %s Line %d, bad prev skb member\n",
  137                         file,line);
  138                 return -1;
  139         }
  140 
  141 
  142         if(skb->magic_debug_cookie==SK_FREED_SKB)
  143         {
  144                 printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
  145                         file,line);
  146                 printk("skb=%p, real size=%d, free=%d\n",
  147                         skb,skb->truesize,skb->free);
  148                 return -1;
  149         }
  150         if(skb->magic_debug_cookie!=SK_GOOD_SKB)
  151         {
  152                 printk("File: %s Line %d, passed a non skb!\n", file,line);
  153                 printk("skb=%p, real size=%d, free=%d\n",
  154                         skb,skb->truesize,skb->free);
  155                 return -1;
  156         }
        /* The data pointers must satisfy head <= data <= tail <= end */
  157         if(skb->head>skb->data)
  158         {
  159                 printk("File: %s Line %d, head > data !\n", file,line);
  160                 printk("skb=%p, head=%p, data=%p\n",
  161                         skb,skb->head,skb->data);
  162                 return -1;
  163         }
  164         if(skb->tail>skb->end)
  165         {
  166                 printk("File: %s Line %d, tail > end!\n", file,line);
  167                 printk("skb=%p, tail=%p, end=%p\n",
  168                         skb,skb->tail,skb->end);
  169                 return -1;
  170         }
  171         if(skb->data>skb->tail)
  172         {
  173                 printk("File: %s Line %d, data > tail!\n", file,line);
  174                 printk("skb=%p, data=%p, tail=%p\n",
  175                         skb,skb->data,skb->tail);
  176                 return -1;
  177         }
  178         if(skb->tail-skb->data!=skb->len)
  179         {
  180                 printk("File: %s Line %d, wrong length\n", file,line);
  181                 printk("skb=%p, data=%p, end=%p len=%ld\n",
  182                         skb,skb->data,skb->end,skb->len);
  183                 return -1;
  184         }
  185         if((unsigned long) skb->end > (unsigned long) skb)      /* control block sits at the END of the data area (see alloc_skb) */
  186         {
  187                 printk("File: %s Line %d, control overrun\n", file,line);
  188                 printk("skb=%p, end=%p\n",
  189                         skb,skb->end);
  190                 return -1;
  191         }
  192 
  193         /* Guess it might be acceptable then */
  194         return 0;
  195 }
 196 #endif
 197 
 198 
 199 #if CONFIG_SKB_CHECK
/*
 *      Initialise a queue head to the empty state: the head points back
 *      at itself in both directions, qlen is zero, and the debug cookie
 *      marks it as a head (not a data buffer).
 */
  200 void skb_queue_head_init(struct sk_buff_head *list)
      /* [previous][next][first][last][top][bottom][index][help] */
  201 {
  202         list->prev = (struct sk_buff *)list;
  203         list->next = (struct sk_buff *)list;
  204         list->qlen = 0;
  205         list->magic_debug_cookie = SK_HEAD_SKB;
  206 }
 207 
 208 
 209 /*
 210  *      Insert an sk_buff at the start of a list.
 211  */
/*
 *      Insert an sk_buff at the start of a list.  Interrupt safe: the
 *      link operation runs with interrupts disabled (save_flags/cli).
 */
  212 void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
      /* [previous][next][first][last][top][bottom][index][help] */
  213 {
  214         unsigned long flags;
  215         struct sk_buff *list = (struct sk_buff *)list_;
  216 
  217         save_flags(flags);
  218         cli();
  219 
  220         IS_SKB(newsk);
  221         IS_SKB_HEAD(list);
  222         if (newsk->next || newsk->prev)
  223                 printk("Suspicious queue head: sk_buff on list!\n");
  224 
        /* Link newsk between the head and the current first buffer */
  225         newsk->next = list->next;
  226         newsk->prev = list;
  227 
  228         newsk->next->prev = newsk;
  229         newsk->prev->next = newsk;
  230         newsk->list = list_;
  231         list_->qlen++;
  232 
  233         restore_flags(flags);
  234 }
 235 
/*
 *      Raw variant of skb_queue_head(): same linking, but no interrupt
 *      protection - the caller must already have interrupts disabled
 *      (see the TO FIX note at the top of this file).
 */
  236 void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
      /* [previous][next][first][last][top][bottom][index][help] */
  237 {
  238         struct sk_buff *list = (struct sk_buff *)list_;
  239 
  240 
  241         IS_SKB(newsk);
  242         IS_SKB_HEAD(list);
  243         if (newsk->next || newsk->prev)
  244                 printk("Suspicious queue head: sk_buff on list!\n");
  245 
  246         newsk->next = list->next;
  247         newsk->prev = list;
  248 
  249         newsk->next->prev = newsk;
  250         newsk->prev->next = newsk;
  251         newsk->list = list_;
  252         list_->qlen++;
  253 
  254 }
 255 
 256 /*
 257  *      Insert an sk_buff at the end of a list.
 258  */
/*
 *      Insert an sk_buff at the end of a list.  Interrupt safe: the
 *      link operation runs with interrupts disabled (save_flags/cli).
 */
  259 void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
      /* [previous][next][first][last][top][bottom][index][help] */
  260 {
  261         unsigned long flags;
  262         struct sk_buff *list = (struct sk_buff *)list_;
  263 
  264         save_flags(flags);
  265         cli();
  266 
  267         if (newsk->next || newsk->prev)
  268                 printk("Suspicious queue tail: sk_buff on list!\n");
  269         IS_SKB(newsk);
  270         IS_SKB_HEAD(list);
  271 
        /* Link newsk between the current last buffer and the head */
  272         newsk->next = list;
  273         newsk->prev = list->prev;
  274 
  275         newsk->next->prev = newsk;
  276         newsk->prev->next = newsk;
  277         
  278         newsk->list = list_;
  279         list_->qlen++;
  280 
  281         restore_flags(flags);
  282 }
 283 
 284 void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 285 {
 286         unsigned long flags;
 287         struct sk_buff *list = (struct sk_buff *)list_;
 288 
 289         if (newsk->next || newsk->prev)
 290                 printk("Suspicious queue tail: sk_buff on list!\n");
 291         IS_SKB(newsk);
 292         IS_SKB_HEAD(list);
 293 
 294         newsk->next = list;
 295         newsk->prev = list->prev;
 296 
 297         newsk->next->prev = newsk;
 298         newsk->prev->next = newsk;
 299         
 300         newsk->list = list_;
 301         list_->qlen++;
 302 }
 303 
 304 /*
 305  *      Remove an sk_buff from a list. This routine is also interrupt safe
 306  *      so you can grab read and free buffers as another process adds them.
 307  */
 308 
 309 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
     /* [previous][next][first][last][top][bottom][index][help] */
 310 {
 311         long flags;
 312         struct sk_buff *result;
 313         struct sk_buff *list = (struct sk_buff *)list_;
 314 
 315         save_flags(flags);
 316         cli();
 317 
 318         IS_SKB_HEAD(list);
 319 
 320         result = list->next;
 321         if (result == list) {
 322                 restore_flags(flags);
 323                 return NULL;
 324         }
 325 
 326         result->next->prev = list;
 327         list->next = result->next;
 328 
 329         result->next = NULL;
 330         result->prev = NULL;
 331         list_->qlen--;
 332         result->list = NULL;
 333         
 334         restore_flags(flags);
 335 
 336         IS_SKB(result);
 337         return result;
 338 }
 339 
/*
 *      Raw variant of skb_dequeue(): removes and returns the first
 *      buffer (or NULL if empty) with NO interrupt protection - the
 *      caller must already have interrupts disabled.
 */
  340 struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
      /* [previous][next][first][last][top][bottom][index][help] */
  341 {
  342         struct sk_buff *result;
  343         struct sk_buff *list = (struct sk_buff *)list_;
  344 
  345         IS_SKB_HEAD(list);
  346 
  347         result = list->next;
  348         if (result == list) {
  349                 return NULL;
  350         }
  351 
  352         result->next->prev = list;
  353         list->next = result->next;
  354 
  355         result->next = NULL;
  356         result->prev = NULL;
  357         list_->qlen--;
  358         result->list = NULL;
  359         
  360         IS_SKB(result);
  361         return result;
  362 }
 363 
 364 /*
 365  *      Insert a packet before another one in a list.
 366  */
/*
 *      Insert newsk immediately BEFORE 'old' on whatever list 'old' is
 *      on.  Interrupt safe.  newsk inherits old's list pointer and the
 *      list's qlen is bumped.
 */
  367 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
      /* [previous][next][first][last][top][bottom][index][help] */
  368 {
  369         unsigned long flags;
  370 
  371         IS_SKB(old);
  372         IS_SKB(newsk);
  373 
  374         if(!old->next || !old->prev)
  375                 printk("insert before unlisted item!\n");
  376         if(newsk->next || newsk->prev)
  377                 printk("inserted item is already on a list.\n");
  378 
  379         save_flags(flags);
  380         cli();
  381         newsk->next = old;
  382         newsk->prev = old->prev;
  383         old->prev = newsk;
  384         newsk->prev->next = newsk;
  385         newsk->list = old->list;
  386         newsk->list->qlen++;
  387 
  388         restore_flags(flags);
  389 }
 390 
 391 /*
 392  *      Insert a packet before another one in a list.
 393  */
 394 
/*
 *      Raw insert of newsk between 'prev' and 'next' on 'list'.  No
 *      interrupt protection - caller must have interrupts disabled.
 *      Note: unlike skb_insert(), the target list is passed explicitly.
 */
  395 void __skb_insert(struct sk_buff *newsk,
      /* [previous][next][first][last][top][bottom][index][help] */
  396         struct sk_buff * prev, struct sk_buff *next,
  397         struct sk_buff_head * list)
  398 {
  399         IS_SKB(prev);
  400         IS_SKB(newsk);
  401         IS_SKB(next);
  402 
  403         if(!prev->next || !prev->prev)
  404                 printk("insert after unlisted item!\n");
  405         if(!next->next || !next->prev)
  406                 printk("insert before unlisted item!\n");
  407         if(newsk->next || newsk->prev)
  408                 printk("inserted item is already on a list.\n");
  409 
  410         newsk->next = next;
  411         newsk->prev = prev;
  412         next->prev = newsk;
  413         prev->next = newsk;
  414         newsk->list = list;
  415         list->qlen++;
  416 
  417 }
 418 
 419 /*
 420  *      Place a packet after a given packet in a list.
 421  */
/*
 *      Insert newsk immediately AFTER 'old' on whatever list 'old' is
 *      on.  Interrupt safe.  Mirror image of skb_insert().
 */
  422 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
      /* [previous][next][first][last][top][bottom][index][help] */
  423 {
  424         unsigned long flags;
  425 
  426         IS_SKB(old);
  427         IS_SKB(newsk);
  428 
  429         if(!old->next || !old->prev)
  430                 printk("append before unlisted item!\n");
  431         if(newsk->next || newsk->prev)
  432                 printk("append item is already on a list.\n");
  433 
  434         save_flags(flags);
  435         cli();
  436 
  437         newsk->prev = old;
  438         newsk->next = old->next;
  439         newsk->next->prev = newsk;
  440         old->next = newsk;
  441         newsk->list = old->list;
  442         newsk->list->qlen++;
  443 
  444         restore_flags(flags);
  445 }
 446 
 447 /*
 448  *      Remove an sk_buff from its list. Works even without knowing the list it
 449  *      is sitting on, which can be handy at times. It also means that THE LIST
 450  *      MUST EXIST when you unlink. Thus a list must have its contents unlinked
 451  *      _FIRST_.
 452  */
/*
 *      Remove an sk_buff from whatever list it is on (tracked via
 *      skb->list, so the caller need not know the list).  Safe to call
 *      on an unlisted buffer: that is a silent no-op unless
 *      PARANOID_BUGHUNT_MODE is enabled.  Interrupt safe.
 */
  453 void skb_unlink(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  454 {
  455         unsigned long flags;
  456 
  457         save_flags(flags);
  458         cli();
  459 
  460         IS_SKB(skb);
  461 
  462         if(skb->list)
  463         {
  464                 skb->list->qlen--;
  465                 skb->next->prev = skb->prev;
  466                 skb->prev->next = skb->next;
  467                 skb->next = NULL;
  468                 skb->prev = NULL;
  469                 skb->list = NULL;
  470         }
  471 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
  472         else
  473                 printk("skb_unlink: not a linked element\n");
  474 #endif
  475         restore_flags(flags);
  476 }
 477 
/*
 *      Raw variant of skb_unlink(): same unlinking, but no interrupt
 *      protection - caller must have interrupts disabled.
 */
  478 void __skb_unlink(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  479 {
  480         IS_SKB(skb);
  481 
  482         if(skb->list)
  483         {
  484                 skb->list->qlen--;
  485                 skb->next->prev = skb->prev;
  486                 skb->prev->next = skb->next;
  487                 skb->next = NULL;
  488                 skb->prev = NULL;
  489                 skb->list = NULL;
  490         }
  491 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
  492         else
  493                 printk("skb_unlink: not a linked element\n");
  494 #endif
  495 }
 496 
 497 /*
 498  *      Add data to an sk_buff
 499  */
 500  
/*
 *      Extend the used data area by 'len' bytes at the tail and return
 *      a pointer to the first byte of the new area.  Panics if the
 *      buffer would overrun its end.
 */
  501 unsigned char *skb_put(struct sk_buff *skb, int len)
      /* [previous][next][first][last][top][bottom][index][help] */
  502 {
  503         unsigned char *tmp=skb->tail;
  504         IS_SKB(skb);
  505         skb->tail+=len;
  506         skb->len+=len;
  507         IS_SKB(skb);
  508         if(skb->tail>skb->end)
  509                 panic("skput:over: %p:%d", __builtin_return_address(0),len);
  510         return tmp;
  511 }
 512 
/*
 *      Extend the used data area by 'len' bytes at the FRONT (e.g. to
 *      prepend a protocol header) and return the new data pointer.
 *      Panics if the buffer would underrun its head.
 */
  513 unsigned char *skb_push(struct sk_buff *skb, int len)
      /* [previous][next][first][last][top][bottom][index][help] */
  514 {
  515         IS_SKB(skb);
  516         skb->data-=len;
  517         skb->len+=len;
  518         IS_SKB(skb);
  519         if(skb->data<skb->head)
  520                 panic("skpush:under: %p:%d", __builtin_return_address(0),len);
  521         return skb->data;
  522 }
 523 
/*
 *      Remove 'len' bytes from the front of the data area (e.g. to
 *      strip a header).  Returns the new data pointer, or NULL (0) if
 *      the buffer holds fewer than 'len' bytes - in which case nothing
 *      is changed.
 */
  524 unsigned char * skb_pull(struct sk_buff *skb, int len)
      /* [previous][next][first][last][top][bottom][index][help] */
  525 {
  526         IS_SKB(skb);
  527         if(len>skb->len)
  528                 return 0;
  529         skb->data+=len;
  530         skb->len-=len;
  531         return skb->data;
  532 }
 533 
/*
 *      Bytes of free space before the data area (room for skb_push).
 */
  534 int skb_headroom(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  535 {
  536         IS_SKB(skb);
  537         return skb->data-skb->head;
  538 }
 539 
/*
 *      Bytes of free space after the data area (room for skb_put).
 */
  540 int skb_tailroom(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  541 {
  542         IS_SKB(skb);
  543         return skb->end-skb->tail;
  544 }
 545 
/*
 *      Reserve 'len' bytes of headroom by moving data and tail forward.
 *      Intended for an empty buffer (data == tail) before any data is
 *      added.  Panics on over/underrun of the buffer bounds.
 */
  546 void skb_reserve(struct sk_buff *skb, int len)
      /* [previous][next][first][last][top][bottom][index][help] */
  547 {
  548         IS_SKB(skb);
  549         skb->data+=len;
  550         skb->tail+=len;
  551         if(skb->tail>skb->end)
  552                 panic("sk_res: over");
  553         if(skb->data<skb->head)
  554                 panic("sk_res: under");
  555         IS_SKB(skb);
  556 }
 557 
/*
 *      Truncate the buffer to 'len' bytes by pulling the tail back.
 *      A no-op if the buffer is already 'len' bytes or shorter.
 */
  558 void skb_trim(struct sk_buff *skb, int len)
      /* [previous][next][first][last][top][bottom][index][help] */
  559 {
  560         IS_SKB(skb);
  561         if(skb->len>len)
  562         {
  563                 skb->len=len;
  564                 skb->tail=skb->data+len;
  565         }
  566 }
 567 
 568 
 569 
 570 #endif
 571 
 572 /*
 573  *      Free an sk_buff. This still knows about things it should
 574  *      not need to like protocols and sockets.
 575  */
 576 
/*
 *      Free an sk_buff.  'rw' selects read (non-zero) or write (zero)
 *      memory accounting against the owning socket, if any.  A buffer
 *      that is device-locked is not freed now: free is deferred by
 *      setting skb->free = 3 so the unlocking path releases it.
 *      Warns (but proceeds) on suspicious state: free flag never set,
 *      or buffer still linked on a list.
 */
  577 void kfree_skb(struct sk_buff *skb, int rw)
      /* [previous][next][first][last][top][bottom][index][help] */
  578 {
  579         if (skb == NULL)
  580         {
  581                 printk("kfree_skb: skb = NULL (from %p)\n",
  582                         __builtin_return_address(0));
  583                 return;
  584         }
  585 #if CONFIG_SKB_CHECK
  586         IS_SKB(skb);
  587 #endif
  588         if (skb->lock)
  589         {
  590                 skb->free = 3;    /* Free when unlocked */
  591                 net_free_locked++;
  592                 return;
  593         }
  594         if (skb->free == 2)
  595                 printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
  596                         __builtin_return_address(0));
  597         if (skb->list)
  598                 printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
  599                         __builtin_return_address(0));
  600 
        /* AF_UNIX etc. hook: let the owner clean up before accounting */
  601         if(skb->destructor)
  602                 skb->destructor(skb);
  603         if (skb->sk)
  604         {
  605                 struct sock * sk = skb->sk;
  606                 if(sk->prot!=NULL)
  607                 {
                        /* Protocol-owned: sock_rfree/sock_wfree also free the skb memory */
  608                         if (rw)
  609                                 sock_rfree(sk, skb);
  610                         else
  611                                 sock_wfree(sk, skb);
  612 
  613                 }
  614                 else
  615                 {
                        /* No protocol: adjust the socket's memory counters by hand */
  616                         if (rw)
  617                                 atomic_sub(skb->truesize, &sk->rmem_alloc);
  618                         else {
  619                                 if(!sk->dead)
  620                                         sk->write_space(sk);
  621                                 atomic_sub(skb->truesize, &sk->wmem_alloc);
  622                         }
  623                         kfree_skbmem(skb);
  624                 }
  625         }
  626         else
  627                 kfree_skbmem(skb);
  628 }
 629 
 630 /*
 631  *      Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
 632  *      fields and also do memory statistics to find all the [BEEP] leaks.
 633  */
 634 struct sk_buff *alloc_skb(unsigned int size,int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 635 {
 636         struct sk_buff *skb;
 637         int len=size;
 638         unsigned char *bptr;
 639 
 640         if (intr_count && priority!=GFP_ATOMIC) 
 641         {
 642                 static int count = 0;
 643                 if (++count < 5) {
 644                         printk("alloc_skb called nonatomically from interrupt %p\n",
 645                                 __builtin_return_address(0));
 646                         priority = GFP_ATOMIC;
 647                 }
 648         }
 649 
 650         size=(size+15)&~15;             /* Allow for alignments. Make a multiple of 16 bytes */
 651         size+=sizeof(struct sk_buff);   /* And stick the control itself on the end */
 652         
 653         /*
 654          *      Allocate some space
 655          */
 656          
 657         bptr=(unsigned char *)kmalloc(size,priority);
 658         if (bptr == NULL)
 659         {
 660                 net_fails++;
 661                 return NULL;
 662         }
 663 #ifdef PARANOID_BUGHUNT_MODE
 664         if(skb->magic_debug_cookie == SK_GOOD_SKB)
 665                 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
 666 #endif
 667         /*
 668          *      Now we play a little game with the caches. Linux kmalloc is
 669          *      a bit cache dumb, in fact its just about maximally non 
 670          *      optimal for typical kernel buffers. We actually run faster
 671          *      by doing the following. Which is to deliberately put the
 672          *      skb at the _end_ not the start of the memory block.
 673          */
 674         net_allocs++;
 675         
 676         skb=(struct sk_buff *)(bptr+size)-1;
 677 
 678         skb->count = 1;         /* only one reference to this */
 679         skb->data_skb = NULL;   /* and we're our own data skb */
 680 
 681         skb->free = 2;  /* Invalid so we pick up forgetful users */
 682         skb->lock = 0;
 683         skb->pkt_type = PACKET_HOST;    /* Default type */
 684         skb->pkt_bridged = 0;           /* Not bridged */
 685         skb->prev = skb->next = skb->link3 = NULL;
 686         skb->list = NULL;
 687         skb->sk = NULL;
 688         skb->truesize=size;
 689         skb->localroute=0;
 690         skb->stamp.tv_sec=0;    /* No idea about time */
 691         skb->localroute = 0;
 692         skb->ip_summed = 0;
 693         memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
 694         net_skbcount++;
 695 #if CONFIG_SKB_CHECK
 696         skb->magic_debug_cookie = SK_GOOD_SKB;
 697 #endif
 698         skb->users = 0;
 699         /* Load the data pointers */
 700         skb->head=bptr;
 701         skb->data=bptr;
 702         skb->tail=bptr;
 703         skb->end=bptr+len;
 704         skb->len=0;
 705         skb->destructor=NULL;
 706         return skb;
 707 }
 708 
 709 /*
 710  *      Free an skbuff by memory
 711  */
 712 
/*
 *      Drop one reference on a data-owning skb; when the count hits
 *      zero free the underlying kmalloc block via skb->head (the
 *      control block lives inside that same allocation - see alloc_skb).
 */
  713 static inline void __kfree_skbmem(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  714 {
  715         /* don't do anything if somebody still uses us */
  716         if (atomic_dec_and_test(&skb->count)) {
  717                 kfree(skb->head);
  718                 atomic_dec(&net_skbcount);
  719         }
  720 }
 721 
/*
 *      Release an skb's memory.  For a clone (data_skb set) the control
 *      block was kmalloc'd separately, so free the skb itself and drop
 *      a reference on the data-owning skb; otherwise free the combined
 *      data+control block via skb->head.
 */
  722 void kfree_skbmem(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  723 {
  724         void * addr = skb->head;
  725 
  726         /* don't do anything if somebody still uses us */
  727         if (atomic_dec_and_test(&skb->count)) {
  728                 /* free the skb that contains the actual data if we've clone()'d */
  729                 if (skb->data_skb) {
  730                         addr = skb;
  731                         __kfree_skbmem(skb->data_skb);
  732                 }
  733                 kfree(addr);
  734                 atomic_dec(&net_skbcount);
  735         }
  736 }
 737 
 738 /*
 739  *      Duplicate an sk_buff. The new one is not owned by a socket or locked
 740  *      and will be freed on deletion.
 741  */
 742 
/*
 *      Duplicate an sk_buff WITHOUT copying the data: the clone gets
 *      its own control block but shares the data area, holding a
 *      reference on the data-owning skb (chasing data_skb so clones of
 *      clones still point at the real owner).  The clone is unowned,
 *      unlocked and marked free-on-delete.  Returns NULL on allocation
 *      failure.
 */
  743 struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
      /* [previous][next][first][last][top][bottom][index][help] */
  744 {
  745         struct sk_buff *n;
  746 
  747         IS_SKB(skb);
  748         n = kmalloc(sizeof(*n), priority);
  749         if (!n)
  750                 return NULL;
  751         memcpy(n, skb, sizeof(*n));
  752         n->count = 1;
  753         if (skb->data_skb)
  754                 skb = skb->data_skb;
  755         atomic_inc(&skb->count);
  756         atomic_inc(&net_allocs);
  757         atomic_inc(&net_skbcount);
  758         n->data_skb = skb;
  759         n->next = n->prev = n->link3 = NULL;
  760         n->list = NULL;
  761         n->sk = NULL;
  762         n->free = 1;
  763         n->tries = 0;
  764         n->lock = 0;
  765         n->users = 0;
  766         return n;
  767 }
 768 
 769 /*
 770  *      This is slower, and copies the whole data area 
 771  */
 772  
/*
 *      Full (deep) copy of an sk_buff: allocates a fresh buffer of the
 *      same data size, copies the whole data area, and rebases the
 *      cached header pointers (h, mac, ip_hdr) by the offset between
 *      the two data areas.  Slower than skb_clone() but the copy is
 *      fully independent.  Returns NULL on allocation failure.
 */
  773 struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
      /* [previous][next][first][last][top][bottom][index][help] */
  774 {
  775         struct sk_buff *n;
  776         unsigned long offset;
  777 
  778         /*
  779          *      Allocate the copy buffer
  780          */
  781          
  782         IS_SKB(skb);
  783         
  784         n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
  785         if(n==NULL)
  786                 return NULL;
  787 
  788         /*
  789          *      Shift between the two data areas in bytes
  790          */
  791          
  792         offset=n->head-skb->head;
  793 
  794         /* Set the data pointer */
  795         skb_reserve(n,skb->data-skb->head);
  796         /* Set the tail pointer and length */
  797         skb_put(n,skb->len);
  798         /* Copy the bytes */
  799         memcpy(n->head,skb->head,skb->end-skb->head);
        /* Duplicate the protocol metadata; the copy is unowned (sk=NULL),
           unlisted, unlocked and marked free-on-delete. */
  800         n->link3=NULL;
  801         n->list=NULL;
  802         n->sk=NULL;
  803         n->when=skb->when;
  804         n->dev=skb->dev;
  805         n->h.raw=skb->h.raw+offset;
  806         n->mac.raw=skb->mac.raw+offset;
  807         n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
  808         n->saddr=skb->saddr;
  809         n->daddr=skb->daddr;
  810         n->raddr=skb->raddr;
  811         n->seq=skb->seq;
  812         n->end_seq=skb->end_seq;
  813         n->ack_seq=skb->ack_seq;
  814         n->acked=skb->acked;
  815         memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
  816         n->used=skb->used;
  817         n->free=1;
  818         n->arp=skb->arp;
  819         n->tries=0;
  820         n->lock=0;
  821         n->users=0;
  822         n->pkt_type=skb->pkt_type;
  823         n->stamp=skb->stamp;
  824         
  825         IS_SKB(n);
  826         return n;
  827 }
 828 
 829 /*
 830  *     Skbuff device locking
 831  */
 832 
/*
 *      Mark an skb as held by a device driver.  The global net_locked
 *      counter tracks the number of distinct locked buffers, so it is
 *      only bumped on the 0 -> 1 transition.
 */
  833 void skb_device_lock(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  834 {
  835         if(skb->lock)
  836                 printk("double lock on device queue!\n");
  837         else
  838                 net_locked++;
  839         skb->lock++;
  840 }
 841 
/*
 *      Drop a device-driver hold on an skb; net_locked is decremented
 *      only when the lock count reaches zero (mirror of skb_device_lock).
 *      Warns on unbalanced unlocks.
 */
  842 void skb_device_unlock(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  843 {
  844         if(skb->lock==0)
  845                 printk("double unlock on device queue!\n");
  846         skb->lock--;
  847         if(skb->lock==0)
  848                 net_locked--;
  849 }
 850 
/*
 *      Device-side free: drop the driver's lock (if held) and then
 *      actually free the buffer, but only when it is no longer locked
 *      AND is marked freeable (free==1) or free-deferred-while-locked
 *      (free==3, set by kfree_skb).  'mode' is passed through to
 *      kfree_skb as its rw argument.  Interrupt safe.
 */
  851 void dev_kfree_skb(struct sk_buff *skb, int mode)
      /* [previous][next][first][last][top][bottom][index][help] */
  852 {
  853         unsigned long flags;
  854 
  855         save_flags(flags);
  856         cli();
  857         if(skb->lock)
  858         {
  859                 net_locked--;
  860                 skb->lock--;
  861         }
  862         if (!skb->lock && (skb->free == 1 || skb->free == 3))
  863         {
  864                 restore_flags(flags);
  865                 kfree_skb(skb,mode);
  866         }
  867         else
  868                 restore_flags(flags);
  869 }
 870 
/*
 *      Driver-convenience allocator: GFP_ATOMIC buffer of 'length'
 *      bytes with 16 bytes of headroom reserved (room for a link-level
 *      header to be pushed later).  Returns NULL on failure.
 */
  871 struct sk_buff *dev_alloc_skb(unsigned int length)
      /* [previous][next][first][last][top][bottom][index][help] */
  872 {
  873         struct sk_buff *skb;
  874 
  875         skb = alloc_skb(length+16, GFP_ATOMIC);
  876         if (skb)
  877                 skb_reserve(skb,16);
  878         return skb;
  879 }
 880 
/*
 *      Return 1 if a device driver currently holds the buffer, else 0.
 */
  881 int skb_device_locked(struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  882 {
  883         return skb->lock? 1 : 0;
  884 }

/* [previous][next][first][last][top][bottom][index][help] */