root/net/core/skbuff.c


DEFINITIONS

This source file includes the following definitions.
  1. show_net_buffers
  2. skb_check
  3. skb_queue_head_init
  4. skb_queue_head
  5. __skb_queue_head
  6. skb_queue_tail
  7. __skb_queue_tail
  8. skb_dequeue
  9. __skb_dequeue
  10. skb_insert
  11. __skb_insert
  12. skb_append
  13. __skb_append
  14. skb_unlink
  15. __skb_unlink
  16. skb_put
  17. skb_push
  18. skb_pull
  19. skb_headroom
  20. skb_tailroom
  21. skb_reserve
  22. skb_trim
  23. kfree_skb
  24. alloc_skb
  25. __kfree_skbmem
  26. kfree_skbmem
  27. skb_clone
  28. skb_copy
  29. skb_device_lock
  30. skb_device_unlock
  31. dev_kfree_skb
  32. dev_alloc_skb
  33. skb_device_locked

   1 /*
   2  *      Routines having to do with the 'struct sk_buff' memory handlers.
   3  *
   4  *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
   5  *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   6  *
   7  *      Fixes:  
   8  *              Alan Cox        :       Fixed the worst of the load balancer bugs.
   9  *              Dave Platt      :       Interrupt stacking fix.
  10  *              Richard Kooijman:       Timestamp fixes.
  11  *              Alan Cox        :       Changed buffer format.
  12  *              Alan Cox        :       destructor hook for AF_UNIX etc.
  13  *              Linus Torvalds  :       Better skb_clone.
  14  *              Alan Cox        :       Added skb_copy.
  15  *              Alan Cox        :       Added all the changed routines Linus
  16  *                                      only put in the headers
  17  *              Ray VanTassle   :       Fixed --skb->lock in free
  18  *
  19  *      TO FIX:
  20  *              The __skb_ routines ought to check interrupts are disabled
  21  *      when called, and bitch like crazy if not. Unfortunately I don't think
  22  *      we currently have a portable way to check if interrupts are off - 
  23  *      Linus ???
  24  *
  25  *      This program is free software; you can redistribute it and/or
  26  *      modify it under the terms of the GNU General Public License
  27  *      as published by the Free Software Foundation; either version
  28  *      2 of the License, or (at your option) any later version.
  29  */
  30 
  31 /*
  32  *      The functions in this file will not compile correctly with gcc 2.4.x
  33  */
  34 
  35 #include <linux/config.h>
  36 #include <linux/types.h>
  37 #include <linux/kernel.h>
  38 #include <linux/sched.h>
  39 #include <asm/segment.h>
  40 #include <asm/system.h>
  41 #include <linux/mm.h>
  42 #include <linux/interrupt.h>
  43 #include <linux/in.h>
  44 #include <linux/inet.h>
  45 #include <linux/netdevice.h>
  46 #include <net/ip.h>
  47 #include <net/protocol.h>
  48 #include <linux/string.h>
  49 #include <net/route.h>
  50 #include <net/tcp.h>
  51 #include <net/udp.h>
  52 #include <linux/skbuff.h>
  53 #include <net/sock.h>
  54 
  55 
  56 /*
  57  *      Resource tracking variables
  58  */
  59 
  60 atomic_t net_skbcount = 0;
  61 atomic_t net_locked = 0;
  62 atomic_t net_allocs = 0;
  63 atomic_t net_fails  = 0;
  64 atomic_t net_free_locked = 0;
  65 
  66 extern atomic_t ip_frag_mem;
  67 
  68 void show_net_buffers(void)
  69 {
  70         printk("Networking buffers in use          : %u\n",net_skbcount);
  71         printk("Network buffers locked by drivers  : %u\n",net_locked);
  72         printk("Total network buffer allocations   : %u\n",net_allocs);
  73         printk("Total failed network buffer allocs : %u\n",net_fails);
  74         printk("Total free while locked events     : %u\n",net_free_locked);
  75 #ifdef CONFIG_INET
  76         printk("IP fragment buffer size            : %u\n",ip_frag_mem);
  77 #endif  
  78 }
  79 
  80 #if CONFIG_SKB_CHECK
  81 
  82 /*
  83  *      Debugging paranoia. Can go later when this crud stack works
  84  */
  85 
  86 int skb_check(struct sk_buff *skb, int head, int line, char *file)
  87 {
  88         if (head) {
  89                 if (skb->magic_debug_cookie != SK_HEAD_SKB) {
  90                         printk("File: %s Line %d, found a bad skb-head\n",
  91                                 file,line);
  92                         return -1;
  93                 }
  94                 if (!skb->next || !skb->prev) {
  95                         printk("skb_check: head without next or prev\n");
  96                         return -1;
  97                 }
  98                 if (skb->next->magic_debug_cookie != SK_HEAD_SKB
  99                         && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
 100                         printk("File: %s Line %d, bad next head-skb member\n",
 101                                 file,line);
 102                         return -1;
 103                 }
 104                 if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
 105                         && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
 106                         printk("File: %s Line %d, bad prev head-skb member\n",
 107                                 file,line);
 108                         return -1;
 109                 }
 110 #if 0
 111                 {
 112                 struct sk_buff *skb2 = skb->next;
 113                 int i = 0;
 114                 while (skb2 != skb && i < 5) {
 115                         if (skb_check(skb2, 0, line, file) < 0) {
 116                                 printk("bad queue element in whole queue\n");
 117                                 return -1;
 118                         }
 119                         i++;
 120                         skb2 = skb2->next;
 121                 }
 122                 }
 123 #endif
 124                 return 0;
 125         }
 126         if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
 127                 && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
 128                 printk("File: %s Line %d, bad next skb member\n",
 129                         file,line);
 130                 return -1;
 131         }
 132         if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
 133                 && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
 134                 printk("File: %s Line %d, bad prev skb member\n",
 135                         file,line);
 136                 return -1;
 137         }
 138 
 139 
 140         if(skb->magic_debug_cookie==SK_FREED_SKB)
 141         {
 142                 printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
 143                         file,line);
 144                 printk("skb=%p, real size=%d, free=%d\n",
 145                         skb,skb->truesize,skb->free);
 146                 return -1;
 147         }
 148         if(skb->magic_debug_cookie!=SK_GOOD_SKB)
 149         {
 150                 printk("File: %s Line %d, passed a non skb!\n", file,line);
 151                 printk("skb=%p, real size=%d, free=%d\n",
 152                         skb,skb->truesize,skb->free);
 153                 return -1;
 154         }
 155         if(skb->head>skb->data)
 156         {
 157                 printk("File: %s Line %d, head > data !\n", file,line);
 158                 printk("skb=%p, head=%p, data=%p\n",
 159                         skb,skb->head,skb->data);
 160                 return -1;
 161         }
 162         if(skb->tail>skb->end)
 163         {
 164                 printk("File: %s Line %d, tail > end!\n", file,line);
 165                 printk("skb=%p, tail=%p, end=%p\n",
 166                         skb,skb->tail,skb->end);
 167                 return -1;
 168         }
 169         if(skb->data>skb->tail)
 170         {
 171                 printk("File: %s Line %d, data > tail!\n", file,line);
 172                 printk("skb=%p, data=%p, tail=%p\n",
 173                         skb,skb->data,skb->tail);
 174                 return -1;
 175         }
 176         if(skb->tail-skb->data!=skb->len)
 177         {
 178                 printk("File: %s Line %d, wrong length\n", file,line);
 179                 printk("skb=%p, data=%p, end=%p len=%ld\n",
 180                         skb,skb->data,skb->end,skb->len);
 181                 return -1;
 182         }
 183         if((unsigned long) skb->end > (unsigned long) skb)
 184         {
 185                 printk("File: %s Line %d, control overrun\n", file,line);
 186                 printk("skb=%p, end=%p\n",
 187                         skb,skb->end);
 188                 return -1;
 189         }
 190 
 191         /* Guess it might be acceptable then */
 192         return 0;
 193 }
 194 #endif
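
/*
 * Usage note: skb_check() is normally reached through the IS_SKB() and
 * IS_SKB_HEAD() macros in <linux/skbuff.h>, which are expected (when
 * CONFIG_SKB_CHECK is set) to pass the caller's position, roughly:
 *
 *      #define IS_SKB(skb)      skb_check((skb), 0, __LINE__, __FILE__)
 *      #define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__, __FILE__)
 *
 * and to compile away to nothing otherwise. That is why the messages above
 * can name the file and line of the offending caller.
 */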
 195 
 196 
 197 #if CONFIG_SKB_CHECK
 198 void skb_queue_head_init(struct sk_buff_head *list)
 199 {
 200         list->prev = (struct sk_buff *)list;
 201         list->next = (struct sk_buff *)list;
 202         list->qlen = 0;
 203         list->magic_debug_cookie = SK_HEAD_SKB;
 204 }
 205 
 206 
 207 /*
 208  *      Insert an sk_buff at the start of a list.
 209  */
 210 void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
 211 {
 212         unsigned long flags;
 213         struct sk_buff *list = (struct sk_buff *)list_;
 214 
 215         save_flags(flags);
 216         cli();
 217 
 218         IS_SKB(newsk);
 219         IS_SKB_HEAD(list);
 220         if (newsk->next || newsk->prev)
 221                 printk("Suspicious queue head: sk_buff on list!\n");
 222 
 223         newsk->next = list->next;
 224         newsk->prev = list;
 225 
 226         newsk->next->prev = newsk;
 227         newsk->prev->next = newsk;
 228         newsk->list = list_;
 229         list_->qlen++;
 230 
 231         restore_flags(flags);
 232 }
 233 
 234 void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
 235 {
 236         struct sk_buff *list = (struct sk_buff *)list_;
 237 
 238 
 239         IS_SKB(newsk);
 240         IS_SKB_HEAD(list);
 241         if (newsk->next || newsk->prev)
 242                 printk("Suspicious queue head: sk_buff on list!\n");
 243 
 244         newsk->next = list->next;
 245         newsk->prev = list;
 246 
 247         newsk->next->prev = newsk;
 248         newsk->prev->next = newsk;
 249         newsk->list = list_;
 250         list_->qlen++;
 251 
 252 }
 253 
 254 /*
 255  *      Insert an sk_buff at the end of a list.
 256  */
 257 void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
 258 {
 259         unsigned long flags;
 260         struct sk_buff *list = (struct sk_buff *)list_;
 261 
 262         save_flags(flags);
 263         cli();
 264 
 265         if (newsk->next || newsk->prev)
 266                 printk("Suspicious queue tail: sk_buff on list!\n");
 267         IS_SKB(newsk);
 268         IS_SKB_HEAD(list);
 269 
 270         newsk->next = list;
 271         newsk->prev = list->prev;
 272 
 273         newsk->next->prev = newsk;
 274         newsk->prev->next = newsk;
 275         
 276         newsk->list = list_;
 277         list_->qlen++;
 278 
 279         restore_flags(flags);
 280 }
 281 
 282 void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
 283 {
  284         struct sk_buff *list = (struct sk_buff *)list_;
  285 
 286 
 287         if (newsk->next || newsk->prev)
 288                 printk("Suspicious queue tail: sk_buff on list!\n");
 289         IS_SKB(newsk);
 290         IS_SKB_HEAD(list);
 291 
 292         newsk->next = list;
 293         newsk->prev = list->prev;
 294 
 295         newsk->next->prev = newsk;
 296         newsk->prev->next = newsk;
 297         
 298         newsk->list = list_;
 299         list_->qlen++;
 300 }
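
/*
 * Illustrative sketch of the calling convention behind the paired helpers
 * above: skb_queue_tail() protects itself with save_flags()/cli(), while the
 * __skb_queue_tail() variant assumes the caller already has interrupts
 * disabled (see the "TO FIX" note in the header comment).  The queue and the
 * buffers named here are hypothetical and the block is not compiled in.
 */
#if 0
static struct sk_buff_head example_queue;

static void example_enqueue(struct sk_buff *skb1, struct sk_buff *skb2)
{
        unsigned long flags;

        skb_queue_head_init(&example_queue);

        /* Process context: the helper does its own interrupt locking. */
        skb_queue_tail(&example_queue, skb1);

        /* Interrupts already off around a larger critical section. */
        save_flags(flags);
        cli();
        __skb_queue_tail(&example_queue, skb2);
        restore_flags(flags);
}
#endif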
 301 
 302 /*
 303  *      Remove an sk_buff from a list. This routine is also interrupt safe,
 304  *      so you can dequeue, read and free buffers while another process adds them.
 305  */
 306 
 307 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
 308 {
 309         unsigned long flags;
 310         struct sk_buff *result;
 311         struct sk_buff *list = (struct sk_buff *)list_;
 312 
 313         save_flags(flags);
 314         cli();
 315 
 316         IS_SKB_HEAD(list);
 317 
 318         result = list->next;
 319         if (result == list) {
 320                 restore_flags(flags);
 321                 return NULL;
 322         }
 323 
 324         result->next->prev = list;
 325         list->next = result->next;
 326 
 327         result->next = NULL;
 328         result->prev = NULL;
 329         list_->qlen--;
 330         result->list = NULL;
 331         
 332         restore_flags(flags);
 333 
 334         IS_SKB(result);
 335         return result;
 336 }
 337 
 338 struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
 339 {
 340         struct sk_buff *result;
 341         struct sk_buff *list = (struct sk_buff *)list_;
 342 
 343         IS_SKB_HEAD(list);
 344 
 345         result = list->next;
 346         if (result == list) {
 347                 return NULL;
 348         }
 349 
 350         result->next->prev = list;
 351         list->next = result->next;
 352 
 353         result->next = NULL;
 354         result->prev = NULL;
 355         list_->qlen--;
 356         result->list = NULL;
 357         
 358         IS_SKB(result);
 359         return result;
 360 }
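
/*
 * Illustrative sketch: because skb_dequeue() disables interrupts internally,
 * a consumer can drain a queue that an interrupt handler is still feeding,
 * freeing each buffer as it goes.  The queue is hypothetical and FREE_READ is
 * assumed to be the usual receive-side flag from <linux/skbuff.h>.
 */
#if 0
static void example_drain(struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(queue)) != NULL)
        {
                /* ... inspect skb->data / skb->len here ... */
                kfree_skb(skb, FREE_READ);
        }
}
#endif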
 361 
 362 /*
 363  *      Insert a packet before another one in a list.
 364  */
 365 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 366 {
 367         unsigned long flags;
 368 
 369         IS_SKB(old);
 370         IS_SKB(newsk);
 371 
 372         if(!old->next || !old->prev)
 373                 printk("insert before unlisted item!\n");
 374         if(newsk->next || newsk->prev)
 375                 printk("inserted item is already on a list.\n");
 376 
 377         save_flags(flags);
 378         cli();
 379         newsk->next = old;
 380         newsk->prev = old->prev;
 381         old->prev = newsk;
 382         newsk->prev->next = newsk;
 383         newsk->list = old->list;
 384         newsk->list->qlen++;
 385 
 386         restore_flags(flags);
 387 }
 388 
 389 /*
 390  *      Insert a packet before another one in a list.
 391  */
 392 
 393 void __skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 394 {
 395         IS_SKB(old);
 396         IS_SKB(newsk);
 397 
 398         if(!old->next || !old->prev)
 399                 printk("insert before unlisted item!\n");
 400         if(newsk->next || newsk->prev)
 401                 printk("inserted item is already on a list.\n");
 402 
 403         newsk->next = old;
 404         newsk->prev = old->prev;
 405         old->prev = newsk;
 406         newsk->prev->next = newsk;
 407         newsk->list = old->list;
 408         newsk->list->qlen++;
 409 
 410 }
 411 
 412 /*
 413  *      Place a packet after a given packet in a list.
 414  */
 415 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 416 {
 417         unsigned long flags;
 418 
 419         IS_SKB(old);
 420         IS_SKB(newsk);
 421 
 422         if(!old->next || !old->prev)
 423                 printk("append after unlisted item!\n");
 424         if(newsk->next || newsk->prev)
 425                 printk("append item is already on a list.\n");
 426 
 427         save_flags(flags);
 428         cli();
 429 
 430         newsk->prev = old;
 431         newsk->next = old->next;
 432         newsk->next->prev = newsk;
 433         old->next = newsk;
 434         newsk->list = old->list;
 435         newsk->list->qlen++;
 436 
 437         restore_flags(flags);
 438 }
 439 
 440 void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
 441 {
 442         IS_SKB(old);
 443         IS_SKB(newsk);
 444 
 445         if(!old->next || !old->prev)
 446                 printk("append after unlisted item!\n");
 447         if(newsk->next || newsk->prev)
 448                 printk("append item is already on a list.\n");
 449 
 450         newsk->prev = old;
 451         newsk->next = old->next;
 452         newsk->next->prev = newsk;
 453         old->next = newsk;
 454         newsk->list = old->list;
 455         newsk->list->qlen++;
 456 
 457 }
 458 
 459 /*
 460  *      Remove an sk_buff from its list. Works even without knowing the list it
 461  *      is sitting on, which can be handy at times. It also means that THE LIST
 462  *      MUST EXIST when you unlink, so a list must have its contents unlinked
 463  *      _FIRST_, before the list head itself is destroyed.
 464  */
 465 void skb_unlink(struct sk_buff *skb)
 466 {
 467         unsigned long flags;
 468 
 469         save_flags(flags);
 470         cli();
 471 
 472         IS_SKB(skb);
 473 
 474         if(skb->list)
 475         {
 476                 skb->list->qlen--;
 477                 skb->next->prev = skb->prev;
 478                 skb->prev->next = skb->next;
 479                 skb->next = NULL;
 480                 skb->prev = NULL;
 481                 skb->list = NULL;
 482         }
 483 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
 484         else
 485                 printk("skb_unlink: not a linked element\n");
 486 #endif
 487         restore_flags(flags);
 488 }
 489 
 490 void __skb_unlink(struct sk_buff *skb)
 491 {
 492         IS_SKB(skb);
 493 
 494         if(skb->list)
 495         {
 496                 skb->list->qlen--;
 497                 skb->next->prev = skb->prev;
 498                 skb->prev->next = skb->next;
 499                 skb->next = NULL;
 500                 skb->prev = NULL;
 501                 skb->list = NULL;
 502         }
 503 #ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
 504         else
 505                 printk("skb_unlink: not a linked element\n");
 506 #endif
 507 }
 508 
 509 /*
 510  *      Add data to an sk_buff
 511  */
 512  
 513 unsigned char *skb_put(struct sk_buff *skb, int len)
 514 {
 515         unsigned char *tmp=skb->tail;
 516         IS_SKB(skb);
 517         skb->tail+=len;
 518         skb->len+=len;
 519         IS_SKB(skb);
 520         if(skb->tail>skb->end)
 521                 panic("skput:over: %p:%d", __builtin_return_address(0),len);
 522         return tmp;
 523 }
 524 
 525 unsigned char *skb_push(struct sk_buff *skb, int len)
 526 {
 527         IS_SKB(skb);
 528         skb->data-=len;
 529         skb->len+=len;
 530         IS_SKB(skb);
 531         if(skb->data<skb->head)
 532                 panic("skpush:under: %p:%d", __builtin_return_address(0),len);
 533         return skb->data;
 534 }
 535 
 536 unsigned char * skb_pull(struct sk_buff *skb, int len)
 537 {
 538         IS_SKB(skb);
 539         if(len>skb->len)
 540                 return 0;
 541         skb->data+=len;
 542         skb->len-=len;
 543         return skb->data;
 544 }
 545 
 546 int skb_headroom(struct sk_buff *skb)
 547 {
 548         IS_SKB(skb);
 549         return skb->data-skb->head;
 550 }
 551 
 552 int skb_tailroom(struct sk_buff *skb)
 553 {
 554         IS_SKB(skb);
 555         return skb->end-skb->tail;
 556 }
 557 
 558 void skb_reserve(struct sk_buff *skb, int len)
 559 {
 560         IS_SKB(skb);
 561         skb->data+=len;
 562         skb->tail+=len;
 563         if(skb->tail>skb->end)
 564                 panic("sk_res: over");
 565         if(skb->data<skb->head)
 566                 panic("sk_res: under");
 567         IS_SKB(skb);
 568 }
 569 
 570 void skb_trim(struct sk_buff *skb, int len)
 571 {
 572         IS_SKB(skb);
 573         if(skb->len>len)
 574         {
 575                 skb->len=len;
 576                 skb->tail=skb->data+len;
 577         }
 578 }
 579 
 580 
 581 
 582 #endif
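
/*
 * Illustrative sketch of how the pointer helpers above combine when building
 * a packet: skb_reserve() opens headroom, skb_put() extends the data area at
 * the tail, skb_push() claims part of the headroom for a header, and a
 * receiver strips it again with skb_pull().  The sizes used are hypothetical
 * and the block is not compiled in.
 */
#if 0
static struct sk_buff *example_build(void)
{
        struct sk_buff *skb=alloc_skb(1500+16, GFP_ATOMIC);
        unsigned char *payload;
        unsigned char *header;

        if(skb==NULL)
                return NULL;

        skb_reserve(skb,16);            /* leave headroom for a link header    */
        payload=skb_put(skb,100);       /* append 100 bytes of payload space   */
        memset(payload,0,100);
        header=skb_push(skb,14);        /* prepend the header in the headroom  */
        memset(header,0,14);

        /* A receiver would undo the push with skb_pull(skb,14). */
        return skb;
}
#endif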
 583 
 584 /*
 585  *      Free an sk_buff. This still knows about things it should
 586  *      not need to, such as protocols and sockets.
 587  */
 588 
 589 void kfree_skb(struct sk_buff *skb, int rw)
 590 {
 591         if (skb == NULL)
 592         {
 593                 printk("kfree_skb: skb = NULL (from %p)\n",
 594                         __builtin_return_address(0));
 595                 return;
 596         }
 597 #if CONFIG_SKB_CHECK
 598         IS_SKB(skb);
 599 #endif
 600         if (skb->lock)
 601         {
 602                 skb->free = 3;    /* Free when unlocked */
 603                 net_free_locked++;
 604                 return;
 605         }
 606         if (skb->free == 2)
 607                 printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
 608                         __builtin_return_address(0));
 609         if (skb->list)
 610                 printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
 611                         __builtin_return_address(0));
 612 
 613         if(skb->destructor)
 614                 skb->destructor(skb);
 615         if (skb->sk)
 616         {
 617                 struct sock * sk = skb->sk;
 618                 if(sk->prot!=NULL)
 619                 {
 620                         if (rw)
 621                                 sock_rfree(sk, skb);
 622                         else
 623                                 sock_wfree(sk, skb);
 624 
 625                 }
 626                 else
 627                 {
 628                         if (rw)
 629                                 atomic_sub(skb->truesize, &sk->rmem_alloc);
 630                         else {
 631                                 atomic_sub(skb->truesize, &sk->wmem_alloc);
 632                                 if(!sk->dead)
 633                                         sk->write_space(sk);
 634                         }
 635                         kfree_skbmem(skb);
 636                 }
 637         }
 638         else
 639                 kfree_skbmem(skb);
 640 }
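
/*
 * Usage note: the 'rw' argument selects which side of the socket accounting
 * is credited when the buffer had an owning socket.  Callers in this era are
 * expected to pass the FREE_READ / FREE_WRITE constants from
 * <linux/skbuff.h>, e.g. kfree_skb(skb, FREE_READ) for a buffer from the
 * receive path and kfree_skb(skb, FREE_WRITE) for one queued for transmit.
 */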
 641 
 642 /*
 643  *      Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
 644  *      fields and also do memory statistics to find all the [BEEP] leaks.
 645  */
 646 struct sk_buff *alloc_skb(unsigned int size,int priority)
 647 {
 648         struct sk_buff *skb;
 649         int len=size;
 650         unsigned char *bptr;
 651 
 652         if (intr_count && priority!=GFP_ATOMIC) 
 653         {
 654                 static int count = 0;
 655                 if (++count < 5) {
 656                         printk("alloc_skb called nonatomically from interrupt %p\n",
 657                                 __builtin_return_address(0));
 658                         priority = GFP_ATOMIC;
 659                 }
 660         }
 661 
 662         size=(size+15)&~15;             /* Allow for alignments. Make a multiple of 16 bytes */
 663         size+=sizeof(struct sk_buff);   /* And stick the control itself on the end */
 664         
 665         /*
 666          *      Allocate some space
 667          */
 668          
 669         bptr=(unsigned char *)kmalloc(size,priority);
 670         if (bptr == NULL)
 671         {
 672                 net_fails++;
 673                 return NULL;
 674         }
 675 #ifdef PARANOID_BUGHUNT_MODE
 676         if(((struct sk_buff *)(bptr+size)-1)->magic_debug_cookie == SK_GOOD_SKB)
 677                 printk("Kernel kmalloc handed us an existing skb (%p)\n",bptr);
 678 #endif
 679         /*
 680          *      Now we play a little game with the caches. Linux kmalloc is
 681          *      a bit cache dumb; in fact it's just about maximally
 682          *      non-optimal for typical kernel buffers. We actually run
 683          *      faster by doing the following, which is to deliberately put
 684          *      the skb at the _end_, not the start, of the memory block.
 685          */
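        /*
         * Sketch of the resulting layout ('len' is the caller's size, 'size'
         * is len rounded up to 16 bytes plus sizeof(struct sk_buff)):
         *
         *      bptr                bptr+len                     bptr+size
         *       |<----- data area ----->|<-pad->|<-- struct sk_buff -->|
         *       head,data,tail          end     skb
         *
         * i.e. the control structure sits at the very end of the kmalloc()
         * block, after the data it describes.
         */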
 686         net_allocs++;
 687         
 688         skb=(struct sk_buff *)(bptr+size)-1;
 689 
 690         skb->count = 1;         /* only one reference to this */
 691         skb->data_skb = NULL;   /* and we're our own data skb */
 692 
 693         skb->free = 2;  /* Invalid so we pick up forgetful users */
 694         skb->lock = 0;
 695         skb->pkt_type = PACKET_HOST;    /* Default type */
 696         skb->prev = skb->next = skb->link3 = NULL;
 697         skb->list = NULL;
 698         skb->sk = NULL;
 699         skb->truesize=size;
 700         skb->localroute=0;
 701         skb->stamp.tv_sec=0;    /* No idea about time */
 703         skb->ip_summed = 0;
 704         memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
 705         net_skbcount++;
 706 #if CONFIG_SKB_CHECK
 707         skb->magic_debug_cookie = SK_GOOD_SKB;
 708 #endif
 709         skb->users = 0;
 710         /* Load the data pointers */
 711         skb->head=bptr;
 712         skb->data=bptr;
 713         skb->tail=bptr;
 714         skb->end=bptr+len;
 715         skb->len=0;
 716         skb->destructor=NULL;
 717         return skb;
 718 }
 719 
 720 /*
 721  *      Free an skbuff by memory
 722  */
 723 
 724 static inline void __kfree_skbmem(struct sk_buff *skb)
 725 {
 726         /* don't do anything if somebody still uses us */
 727         if (--skb->count <= 0) {
 728                 kfree(skb->head);
 729                 net_skbcount--;
 730         }
 731 }
 732 
 733 void kfree_skbmem(struct sk_buff *skb)
 734 {
 735         unsigned long flags;
 736         void * addr = skb->head;
 737 
 738         save_flags(flags);
 739         cli();
 740         /* don't do anything if somebody still uses us */
 741         if (--skb->count <= 0) {
 742                 /* free the skb that contains the actual data if we've clone()'d */
 743                 if (skb->data_skb) {
 744                         addr = skb;
 745                         __kfree_skbmem(skb->data_skb);
 746                 }
 747                 kfree(addr);
 748                 net_skbcount--;
 749         }
 750         restore_flags(flags);
 751 }
 752 
 753 /*
 754  *      Duplicate an sk_buff. The clone shares the original's data area;
 755  *      it is not owned by a socket or locked and will be freed on deletion.
 756  */
 757 
 758 struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
 759 {
 760         struct sk_buff *n;
 761 
 762         IS_SKB(skb);
 763         n = kmalloc(sizeof(*n), priority);
 764         if (!n)
 765                 return NULL;
 766         memcpy(n, skb, sizeof(*n));
 767         n->count = 1;
 768         if (skb->data_skb)
 769                 skb = skb->data_skb;
 770         atomic_inc(&skb->count);
 771         atomic_inc(&net_allocs);
 772         atomic_inc(&net_skbcount);
 773         n->data_skb = skb;
 774         n->next = n->prev = n->link3 = NULL;
 775         n->list = NULL;
 776         n->sk = NULL;
 777         n->free = 1;
 778         n->tries = 0;
 779         n->lock = 0;
 780         n->users = 0;
 781         return n;
 782 }
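
/*
 * Illustrative sketch: skb_clone() is the cheap way to hand the same packet
 * data to a second consumer.  Only the control structure is duplicated; the
 * data area is shared and reference counted, so kfree_skb() on either buffer
 * just drops a reference until the last user is gone.  The second consumer
 * here is hypothetical and the block is not compiled in.
 */
#if 0
static void example_deliver_twice(struct sk_buff *skb)
{
        struct sk_buff *copy=skb_clone(skb, GFP_ATOMIC);

        if(copy!=NULL)
        {
                /* ... hand 'copy' to a second protocol handler ... */
                kfree_skb(copy, FREE_READ);
        }
        kfree_skb(skb, FREE_READ);
}
#endif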
 783 
 784 /*
 785  *      This is slower than skb_clone(), and copies the whole data area.
 786  */
 787  
 788 struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
 789 {
 790         struct sk_buff *n;
 791         unsigned long offset;
 792 
 793         /*
 794          *      Allocate the copy buffer
 795          */
 796          
 797         IS_SKB(skb);
 798         
 799         n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
 800         if(n==NULL)
 801                 return NULL;
 802 
 803         /*
 804          *      Shift between the two data areas in bytes
 805          */
 806          
 807         offset=n->head-skb->head;
 808 
 809         /* Set the data pointer */
 810         skb_reserve(n,skb->data-skb->head);
 811         /* Set the tail pointer and length */
 812         skb_put(n,skb->len);
 813         /* Copy the bytes */
 814         memcpy(n->head,skb->head,skb->end-skb->head);
 815         n->link3=NULL;
 816         n->list=NULL;
 817         n->sk=NULL;
 818         n->when=skb->when;
 819         n->dev=skb->dev;
 820         n->h.raw=skb->h.raw+offset;
 821         n->mac.raw=skb->mac.raw+offset;
 822         n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
 823         n->saddr=skb->saddr;
 824         n->daddr=skb->daddr;
 825         n->raddr=skb->raddr;
 826         n->seq=skb->seq;
 827         n->end_seq=skb->end_seq;
 828         n->ack_seq=skb->ack_seq;
 829         n->acked=skb->acked;
 830         memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
 831         n->used=skb->used;
 832         n->free=1;
 833         n->arp=skb->arp;
 834         n->tries=0;
 835         n->lock=0;
 836         n->users=0;
 837         n->pkt_type=skb->pkt_type;
 838         n->stamp=skb->stamp;
 839         
 840         IS_SKB(n);
 841         return n;
 842 }
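
/*
 * Usage note: prefer skb_copy() over skb_clone() when the new buffer must be
 * writable, since a clone shares its data area with the original while the
 * copy owns a private one.  A hypothetical caller that wants to rewrite
 * headers might do:
 *
 *      struct sk_buff *private = skb_copy(skb, GFP_ATOMIC);
 *      if (private != NULL)
 *              ... modify private->data without touching the original ...
 */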
 843 
 844 /*
 845  *     Skbuff device locking
 846  */
 847 
 848 void skb_device_lock(struct sk_buff *skb)
 849 {
 850         if(skb->lock)
 851                 printk("double lock on device queue!\n");
 852         else
 853                 net_locked++;
 854         skb->lock++;
 855 }
 856 
 857 void skb_device_unlock(struct sk_buff *skb)
 858 {
 859         if(skb->lock==0)
 860                 printk("double unlock on device queue!\n");
 861         skb->lock--;
 862         if(skb->lock==0)
 863                 net_locked--;
 864 }
 865 
 866 void dev_kfree_skb(struct sk_buff *skb, int mode)
 867 {
 868         unsigned long flags;
 869 
 870         save_flags(flags);
 871         cli();
 872         if(skb->lock)
 873         {
 874                 net_locked--;
 875                 skb->lock--;
 876         }
 877         if (!skb->lock && (skb->free == 1 || skb->free == 3))
 878         {
 879                 restore_flags(flags);
 880                 kfree_skb(skb,mode);
 881         }
 882         else
 883                 restore_flags(flags);
 884 }
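
/*
 * Usage note: these locking helpers interact with kfree_skb() above.  A
 * buffer that is freed while still locked by a driver has skb->free set to 3
 * and is only really released later, when dev_kfree_skb() drops the lock and
 * sees free == 1 or free == 3.  A driver's transmit-complete path therefore
 * simply calls dev_kfree_skb(skb, FREE_WRITE) and the deferred free, if any,
 * happens there.
 */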
 885 
 886 struct sk_buff *dev_alloc_skb(unsigned int length)
 887 {
 888         struct sk_buff *skb;
 889 
 890         skb = alloc_skb(length+16, GFP_ATOMIC);
 891         if (skb)
 892                 skb_reserve(skb,16);
 893         return skb;
 894 }
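
/*
 * Illustrative sketch: dev_alloc_skb() leaves 16 bytes of headroom so a
 * driver can copy a frame in and later layers can still push or pull headers
 * cheaply.  The receive path below is a rough sketch for this era; the
 * device, frame buffer and length are hypothetical, and eth_type_trans() and
 * netif_rx() are assumed from the usual driver-facing API.  Not compiled in.
 */
#if 0
static void example_rx(struct device *dev, unsigned char *frame, int pkt_len)
{
        struct sk_buff *skb=dev_alloc_skb(pkt_len);

        if(skb==NULL)
                return;                 /* drop the frame on memory pressure */

        skb->dev=dev;
        memcpy(skb_put(skb,pkt_len), frame, pkt_len);
        skb->protocol=eth_type_trans(skb,dev);
        netif_rx(skb);                  /* hand the buffer to the network core */
}
#endif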
 895 
 896 int skb_device_locked(struct sk_buff *skb)
 897 {
 898         return skb->lock? 1 : 0;
 899 }
