root/net/ipv4/ip_fragment.c


DEFINITIONS

This source file includes the following definitions.
  1. frag_kfree_skb
  2. frag_kfree_s
  3. frag_kmalloc
  4. ip_frag_create
  5. ip_find
  6. ip_free
  7. ip_expire
  8. ip_evictor
  9. ip_create
  10. ip_done
  11. ip_glue
  12. ip_defrag
  13. ip_fragment

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The IP fragmentation functionality.
   7  *              
   8  * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
   9  *              Alan Cox <Alan.Cox@linux.org>
  10  *
  11  * Fixes:
   12  *              Alan Cox        :       Split from ip.c, see ip_input.c for history.
  13  */
  14 
  15 #include <linux/types.h>
  16 #include <linux/mm.h>
  17 #include <linux/sched.h>
  18 #include <linux/skbuff.h>
  19 #include <linux/ip.h>
  20 #include <linux/icmp.h>
  21 #include <linux/netdevice.h>
  22 #include <net/sock.h>
  23 #include <net/ip.h>
  24 #include <net/icmp.h>
  25 #include <linux/tcp.h>
  26 #include <linux/udp.h>
  27 #include <linux/firewall.h>
  28 #include <linux/ip_fw.h>
  29 #include <net/checksum.h>
  30 
  31 /*
  32  *      Fragment cache limits. We will commit 256K at one time. Should we
  33  *      cross that limit we will prune down to 192K. This should cope with
  34  *      even the most extreme cases without allowing an attacker to measurably
  35  *      harm machine performance.
  36  */
  37  
  38 #define IPFRAG_HIGH_THRESH              (256*1024)
  39 #define IPFRAG_LOW_THRESH               (192*1024)
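/*
 *      Illustrative note: these two limits give the evictor some hysteresis.
 *      Once usage passes 256K we keep freeing whole queues until we are back
 *      below 192K, so each pass reclaims at least 64K before new fragments
 *      are charged against the cache again.
 */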
  40 
  41 /*
  42  *      This fragment handler is a bit of a heap. On the other hand it works quite
  43  *      happily and handles things quite well.
  44  */
  45 
  46 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
  47 
  48 atomic_t ip_frag_mem = 0;                       /* Memory used for fragments */
  49 
  50 /*
  51  *      Memory Tracking Functions
  52  */
  53  
  54 extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type)
  55 {
  56         atomic_sub(skb->truesize, &ip_frag_mem);
  57         kfree_skb(skb,type);
  58 }
  59 
  60 extern __inline__ void frag_kfree_s(void *ptr, int len)
  61 {
  62         atomic_sub(len, &ip_frag_mem);
  63         kfree_s(ptr,len);
  64 }
  65  
  66 extern __inline__ void *frag_kmalloc(int size, int pri)
  67 {
  68         void *vp=kmalloc(size,pri);
  69         if(!vp)
  70                 return NULL;
  71         atomic_add(size, &ip_frag_mem);
  72         return vp;
  73 }
  74  
  75 /*
  76  *      Create a new fragment entry.
  77  */
  78 
  79 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
  80 {
  81         struct ipfrag *fp;
  82         unsigned long flags;
  83 
  84         fp = (struct ipfrag *) frag_kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
  85         if (fp == NULL)
  86         {
  87                 NETDEBUG(printk("IP: frag_create: no memory left !\n"));
  88                 return(NULL);
  89         }
  90         memset(fp, 0, sizeof(struct ipfrag));
  91 
  92         /* Fill in the structure. */
  93         fp->offset = offset;
  94         fp->end = end;
  95         fp->len = end - offset;
  96         fp->skb = skb;
  97         fp->ptr = ptr;
  98         
  99         /*
 100          *      Charge for the SKB as well.
 101          */
 102          
 103         save_flags(flags);
 104         cli();
 105         ip_frag_mem+=skb->truesize;
 106         restore_flags(flags);
 107 
 108         return(fp);
 109 }
 110 
 111 
 112 /*
 113  *      Find the correct entry in the "incomplete datagrams" queue for
 114  *      this IP datagram, and return the queue entry address if found.
 115  */
 116 
 117 static struct ipq *ip_find(struct iphdr *iph)
 118 {
 119         struct ipq *qp;
 120         struct ipq *qplast;
 121 
 122         cli();
 123         qplast = NULL;
 124         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 125         {
 126                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 127                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 128                 {
 129                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 130                         sti();
 131                         return(qp);
 132                 }
 133         }
 134         sti();
 135         return(NULL);
 136 }
 137 
 138 
 139 /*
 140  *      Remove an entry from the "incomplete datagrams" queue, either
 141  *      because we completed, reassembled and processed it, or because
 142  *      it timed out.
 143  */
 144 
 145 static void ip_free(struct ipq *qp)
 146 {
 147         struct ipfrag *fp;
 148         struct ipfrag *xp;
 149 
 150         /*
 151          * Stop the timer for this entry.
 152          */
 153 
 154         del_timer(&qp->timer);
 155 
 156         /* Remove this entry from the "incomplete datagrams" queue. */
 157         cli();
 158         if (qp->prev == NULL)
 159         {
 160                 ipqueue = qp->next;
 161                 if (ipqueue != NULL)
 162                         ipqueue->prev = NULL;
 163         }
 164         else
 165         {
 166                 qp->prev->next = qp->next;
 167                 if (qp->next != NULL)
 168                         qp->next->prev = qp->prev;
 169         }
 170 
 171         /* Release all fragment data. */
 172 
 173         fp = qp->fragments;
 174         while (fp != NULL)
 175         {
 176                 xp = fp->next;
 177                 IS_SKB(fp->skb);
 178                 frag_kfree_skb(fp->skb,FREE_READ);
 179                 frag_kfree_s(fp, sizeof(struct ipfrag));
 180                 fp = xp;
 181         }
 182 
 183         /* Release the IP header. */
 184         frag_kfree_s(qp->iph, 64 + 8);
 185 
 186         /* Finally, release the queue descriptor itself. */
 187         frag_kfree_s(qp, sizeof(struct ipq));
 188         sti();
 189 }
 190 
 191 
 192 /*
 193  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 194  */
 195 
 196 static void ip_expire(unsigned long arg)
 197 {
 198         struct ipq *qp;
 199 
 200         qp = (struct ipq *)arg;
 201 
 202         /*
 203          *      Send an ICMP "Fragment Reassembly Timeout" message.
 204          */
 205 
 206         ip_statistics.IpReasmTimeout++;
 207         ip_statistics.IpReasmFails++;   
 208         /* This if is always true... shrug */
 209         if(qp->fragments!=NULL)
 210                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 211                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 212 
 213         /*
 214          *      Nuke the fragment queue.
 215          */
 216         ip_free(qp);
 217 }
 218 
 219 /*
  220  *      Memory limiting on fragments. The evictor discards whole fragment
  221  *      queues from the head of the list until we are back under the low threshold.
 222  */
 223  
 224 static void ip_evictor(void)
 225 {
 226         while(ip_frag_mem>IPFRAG_LOW_THRESH)
 227         {
 228                 if(!ipqueue)
 229                         panic("ip_evictor: memcount");
 230                 ip_free(ipqueue);
 231         }
 232 }
 233 
 234 /*
 235  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 236  *      We will (hopefully :-) receive all other fragments of this datagram
 237  *      in time, so we just create a queue for this datagram, in which we
 238  *      will insert the received fragments at their respective positions.
 239  */
 240 
 241 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 242 {
 243         struct ipq *qp;
 244         int ihlen;
 245 
 246         qp = (struct ipq *) frag_kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 247         if (qp == NULL)
 248         {
 249                 NETDEBUG(printk("IP: create: no memory left !\n"));
 250                 return(NULL);
 251         }
 252         memset(qp, 0, sizeof(struct ipq));
 253 
 254         /*
 255          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 256          */
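        /*
         *      (Editorial note: 64 bytes covers the largest legal IP header,
         *      ihl == 15, i.e. 60 bytes, and the extra 8 octets are the
         *      leading data bytes an ICMP error has to echo back.)
         */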
 257 
 258         ihlen = iph->ihl * 4;
 259         qp->iph = (struct iphdr *) frag_kmalloc(64 + 8, GFP_ATOMIC);
 260         if (qp->iph == NULL)
 261         {
 262                 NETDEBUG(printk("IP: create: no memory left !\n"));
 263                 frag_kfree_s(qp, sizeof(struct ipq));
 264                 return(NULL);
 265         }
 266 
 267         memcpy(qp->iph, iph, ihlen + 8);
 268         qp->len = 0;
 269         qp->ihlen = ihlen;
 270         qp->fragments = NULL;
 271         qp->dev = dev;
 272 
 273         /* Start a timer for this entry. */
 274         qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds     */
 275         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 276         qp->timer.function = ip_expire;                 /* expire function      */
 277         add_timer(&qp->timer);
 278 
 279         /* Add this entry to the queue. */
 280         qp->prev = NULL;
 281         cli();
 282         qp->next = ipqueue;
 283         if (qp->next != NULL)
 284                 qp->next->prev = qp;
 285         ipqueue = qp;
 286         sti();
 287         return(qp);
 288 }
 289 
 290 
 291 /*
 292  *      See if a fragment queue is complete.
 293  */
 294 
 295 static int ip_done(struct ipq *qp)
 296 {
 297         struct ipfrag *fp;
 298         int offset;
 299 
 300         /* Only possible if we received the final fragment. */
 301         if (qp->len == 0)
 302                 return(0);
 303 
 304         /* Check all fragment offsets to see if they connect. */
 305         fp = qp->fragments;
 306         offset = 0;
 307         while (fp != NULL)
 308         {
 309                 if (fp->offset > offset)
 310                         return(0);      /* fragment(s) missing */
 311                 offset = fp->end;
 312                 fp = fp->next;
 313         }
 314 
 315         /* All fragments are present. */
 316         return(1);
 317 }
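/*
 *      Worked example: with fragments covering [0,1480), [1480,2960) and a
 *      final fragment [2960,3008) (so qp->len == 3008) the walk above finds
 *      no gap and ip_done() returns 1; if the middle fragment were missing,
 *      fp->offset (2960) would exceed the running offset (1480) and we
 *      would return 0.
 */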
 318 
 319 
 320 /*
 321  *      Build a new IP datagram from all its fragments.
 322  *
 323  *      FIXME: We copy here because we lack an effective way of handling lists
 324  *      of bits on input. Until the new skb data handling is in I'm not going
 325  *      to touch this with a bargepole. 
 326  */
 327 
 328 static struct sk_buff *ip_glue(struct ipq *qp)
 329 {
 330         struct sk_buff *skb;
 331         struct iphdr *iph;
 332         struct ipfrag *fp;
 333         unsigned char *ptr;
 334         int count, len;
 335 
 336         /*
 337          *      Allocate a new buffer for the datagram.
 338          */
 339         len = qp->ihlen + qp->len;
 340 
 341         if ((skb = dev_alloc_skb(len)) == NULL)
 342         {
 343                 ip_statistics.IpReasmFails++;
 344                 NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
 345                 ip_free(qp);
 346                 return(NULL);
 347         }
 348 
 349         /* Fill in the basic details. */
 350         skb_put(skb,len);
 351         skb->h.raw = skb->data;
 352         skb->free = 1;
 353 
 354         /* Copy the original IP headers into the new buffer. */
 355         ptr = (unsigned char *) skb->h.raw;
 356         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 357         ptr += qp->ihlen;
 358 
 359         count = 0;
 360 
 361         /* Copy the data portions of all fragments into the new buffer. */
 362         fp = qp->fragments;
 363         while(fp != NULL)
 364         {
 365                 if(count+fp->len > skb->len)
 366                 {
 367                         NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
 368                         ip_free(qp);
 369                         frag_kfree_skb(skb,FREE_WRITE);
 370                         ip_statistics.IpReasmFails++;
 371                         return NULL;
 372                 }
 373                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 374                 count += fp->len;
 375                 fp = fp->next;
 376         }
 377 
 378         /* We glued together all fragments, so remove the queue entry. */
 379         ip_free(qp);
 380 
 381         /* Done with all fragments. Fixup the new IP header. */
 382         iph = skb->h.iph;
 383         iph->frag_off = 0;
 384         iph->tot_len = htons((iph->ihl * 4) + count);
 385         skb->ip_hdr = iph;
 386 
 387         ip_statistics.IpReasmOKs++;
 388         return(skb);
 389 }
 390 
 391 
 392 /*
 393  *      Process an incoming IP datagram fragment.
 394  */
 395 
 396 struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 397 {
 398         struct ipfrag *prev, *next, *tmp;
 399         struct ipfrag *tfp;
 400         struct ipq *qp;
 401         struct sk_buff *skb2;
 402         unsigned char *ptr;
 403         int flags, offset;
 404         int i, ihl, end;
 405         
 406         ip_statistics.IpReasmReqds++;
 407 
 408         /*
 409          *      Start by cleaning up the memory
 410          */
 411 
 412         if(ip_frag_mem>IPFRAG_HIGH_THRESH)
 413                 ip_evictor();
 414         /* 
 415          *      Find the entry of this IP datagram in the "incomplete datagrams" queue. 
 416          */
 417          
 418         qp = ip_find(iph);
 419 
 420         /* Is this a non-fragmented datagram? */
 421         offset = ntohs(iph->frag_off);
 422         flags = offset & ~IP_OFFSET;
 423         offset &= IP_OFFSET;
 424         if (((flags & IP_MF) == 0) && (offset == 0))
 425         {
 426                 if (qp != NULL)
 427                         ip_free(qp);    /* Huh? How could this exist?? */
 428                 return(skb);
 429         }
 430 
 431         offset <<= 3;           /* offset is in 8-byte chunks */
 432         ihl = iph->ihl * 4;
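        /*
         *      Example: a frag_off field of 0x20B9 has IP_MF set and a raw
         *      offset of 0xB9 (185 eight-byte units); after the shift this
         *      fragment's data starts at byte 185*8 = 1480, i.e. right after
         *      a first fragment carrying 1480 data bytes.
         */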
 433 
 434         /*
 435          * If the queue already existed, keep restarting its timer as long
 436          * as we still are receiving fragments.  Otherwise, create a fresh
 437          * queue entry.
 438          */
 439 
 440         if (qp != NULL)
 441         {
 442                 /* ANK. If the first fragment is received,
 443                  * we should remember the correct IP header (with options)
 444                  */
 445                 if (offset == 0)
 446                 {
 447                         qp->ihlen = ihl;
 448                         memcpy(qp->iph, iph, ihl+8);
 449                 }
 450                 del_timer(&qp->timer);
 451                 qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds */
 452                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 453                 qp->timer.function = ip_expire;         /* expire function */
 454                 add_timer(&qp->timer);
 455         }
 456         else
 457         {
 458                 /*
 459                  *      If we failed to create it, then discard the frame
 460                  */
 461                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 462                 {
 463                         skb->sk = NULL;
 464                         frag_kfree_skb(skb, FREE_READ);
 465                         ip_statistics.IpReasmFails++;
 466                         return NULL;
 467                 }
 468         }
 469 
 470         /*
 471          *      Determine the position of this fragment.
 472          */
 473 
 474         end = offset + ntohs(iph->tot_len) - ihl;
 475 
 476         /*
 477          *      Point into the IP datagram 'data' part.
 478          */
 479 
 480         ptr = skb->data + ihl;
 481 
 482         /*
 483          *      Is this the final fragment?
 484          */
 485 
 486         if ((flags & IP_MF) == 0)
 487                 qp->len = end;
 488 
 489         /*
 490          *      Find out which fragments are in front and at the back of us
 491          *      in the chain of fragments so far.  We must know where to put
 492          *      this fragment, right?
 493          */
 494 
 495         prev = NULL;
 496         for(next = qp->fragments; next != NULL; next = next->next)
 497         {
 498                 if (next->offset > offset)
 499                         break;  /* bingo! */
 500                 prev = next;
 501         }
 502 
 503         /*
 504          *      We found where to put this one.
 505          *      Check for overlap with preceding fragment, and, if needed,
 506          *      align things so that any overlaps are eliminated.
 507          */
 508         if (prev != NULL && offset < prev->end)
 509         {
 510                 i = prev->end - offset;
 511                 offset += i;    /* ptr into datagram */
 512                 ptr += i;       /* ptr into fragment data */
 513         }
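        /*
         *      Example: if the preceding fragment already covers [0,1480) and
         *      this one claims [1472,2952), the 8 overlapping bytes are
         *      skipped here and the new fragment is stored as [1480,2952).
         */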
 514 
 515         /*
 516          * Look for overlap with succeeding segments.
 517          * If we can merge fragments, do it.
 518          */
 519 
 520         for(tmp=next; tmp != NULL; tmp = tfp)
 521         {
 522                 tfp = tmp->next;
 523                 if (tmp->offset >= end)
 524                         break;          /* no overlaps at all */
 525 
  526                 i = end - tmp->offset;                  /* overlap is 'i' bytes */
 527                 tmp->len -= i;                          /* so reduce size of    */
 528                 tmp->offset += i;                       /* next fragment        */
 529                 tmp->ptr += i;
 530                 /*
 531                  *      If we get a frag size of <= 0, remove it and the packet
 532                  *      that it goes with.
 533                  */
 534                 if (tmp->len <= 0)
 535                 {
 536                         if (tmp->prev != NULL)
 537                                 tmp->prev->next = tmp->next;
 538                         else
 539                                 qp->fragments = tmp->next;
 540 
  541                         if (tmp->next != NULL)
 542                                 tmp->next->prev = tmp->prev;
 543                         
 544                         next=tfp;       /* We have killed the original next frame */
 545 
 546                         frag_kfree_skb(tmp->skb,FREE_READ);
 547                         frag_kfree_s(tmp, sizeof(struct ipfrag));
 548                 }
 549         }
 550 
 551         /*
 552          *      Insert this fragment in the chain of fragments.
 553          */
 554 
 555         tfp = NULL;
 556         tfp = ip_frag_create(offset, end, skb, ptr);
 557 
 558         /*
 559          *      No memory to save the fragment - so throw the lot
 560          */
 561 
 562         if (!tfp)
 563         {
 564                 skb->sk = NULL;
 565                 frag_kfree_skb(skb, FREE_READ);
 566                 return NULL;
 567         }
 568         tfp->prev = prev;
 569         tfp->next = next;
 570         if (prev != NULL)
 571                 prev->next = tfp;
 572         else
 573                 qp->fragments = tfp;
 574 
 575         if (next != NULL)
 576                 next->prev = tfp;
 577 
 578         /*
 579          *      OK, so we inserted this new fragment into the chain.
 580          *      Check if we now have a full IP datagram which we can
 581          *      bump up to the IP layer...
 582          */
 583 
 584         if (ip_done(qp))
 585         {
 586                 skb2 = ip_glue(qp);             /* glue together the fragments */
 587                 return(skb2);
 588         }
 589         return(NULL);
 590 }
 591 
 592 
 593 /*
 594  *      This IP datagram is too large to be sent in one piece.  Break it up into
 595  *      smaller pieces (each of size equal to the MAC header plus IP header plus
 596  *      a block of the data of the original IP data part) that will yet fit in a
 597  *      single device frame, and queue such a frame for sending by calling the
 598  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
 599  *      if this function causes a loop...
 600  *
 601  *      Yes this is inefficient, feel free to submit a quicker one.
 602  *
 603  */
 604  
 605 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
 606 {
 607         struct iphdr *iph;
 608         unsigned char *raw;
 609         unsigned char *ptr;
 610         struct sk_buff *skb2;
 611         int left, mtu, hlen, len;
 612         int offset;
 613 
 614         /*
 615          *      Point into the IP datagram header.
 616          */
 617 
 618         raw = skb->data;
 619 #if 0
 620         iph = (struct iphdr *) (raw + dev->hard_header_len);    
 621         skb->ip_hdr = iph;
 622 #else
 623         iph = skb->ip_hdr;
 624 #endif
 625 
 626         /*
 627          *      Setup starting values.
 628          */
 629 
 630         hlen = iph->ihl * 4;
 631         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
 632         hlen += dev->hard_header_len;           /* Total header size */
 633         mtu = (dev->mtu - hlen);                /* Size of data space */
 634         ptr = (raw + hlen);                     /* Where to start from */
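        /*
         *      Example (assuming an Ethernet-style device): with dev->mtu of
         *      1500, hard_header_len of 14 and a 20 byte IP header, hlen is
         *      34 and mtu 1466, so each non-final fragment carries 1464 data
         *      bytes once len is rounded down to a multiple of 8 below.
         */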
 635 
 636         /*
 637          *      Check for any "DF" flag. [DF means do not fragment]
 638          */
 639 
 640         if (ntohs(iph->frag_off) & IP_DF)
 641         {
 642                 ip_statistics.IpFragFails++;
 643                 NETDEBUG(printk("ip_queue_xmit: frag needed\n"));
 644                 return;
 645         }
 646 
 647         /*
 648          *      The protocol doesn't seem to say what to do in the case that the
 649          *      frame + options doesn't fit the mtu. As it used to fall down dead
  650  *      in this case, we were fortunate it didn't happen.
 651          */
 652 
 653         if(mtu<8)
 654         {
 655                 /* It's wrong but it's better than nothing */
 656                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
 657                 ip_statistics.IpFragFails++;
 658                 return;
 659         }
 660 
 661         /*
 662          *      Fragment the datagram.
 663          */
 664 
 665         /*
 666          *      The initial offset is 0 for a complete frame. When
 667          *      fragmenting fragments it's wherever this one starts.
 668          */
 669 
 670         if (is_frag & 2)
 671                 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 672         else
 673                 offset = 0;
 674 
 675 
 676         /*
 677          *      Keep copying data until we run out.
 678          */
 679 
 680         while(left > 0)
 681         {
 682                 len = left;
 683                 /* IF: it doesn't fit, use 'mtu' - the data space left */
 684                 if (len > mtu)
 685                         len = mtu;
  686                 /* IF: we are not sending up to and including the packet end
 687                    then align the next start on an eight byte boundary */
 688                 if (len < left)
 689                 {
 690                         len/=8;
 691                         len*=8;
 692                 }
 693                 /*
 694                  *      Allocate buffer.
 695                  */
 696 
 697                 if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
 698                 {
 699                         NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
 700                         ip_statistics.IpFragFails++;
 701                         return;
 702                 }
 703 
 704                 /*
 705                  *      Set up data on packet
 706                  */
 707 
 708                 skb2->arp = skb->arp;
 709                 if(skb->free==0)
 710                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
 711                 skb2->free = 1;
 712                 skb_put(skb2,len + hlen);
 713                 skb2->h.raw=(char *) skb2->data;
 714                 /*
 715                  *      Charge the memory for the fragment to any owner
 716                  *      it might possess
 717                  */
 718 
 719                 if (sk)
 720                 {
 721                         atomic_add(skb2->truesize, &sk->wmem_alloc);
 722                         skb2->sk=sk;
 723                 }
 724                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
 725 
 726                 /*
 727                  *      Copy the packet header into the new buffer.
 728                  */
 729 
 730                 memcpy(skb2->h.raw, raw, hlen);
 731 
 732                 /*
 733                  *      Copy a block of the IP datagram.
 734                  */
 735                 memcpy(skb2->h.raw + hlen, ptr, len);
 736                 left -= len;
 737 
 738                 skb2->h.raw+=dev->hard_header_len;
 739 
 740                 /*
 741                  *      Fill in the new header fields.
 742                  */
 743                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
 744                 iph->frag_off = htons((offset >> 3));
 745                 skb2->ip_hdr = iph;
 746 
 747                 /* ANK: dirty, but effective trick. Upgrade options only if
 748                  * the segment to be fragmented was THE FIRST (otherwise,
 749                  * options are already fixed) and make it ONCE
 750                  * on the initial skb, so that all the following fragments
 751                  * will inherit fixed options.
 752                  */
 753                 if (offset == 0)
 754                         ip_options_fragment(skb);
 755 
 756                 /*
  757                  *      Added AC : If we are fragmenting a fragment that's not the
  758                  *                 last fragment then keep the MF bit set on each piece
 759                  */
 760                 if (left > 0 || (is_frag & 1))
 761                         iph->frag_off |= htons(IP_MF);
 762                 ptr += len;
 763                 offset += len;
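                /*
                 *      Continuing the example above: the second fragment goes
                 *      out with frag_off == htons(183) (1464/8), and IP_MF is
                 *      set unless this is the last piece of the datagram.
                 */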
 764 
 765                 /*
 766                  *      Put this fragment into the sending queue.
 767                  */
 768 
 769                 ip_statistics.IpFragCreates++;
 770 
 771                 ip_queue_xmit(sk, dev, skb2, 2);
 772         }
 773         ip_statistics.IpFragOKs++;
 774 }
 775 
 776 
