root/net/ipv4/ip_fragment.c


DEFINITIONS

This source file includes the following definitions.
  1. frag_kfree_skb
  2. frag_kfree_s
  3. frag_kmalloc
  4. ip_frag_create
  5. ip_find
  6. ip_free
  7. ip_expire
  8. ip_evictor
  9. ip_create
  10. ip_done
  11. ip_glue
  12. ip_defrag
  13. ip_fragment

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The IP fragmentation functionality.
   7  *              
   8  * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
   9  *              Alan Cox <Alan.Cox@linux.org>
  10  *
  11  * Fixes:
  12  *              Alan Cox        :       Split from ip.c, see ip_input.c for history.
  13  */
  14 
  15 #include <linux/types.h>
  16 #include <linux/mm.h>
  17 #include <linux/sched.h>
  18 #include <linux/skbuff.h>
  19 #include <linux/ip.h>
  20 #include <linux/icmp.h>
  21 #include <linux/netdevice.h>
  22 #include <net/sock.h>
  23 #include <net/ip.h>
  24 #include <net/icmp.h>
  25 #include <linux/tcp.h>
  26 #include <linux/udp.h>
  27 #include <linux/firewall.h>
  28 #include <linux/ip_fw.h>
  29 #include <net/checksum.h>
  30 
  31 /*
  32  *      Fragment cache limits. We will commit 256K at one time. Should we
  33  *      cross that limit we will prune down to 192K. This should cope with
  34  *      even the most extreme cases without allowing an attacker to measurably
  35  *      harm machine performance.
  36  */
  37  
  38 #define IPFRAG_HIGH_THRESH              (256*1024)
  39 #define IPFRAG_LOW_THRESH               (192*1024)
  40 
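/*
 *      Worked example of the hysteresis between the two limits: if queued
 *      fragments push ip_frag_mem past 256K, ip_defrag() calls ip_evictor(),
 *      which keeps freeing whole fragment queues until the counter is back
 *      below 192K. The 64K gap means one burst of eviction buys headroom for
 *      further fragments rather than evicting on every new arrival.
 */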
  41 /*
  42  *      This fragment handler is a bit of a heap. On the other hand it works quite
  43  *      happily and handles things quite well.
  44  */
  45 
  46 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
  47 
  48 unsigned long ip_frag_mem = 0;                  /* Memory used for fragments */
  49 
  50 /*
  51  *      Memory Tracking Functions
  52  */
  53  
  54 extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type)
  55 {
  56         unsigned long flags;
  57         save_flags(flags);
  58         cli();
  59         ip_frag_mem-=skb->truesize;
  60         restore_flags(flags);
  61         kfree_skb(skb,type);
  62 }
  63 
  64 extern __inline__ void frag_kfree_s(void *ptr, int len)
  65 {
  66         unsigned long flags;
  67         save_flags(flags);
  68         cli();
  69         ip_frag_mem-=len;
  70         restore_flags(flags);
  71         kfree_s(ptr,len);
  72 }
  73  
  74 extern __inline__ void *frag_kmalloc(int size, int pri)
  75 {
  76         unsigned long flags;
  77         void *vp=kmalloc(size,pri);
  78         if(!vp)
  79                 return NULL;
  80         save_flags(flags);
  81         cli();
  82         ip_frag_mem+=size;
  83         restore_flags(flags);
  84         return vp;
  85 }
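
/*
 *      These wrappers exist purely to keep the ip_frag_mem accounting in
 *      step with the allocations: fragment queues are built and torn down
 *      from interrupt context, so the counter is updated with interrupts
 *      disabled (cli/sti). ip_frag_mem is the figure that the
 *      IPFRAG_HIGH_THRESH and IPFRAG_LOW_THRESH checks are measured against.
 */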
  86  
  87 /*
  88  *      Create a new fragment entry.
  89  */
  90 
  91 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
  92 {
  93         struct ipfrag *fp;
  94         unsigned long flags;
  95 
  96         fp = (struct ipfrag *) frag_kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
  97         if (fp == NULL)
  98         {
  99                 NETDEBUG(printk("IP: frag_create: no memory left !\n"));
 100                 return(NULL);
 101         }
 102         memset(fp, 0, sizeof(struct ipfrag));
 103 
 104         /* Fill in the structure. */
 105         fp->offset = offset;
 106         fp->end = end;
 107         fp->len = end - offset;
 108         fp->skb = skb;
 109         fp->ptr = ptr;
 110         
 111         /*
 112          *      Charge for the SKB as well.
 113          */
 114          
 115         save_flags(flags);
 116         cli();
 117         ip_frag_mem+=skb->truesize;
 118         restore_flags(flags);
 119 
 120         return(fp);
 121 }
 122 
 123 
 124 /*
 125  *      Find the correct entry in the "incomplete datagrams" queue for
 126  *      this IP datagram, and return the queue entry address if found.
 127  */
 128 
 129 static struct ipq *ip_find(struct iphdr *iph)
 130 {
 131         struct ipq *qp;
 132         struct ipq *qplast;
 133 
 134         cli();
 135         qplast = NULL;
 136         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 137         {
  138                 if (iph->id == qp->iph->id && iph->saddr == qp->iph->saddr &&
 139                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 140                 {
 141                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 142                         sti();
 143                         return(qp);
 144                 }
 145         }
 146         sti();
 147         return(NULL);
 148 }
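
/*
 *      A queued datagram is identified by the (id, saddr, daddr, protocol)
 *      tuple, which is why ip_find() compares exactly those four header
 *      fields. The timer is stopped on a hit so the entry cannot expire
 *      while the caller is still working on it; ip_defrag() re-arms it
 *      afterwards.
 */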
 149 
 150 
 151 /*
 152  *      Remove an entry from the "incomplete datagrams" queue, either
 153  *      because we completed, reassembled and processed it, or because
 154  *      it timed out.
 155  */
 156 
 157 static void ip_free(struct ipq *qp)
 158 {
 159         struct ipfrag *fp;
 160         struct ipfrag *xp;
 161 
 162         /*
 163          * Stop the timer for this entry.
 164          */
 165 
 166         del_timer(&qp->timer);
 167 
 168         /* Remove this entry from the "incomplete datagrams" queue. */
 169         cli();
 170         if (qp->prev == NULL)
 171         {
 172                 ipqueue = qp->next;
 173                 if (ipqueue != NULL)
 174                         ipqueue->prev = NULL;
 175         }
 176         else
 177         {
 178                 qp->prev->next = qp->next;
 179                 if (qp->next != NULL)
 180                         qp->next->prev = qp->prev;
 181         }
 182 
 183         /* Release all fragment data. */
 184 
 185         fp = qp->fragments;
 186         while (fp != NULL)
 187         {
 188                 xp = fp->next;
 189                 IS_SKB(fp->skb);
 190                 frag_kfree_skb(fp->skb,FREE_READ);
 191                 frag_kfree_s(fp, sizeof(struct ipfrag));
 192                 fp = xp;
 193         }
 194 
 195         /* Release the IP header. */
 196         frag_kfree_s(qp->iph, 64 + 8);
 197 
 198         /* Finally, release the queue descriptor itself. */
 199         frag_kfree_s(qp, sizeof(struct ipq));
 200         sti();
 201 }
 202 
 203 
 204 /*
 205  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 206  */
 207 
 208 static void ip_expire(unsigned long arg)
 209 {
 210         struct ipq *qp;
 211 
 212         qp = (struct ipq *)arg;
 213 
 214         /*
 215          *      Send an ICMP "Fragment Reassembly Timeout" message.
 216          */
 217 
 218         ip_statistics.IpReasmTimeout++;
 219         ip_statistics.IpReasmFails++;   
 220         /* This if is always true... shrug */
 221         if(qp->fragments!=NULL)
 222                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 223                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 224 
 225         /*
 226          *      Nuke the fragment queue.
 227          */
 228         ip_free(qp);
 229 }
 230 
 231 /*
  232  *      Memory limiting on fragments. The evictor frees whole fragment queues
  233  *      from the head of the list until we are back under the low threshold.
 234  */
 235  
 236 static void ip_evictor(void)
 237 {
 238         while(ip_frag_mem>IPFRAG_LOW_THRESH)
 239         {
 240                 if(!ipqueue)
 241                         panic("ip_evictor: memcount");
 242                 ip_free(ipqueue);
 243         }
 244 }
 245 
 246 /*
 247  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 248  *      We will (hopefully :-) receive all other fragments of this datagram
 249  *      in time, so we just create a queue for this datagram, in which we
 250  *      will insert the received fragments at their respective positions.
 251  */
 252 
 253 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 254 {
 255         struct ipq *qp;
 256         int ihlen;
 257 
 258         qp = (struct ipq *) frag_kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 259         if (qp == NULL)
 260         {
 261                 NETDEBUG(printk("IP: create: no memory left !\n"));
 262                 return(NULL);
 264         }
 265         memset(qp, 0, sizeof(struct ipq));
 266 
 267         /*
 268          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 269          */
 270 
 271         ihlen = iph->ihl * 4;
 272         qp->iph = (struct iphdr *) frag_kmalloc(64 + 8, GFP_ATOMIC);
 273         if (qp->iph == NULL)
 274         {
 275                 NETDEBUG(printk("IP: create: no memory left !\n"));
 276                 frag_kfree_s(qp, sizeof(struct ipq));
 277                 return(NULL);
 278         }
 279 
 280         memcpy(qp->iph, iph, ihlen + 8);
 281         qp->len = 0;
 282         qp->ihlen = ihlen;
 283         qp->fragments = NULL;
 284         qp->dev = dev;
 285 
 286         /* Start a timer for this entry. */
 287         qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds     */
 288         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 289         qp->timer.function = ip_expire;                 /* expire function      */
 290         add_timer(&qp->timer);
 291 
 292         /* Add this entry to the queue. */
 293         qp->prev = NULL;
 294         cli();
 295         qp->next = ipqueue;
 296         if (qp->next != NULL)
 297                 qp->next->prev = qp;
 298         ipqueue = qp;
 299         sti();
 300         return(qp);
 301 }
 302 
 303 
 304 /*
 305  *      See if a fragment queue is complete.
 306  */
 307 
 308 static int ip_done(struct ipq *qp)
 309 {
 310         struct ipfrag *fp;
 311         int offset;
 312 
 313         /* Only possible if we received the final fragment. */
 314         if (qp->len == 0)
 315                 return(0);
 316 
 317         /* Check all fragment offsets to see if they connect. */
 318         fp = qp->fragments;
 319         offset = 0;
 320         while (fp != NULL)
 321         {
 322                 if (fp->offset > offset)
 323                         return(0);      /* fragment(s) missing */
 324                 offset = fp->end;
 325                 fp = fp->next;
 326         }
 327 
 328         /* All fragments are present. */
 329         return(1);
 330 }
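
/*
 *      Worked example (illustrative figures, assuming a 1500 byte MTU and a
 *      20 byte IP header): 2980 bytes of data arrive as fragments covering
 *      [0,1480), [1480,2960) and [2960,2980). Once the final fragment (MF
 *      clear) has been seen qp->len is 2980, every offset in the walk above
 *      is <= the running end, and ip_done() returns 1. If the middle
 *      fragment is still missing, the third fragment's offset (2960) exceeds
 *      the running end (1480) and we return 0.
 */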
 331 
 332 
 333 /*
 334  *      Build a new IP datagram from all its fragments.
 335  *
 336  *      FIXME: We copy here because we lack an effective way of handling lists
 337  *      of bits on input. Until the new skb data handling is in I'm not going
 338  *      to touch this with a bargepole. 
 339  */
 340 
 341 static struct sk_buff *ip_glue(struct ipq *qp)
 342 {
 343         struct sk_buff *skb;
 344         struct iphdr *iph;
 345         struct ipfrag *fp;
 346         unsigned char *ptr;
 347         int count, len;
 348 
 349         /*
 350          *      Allocate a new buffer for the datagram.
 351          */
 352         len = qp->ihlen + qp->len;
 353 
 354         if ((skb = dev_alloc_skb(len)) == NULL)
 355         {
 356                 ip_statistics.IpReasmFails++;
 357                 NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
 358                 ip_free(qp);
 359                 return(NULL);
 360         }
 361 
 362         /* Fill in the basic details. */
 363         skb_put(skb,len);
 364         skb->h.raw = skb->data;
 365         skb->free = 1;
 366 
 367         /* Copy the original IP headers into the new buffer. */
 368         ptr = (unsigned char *) skb->h.raw;
 369         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 370         ptr += qp->ihlen;
 371 
 372         count = 0;
 373 
 374         /* Copy the data portions of all fragments into the new buffer. */
 375         fp = qp->fragments;
 376         while(fp != NULL)
 377         {
 378                 if(count+fp->len > skb->len)
 379                 {
 380                         NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
 381                         ip_free(qp);
 382                         frag_kfree_skb(skb,FREE_WRITE);
 383                         ip_statistics.IpReasmFails++;
 384                         return NULL;
 385                 }
 386                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 387                 count += fp->len;
 388                 fp = fp->next;
 389         }
 390 
 391         /* We glued together all fragments, so remove the queue entry. */
 392         ip_free(qp);
 393 
 394         /* Done with all fragments. Fixup the new IP header. */
 395         iph = skb->h.iph;
 396         iph->frag_off = 0;
 397         iph->tot_len = htons((iph->ihl * 4) + count);
 398         skb->ip_hdr = iph;
 399 
 400         ip_statistics.IpReasmOKs++;
 401         return(skb);
 402 }
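
/*
 *      Note on the copy loop above: ptr points just past the reassembled IP
 *      header and each fragment's data is copied to ptr + fp->offset, so
 *      fragments simply land at their recorded positions. The running
 *      'count' check rejects fragment lists whose summed lengths exceed the
 *      buffer that was just allocated.
 */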
 403 
 404 
 405 /*
 406  *      Process an incoming IP datagram fragment.
 407  */
 408 
 409 struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 410 {
 411         struct ipfrag *prev, *next, *tmp;
 412         struct ipfrag *tfp;
 413         struct ipq *qp;
 414         struct sk_buff *skb2;
 415         unsigned char *ptr;
 416         int flags, offset;
 417         int i, ihl, end;
 418         
 419         ip_statistics.IpReasmReqds++;
 420 
 421         /*
 422          *      Start by cleaning up the memory
 423          */
 424 
 425         if(ip_frag_mem>IPFRAG_HIGH_THRESH)
 426                 ip_evictor();
 427         /* 
 428          *      Find the entry of this IP datagram in the "incomplete datagrams" queue. 
 429          */
 430          
 431         qp = ip_find(iph);
 432 
 433         /* Is this a non-fragmented datagram? */
 434         offset = ntohs(iph->frag_off);
 435         flags = offset & ~IP_OFFSET;
 436         offset &= IP_OFFSET;
 437         if (((flags & IP_MF) == 0) && (offset == 0))
 438         {
 439                 if (qp != NULL)
 440                         ip_free(qp);    /* Huh? How could this exist?? */
 441                 return(skb);
 442         }
 443 
 444         offset <<= 3;           /* offset is in 8-byte chunks */
 445         ihl = iph->ihl * 4;
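
        /*
         *      At this point flags holds the three flag bits of frag_off
         *      (reserved, IP_DF, IP_MF) and offset has been converted from
         *      8-byte units into a byte offset. E.g. a frag_off of 0x20B9
         *      means MF is set and this fragment's data starts at byte
         *      185*8 = 1480 of the original datagram.
         */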
 446 
 447         /*
 448          * If the queue already existed, keep restarting its timer as long
 449          * as we still are receiving fragments.  Otherwise, create a fresh
 450          * queue entry.
 451          */
 452 
 453         if (qp != NULL)
 454         {
 455                 /* ANK. If the first fragment is received,
 456                  * we should remember the correct IP header (with options)
 457                  */
 458                 if (offset == 0)
 459                 {
 460                         qp->ihlen = ihl;
 461                         memcpy(qp->iph, iph, ihl+8);
 462                 }
 463                 del_timer(&qp->timer);
 464                 qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds */
 465                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 466                 qp->timer.function = ip_expire;         /* expire function */
 467                 add_timer(&qp->timer);
 468         }
 469         else
 470         {
 471                 /*
 472                  *      If we failed to create it, then discard the frame
 473                  */
 474                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 475                 {
 476                         skb->sk = NULL;
 477                         frag_kfree_skb(skb, FREE_READ);
 478                         ip_statistics.IpReasmFails++;
 479                         return NULL;
 480                 }
 481         }
 482 
 483         /*
 484          *      Determine the position of this fragment.
 485          */
 486 
 487         end = offset + ntohs(iph->tot_len) - ihl;
 488 
 489         /*
 490          *      Point into the IP datagram 'data' part.
 491          */
 492 
 493         ptr = skb->data + ihl;
 494 
 495         /*
 496          *      Is this the final fragment?
 497          */
 498 
 499         if ((flags & IP_MF) == 0)
 500                 qp->len = end;
 501 
 502         /*
 503          *      Find out which fragments are in front and at the back of us
 504          *      in the chain of fragments so far.  We must know where to put
 505          *      this fragment, right?
 506          */
 507 
 508         prev = NULL;
 509         for(next = qp->fragments; next != NULL; next = next->next)
 510         {
 511                 if (next->offset > offset)
 512                         break;  /* bingo! */
 513                 prev = next;
 514         }
 515 
 516         /*
 517          *      We found where to put this one.
 518          *      Check for overlap with preceding fragment, and, if needed,
 519          *      align things so that any overlaps are eliminated.
 520          */
 521         if (prev != NULL && offset < prev->end)
 522         {
 523                 i = prev->end - offset;
 524                 offset += i;    /* ptr into datagram */
 525                 ptr += i;       /* ptr into fragment data */
 526         }
 527 
 528         /*
 529          * Look for overlap with succeeding segments.
 530          * If we can merge fragments, do it.
 531          */
 532 
 533         for(tmp=next; tmp != NULL; tmp = tfp)
 534         {
 535                 tfp = tmp->next;
 536                 if (tmp->offset >= end)
 537                         break;          /* no overlaps at all */
 538 
  539                 i = end - tmp->offset;                  /* overlap is 'i' bytes */
 540                 tmp->len -= i;                          /* so reduce size of    */
 541                 tmp->offset += i;                       /* next fragment        */
 542                 tmp->ptr += i;
 543                 /*
 544                  *      If we get a frag size of <= 0, remove it and the packet
 545                  *      that it goes with.
 546                  */
 547                 if (tmp->len <= 0)
 548                 {
 549                         if (tmp->prev != NULL)
 550                                 tmp->prev->next = tmp->next;
 551                         else
 552                                 qp->fragments = tmp->next;
 553 
  554                         if (tmp->next != NULL)
 555                                 tmp->next->prev = tmp->prev;
 556                         
 557                         next=tfp;       /* We have killed the original next frame */
 558 
 559                         frag_kfree_skb(tmp->skb,FREE_READ);
 560                         frag_kfree_s(tmp, sizeof(struct ipfrag));
 561                 }
 562         }
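
        /*
         *      Example of the trimming above (hypothetical numbers): if a
         *      fragment covering [1480,2960) is already queued and the new
         *      fragment spans [0,2960), the queued one is trimmed by
         *      end - tmp->offset = 1480 bytes, its length drops to 0, and
         *      it is unlinked and freed; the new fragment supplies that
         *      data instead.
         */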
 563 
 564         /*
 565          *      Insert this fragment in the chain of fragments.
 566          */
 567 
 568         tfp = NULL;
 569         tfp = ip_frag_create(offset, end, skb, ptr);
 570 
 571         /*
 572          *      No memory to save the fragment - so throw the lot
 573          */
 574 
 575         if (!tfp)
 576         {
 577                 skb->sk = NULL;
 578                 frag_kfree_skb(skb, FREE_READ);
 579                 return NULL;
 580         }
 581         tfp->prev = prev;
 582         tfp->next = next;
 583         if (prev != NULL)
 584                 prev->next = tfp;
 585         else
 586                 qp->fragments = tfp;
 587 
 588         if (next != NULL)
 589                 next->prev = tfp;
 590 
 591         /*
 592          *      OK, so we inserted this new fragment into the chain.
 593          *      Check if we now have a full IP datagram which we can
 594          *      bump up to the IP layer...
 595          */
 596 
 597         if (ip_done(qp))
 598         {
 599                 skb2 = ip_glue(qp);             /* glue together the fragments */
 600                 return(skb2);
 601         }
 602         return(NULL);
 603 }
 604 
 605 
 606 /*
 607  *      This IP datagram is too large to be sent in one piece.  Break it up into
 608  *      smaller pieces (each of size equal to the MAC header plus IP header plus
 609  *      a block of the data of the original IP data part) that will yet fit in a
 610  *      single device frame, and queue such a frame for sending by calling the
 611  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
 612  *      if this function causes a loop...
 613  *
 614  *      Yes this is inefficient, feel free to submit a quicker one.
 615  *
 616  */
 617  
 618 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
 619 {
 620         struct iphdr *iph;
 621         unsigned char *raw;
 622         unsigned char *ptr;
 623         struct sk_buff *skb2;
 624         int left, mtu, hlen, len;
 625         int offset;
 626         unsigned long flags;
 627 
 628         /*
 629          *      Point into the IP datagram header.
 630          */
 631 
 632         raw = skb->data;
 633 #if 0
 634         iph = (struct iphdr *) (raw + dev->hard_header_len);    
 635         skb->ip_hdr = iph;
 636 #else
 637         iph = skb->ip_hdr;
 638 #endif
 639 
 640         /*
 641          *      Setup starting values.
 642          */
 643 
 644         hlen = iph->ihl * 4;
 645         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
 646         hlen += dev->hard_header_len;           /* Total header size */
 647         mtu = (dev->mtu - hlen);                /* Size of data space */
 648         ptr = (raw + hlen);                     /* Where to start from */
 649 
 650         /*
 651          *      Check for any "DF" flag. [DF means do not fragment]
 652          */
 653 
 654         if (ntohs(iph->frag_off) & IP_DF)
 655         {
 656                 ip_statistics.IpFragFails++;
 657                 printk("ip_queue_xmit: frag needed\n");
 658                 return;
 659         }
 660 
 661         /*
  662          *      The protocol doesn't seem to say what to do in the case where the
  663          *      frame + options doesn't fit the MTU. As we used to fall down dead
  664          *      in this case, we were fortunate it never happened.
 665          */
 666 
 667         if(mtu<8)
 668         {
 669                 /* It's wrong but it's better than nothing */
 670                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
 671                 ip_statistics.IpFragFails++;
 672                 return;
 673         }
 674 
 675         /*
 676          *      Fragment the datagram.
 677          */
 678 
 679         /*
 680          *      The initial offset is 0 for a complete frame. When
 681          *      fragmenting fragments it's wherever this one starts.
 682          */
 683 
 684         if (is_frag & 2)
 685                 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 686         else
 687                 offset = 0;
 688 
 689 
 690         /*
 691          *      Keep copying data until we run out.
 692          */
 693 
 694         while(left > 0)
 695         {
 696                 len = left;
 697                 /* IF: it doesn't fit, use 'mtu' - the data space left */
 698                 if (len > mtu)
 699                         len = mtu;
  700                 /* IF: we are not sending up to and including the packet end
 701                    then align the next start on an eight byte boundary */
 702                 if (len < left)
 703                 {
 704                         len/=8;
 705                         len*=8;
 706                 }
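
                /*
                 *      Every fragment except the last must carry a multiple
                 *      of 8 data bytes, because the IP offset field counts
                 *      8-byte units. E.g. with 1006 bytes of space the
                 *      rounding above sends 1000 bytes and leaves the rest
                 *      for the next fragment.
                 */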
 707                 /*
 708                  *      Allocate buffer.
 709                  */
 710 
 711                 if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
 712                 {
 713                         NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
 714                         ip_statistics.IpFragFails++;
 715                         return;
 716                 }
 717 
 718                 /*
 719                  *      Set up data on packet
 720                  */
 721 
 722                 skb2->arp = skb->arp;
 723                 if(skb->free==0)
 724                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
 725                 skb2->free = 1;
 726                 skb_put(skb2,len + hlen);
 727                 skb2->h.raw=(char *) skb2->data;
 728                 /*
 729                  *      Charge the memory for the fragment to any owner
 730                  *      it might possess
 731                  */
 732 
 733                 save_flags(flags);
 734                 if (sk)
 735                 {
 736                         cli();
 737                         sk->wmem_alloc += skb2->truesize;
 738                         skb2->sk=sk;
 739                 }
 740                 restore_flags(flags);
 741                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
 742 
 743                 /*
 744                  *      Copy the packet header into the new buffer.
 745                  */
 746 
 747                 memcpy(skb2->h.raw, raw, hlen);
 748 
 749                 /*
 750                  *      Copy a block of the IP datagram.
 751                  */
 752                 memcpy(skb2->h.raw + hlen, ptr, len);
 753                 left -= len;
 754 
 755                 skb2->h.raw+=dev->hard_header_len;
 756 
 757                 /*
 758                  *      Fill in the new header fields.
 759                  */
 760                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
 761                 iph->frag_off = htons((offset >> 3));
 762                 skb2->ip_hdr = iph;
 763 
 764                 /* ANK: dirty, but effective trick. Upgrade options only if
 765                  * the segment to be fragmented was THE FIRST (otherwise,
 766                  * options are already fixed) and make it ONCE
 767                  * on the initial skb, so that all the following fragments
 768                  * will inherit fixed options.
 769                  */
 770                 if (offset == 0)
 771                         ip_options_fragment(skb);
 772 
 773                 /*
  774                  *      Added AC : If we are fragmenting a fragment that's not the
  775                  *                 last fragment then keep MF set on each fragment
 776                  */
 777                 if (left > 0 || (is_frag & 1))
 778                         iph->frag_off |= htons(IP_MF);
 779                 ptr += len;
 780                 offset += len;
 781 
 782                 /*
 783                  *      Put this fragment into the sending queue.
 784                  */
 785 
 786                 ip_statistics.IpFragCreates++;
 787 
 788                 ip_queue_xmit(sk, dev, skb2, 2);
 789         }
 790         ip_statistics.IpFragOKs++;
 791 }
 792 
 793 
