1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The IP fragmentation functionality.
7 *
8 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
9 * Alan Cox <Alan.Cox@linux.org>
10 *
11 * Fixes:
12 * Alan Cox : Split from ip.c , see ip_input.c for history.
13 */
14
15 #include <linux/types.h>
16 #include <linux/mm.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/ip.h>
20 #include <linux/icmp.h>
21 #include <linux/netdevice.h>
22 #include <net/sock.h>
23 #include <net/ip.h>
24 #include <net/icmp.h>
25 #include <linux/tcp.h>
26 #include <linux/udp.h>
27 #include <linux/firewall.h>
28 #include <linux/ip_fw.h>
29 #include <net/checksum.h>
30
31 /*
32 * Fragment cache limits. We will commit 256K at one time. Should we
33 * cross that limit we will prune down to 192K. This should cope with
34 * even the most extreme cases without allowing an attacker to measurably
35 * harm machine performance.
36 */
37
38 #define IPFRAG_HIGH_THRESH (256*1024)
39 #define IPFRAG_LOW_THRESH (192*1024)
40
41 /*
42 * This fragment handler is a bit of a heap. On the other hand it works quite
43 * happily and handles things quite well.
44 */
45
46 static struct ipq *ipqueue = NULL; /* IP fragment queue */
47
48 atomic_t ip_frag_mem = 0; /* Memory used for fragments */
49
50 /*
51 * Memory Tracking Functions
52 */
53
54 extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type)
/* ![[previous]](../icons/n_left.png)
![[next]](../icons/right.png)
![[first]](../icons/n_first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
55 {
56 atomic_sub(skb->truesize, &ip_frag_mem);
57 kfree_skb(skb,type);
58 }
59
60 extern __inline__ void frag_kfree_s(void *ptr, int len)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
61 {
62 atomic_sub(len, &ip_frag_mem);
63 kfree_s(ptr,len);
64 }
65
66 extern __inline__ void *frag_kmalloc(int size, int pri)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
67 {
68 void *vp=kmalloc(size,pri);
69 if(!vp)
70 return NULL;
71 atomic_add(size, &ip_frag_mem);
72 return vp;
73 }
74
75 /*
76 * Create a new fragment entry.
77 */
78
79 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
80 {
81 struct ipfrag *fp;
82 unsigned long flags;
83
84 fp = (struct ipfrag *) frag_kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
85 if (fp == NULL)
86 {
87 NETDEBUG(printk("IP: frag_create: no memory left !\n"));
88 return(NULL);
89 }
90 memset(fp, 0, sizeof(struct ipfrag));
91
92 /* Fill in the structure. */
93 fp->offset = offset;
94 fp->end = end;
95 fp->len = end - offset;
96 fp->skb = skb;
97 fp->ptr = ptr;
98
99 /*
100 * Charge for the SKB as well.
101 */
102
103 save_flags(flags);
104 cli();
105 ip_frag_mem+=skb->truesize;
106 restore_flags(flags);
107
108 return(fp);
109 }
110
111
112 /*
113 * Find the correct entry in the "incomplete datagrams" queue for
114 * this IP datagram, and return the queue entry address if found.
115 */
116
117 static struct ipq *ip_find(struct iphdr *iph)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
118 {
119 struct ipq *qp;
120 struct ipq *qplast;
121
122 cli();
123 qplast = NULL;
124 for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
125 {
126 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
127 iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
128 {
129 del_timer(&qp->timer); /* So it doesn't vanish on us. The timer will be reset anyway */
130 sti();
131 return(qp);
132 }
133 }
134 sti();
135 return(NULL);
136 }
137
138
/*
 * Remove an entry from the "incomplete datagrams" queue, either
 * because we completed, reassembled and processed it, or because
 * it timed out.
 *
 * Frees every fragment skb, the saved IP header and the queue
 * descriptor itself, uncharging all of them from ip_frag_mem.
 */

static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 * Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		/* qp is the list head: advance ipqueue past it. */
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		/* Interior or tail node: splice qp out of the doubly linked list. */
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;
		IS_SKB(fp->skb);
		frag_kfree_skb(fp->skb,FREE_READ);
		frag_kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the IP header.  The length must match the fixed 64 + 8
	 * bytes allocated in ip_create() (max IP header plus 8 octets for
	 * ICMP), since frag_kfree_s() uncharges exactly this amount. */
	frag_kfree_s(qp->iph, 64 + 8);

	/* Finally, release the queue descriptor itself. */
	frag_kfree_s(qp, sizeof(struct ipq));
	sti();
}
190
191
192 /*
193 * Oops- a fragment queue timed out. Kill it and send an ICMP reply.
194 */
195
196 static void ip_expire(unsigned long arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
197 {
198 struct ipq *qp;
199
200 qp = (struct ipq *)arg;
201
202 /*
203 * Send an ICMP "Fragment Reassembly Timeout" message.
204 */
205
206 ip_statistics.IpReasmTimeout++;
207 ip_statistics.IpReasmFails++;
208 /* This if is always true... shrug */
209 if(qp->fragments!=NULL)
210 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
211 ICMP_EXC_FRAGTIME, 0, qp->dev);
212
213 /*
214 * Nuke the fragment queue.
215 */
216 ip_free(qp);
217 }
218
/*
 * Memory limiting on fragments.  Frees whole fragment queues until the
 * accounted fragment memory drops back under IPFRAG_LOW_THRESH.
 *
 * NOTE(review): ip_create() inserts new queues at the head of ipqueue,
 * so ip_free(ipqueue) evicts the most recently created queue, not the
 * oldest as the original comment claimed -- confirm intended policy.
 */

static void ip_evictor(void)
{
	while(ip_frag_mem>IPFRAG_LOW_THRESH)
	{
		/* Accounting says memory is in use, so a queue must exist. */
		if(!ipqueue)
			panic("ip_evictor: memcount");
		ip_free(ipqueue);
	}
}
233
234 /*
235 * Add an entry to the 'ipq' queue for a newly received IP datagram.
236 * We will (hopefully :-) receive all other fragments of this datagram
237 * in time, so we just create a queue for this datagram, in which we
238 * will insert the received fragments at their respective positions.
239 */
240
241 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
242 {
243 struct ipq *qp;
244 int ihlen;
245
246 qp = (struct ipq *) frag_kmalloc(sizeof(struct ipq), GFP_ATOMIC);
247 if (qp == NULL)
248 {
249 NETDEBUG(printk("IP: create: no memory left !\n"));
250 return(NULL);
251 skb->dev = qp->dev;
252 }
253 memset(qp, 0, sizeof(struct ipq));
254
255 /*
256 * Allocate memory for the IP header (plus 8 octets for ICMP).
257 */
258
259 ihlen = iph->ihl * 4;
260 qp->iph = (struct iphdr *) frag_kmalloc(64 + 8, GFP_ATOMIC);
261 if (qp->iph == NULL)
262 {
263 NETDEBUG(printk("IP: create: no memory left !\n"));
264 frag_kfree_s(qp, sizeof(struct ipq));
265 return(NULL);
266 }
267
268 memcpy(qp->iph, iph, ihlen + 8);
269 qp->len = 0;
270 qp->ihlen = ihlen;
271 qp->fragments = NULL;
272 qp->dev = dev;
273
274 /* Start a timer for this entry. */
275 qp->timer.expires = jiffies + IP_FRAG_TIME; /* about 30 seconds */
276 qp->timer.data = (unsigned long) qp; /* pointer to queue */
277 qp->timer.function = ip_expire; /* expire function */
278 add_timer(&qp->timer);
279
280 /* Add this entry to the queue. */
281 qp->prev = NULL;
282 cli();
283 qp->next = ipqueue;
284 if (qp->next != NULL)
285 qp->next->prev = qp;
286 ipqueue = qp;
287 sti();
288 return(qp);
289 }
290
291
292 /*
293 * See if a fragment queue is complete.
294 */
295
296 static int ip_done(struct ipq *qp)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
297 {
298 struct ipfrag *fp;
299 int offset;
300
301 /* Only possible if we received the final fragment. */
302 if (qp->len == 0)
303 return(0);
304
305 /* Check all fragment offsets to see if they connect. */
306 fp = qp->fragments;
307 offset = 0;
308 while (fp != NULL)
309 {
310 if (fp->offset > offset)
311 return(0); /* fragment(s) missing */
312 offset = fp->end;
313 fp = fp->next;
314 }
315
316 /* All fragments are present. */
317 return(1);
318 }
319
320
/*
 * Build a new IP datagram from all its fragments.
 *
 * FIXME: We copy here because we lack an effective way of handling lists
 * of bits on input. Until the new skb data handling is in I'm not going
 * to touch this with a bargepole.
 *
 * Consumes the queue entry (ip_free) on both success and failure.
 * Returns the reassembled skb, or NULL on allocation failure or an
 * inconsistent fragment list.
 */

static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 * Allocate a new buffer for the datagram: saved header + payload.
	 */
	len = qp->ihlen + qp->len;

	if ((skb = dev_alloc_skb(len)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb_put(skb,len);
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original IP headers into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;

	count = 0;

	/* Copy the data portions of all fragments into the new buffer. */
	fp = qp->fragments;
	while(fp != NULL)
	{
		/* NOTE(review): this only bounds the running byte count;
		 * it does not verify fp->offset + fp->len against the
		 * buffer, so an out-of-range offset would not be caught
		 * here -- relies on ip_defrag/ip_done keeping the chain
		 * consistent.  Confirm against later hardened kernels. */
		if(count+fp->len > skb->len)
		{
			NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
			ip_free(qp);
			frag_kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header: no longer a
	 * fragment, and the total length reflects the reassembled size. */
	iph = skb->h.iph;
	iph->frag_off = 0;
	iph->tot_len = htons((iph->ihl * 4) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}
391
392
393 /*
394 * Process an incoming IP datagram fragment.
395 */
396
397 struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
398 {
399 struct ipfrag *prev, *next, *tmp;
400 struct ipfrag *tfp;
401 struct ipq *qp;
402 struct sk_buff *skb2;
403 unsigned char *ptr;
404 int flags, offset;
405 int i, ihl, end;
406
407 ip_statistics.IpReasmReqds++;
408
409 /*
410 * Start by cleaning up the memory
411 */
412
413 if(ip_frag_mem>IPFRAG_HIGH_THRESH)
414 ip_evictor();
415 /*
416 * Find the entry of this IP datagram in the "incomplete datagrams" queue.
417 */
418
419 qp = ip_find(iph);
420
421 /* Is this a non-fragmented datagram? */
422 offset = ntohs(iph->frag_off);
423 flags = offset & ~IP_OFFSET;
424 offset &= IP_OFFSET;
425 if (((flags & IP_MF) == 0) && (offset == 0))
426 {
427 if (qp != NULL)
428 ip_free(qp); /* Huh? How could this exist?? */
429 return(skb);
430 }
431
432 offset <<= 3; /* offset is in 8-byte chunks */
433 ihl = iph->ihl * 4;
434
435 /*
436 * If the queue already existed, keep restarting its timer as long
437 * as we still are receiving fragments. Otherwise, create a fresh
438 * queue entry.
439 */
440
441 if (qp != NULL)
442 {
443 /* ANK. If the first fragment is received,
444 * we should remember the correct IP header (with options)
445 */
446 if (offset == 0)
447 {
448 qp->ihlen = ihl;
449 memcpy(qp->iph, iph, ihl+8);
450 }
451 del_timer(&qp->timer);
452 qp->timer.expires = jiffies + IP_FRAG_TIME; /* about 30 seconds */
453 qp->timer.data = (unsigned long) qp; /* pointer to queue */
454 qp->timer.function = ip_expire; /* expire function */
455 add_timer(&qp->timer);
456 }
457 else
458 {
459 /*
460 * If we failed to create it, then discard the frame
461 */
462 if ((qp = ip_create(skb, iph, dev)) == NULL)
463 {
464 skb->sk = NULL;
465 frag_kfree_skb(skb, FREE_READ);
466 ip_statistics.IpReasmFails++;
467 return NULL;
468 }
469 }
470
471 /*
472 * Determine the position of this fragment.
473 */
474
475 end = offset + ntohs(iph->tot_len) - ihl;
476
477 /*
478 * Point into the IP datagram 'data' part.
479 */
480
481 ptr = skb->data + ihl;
482
483 /*
484 * Is this the final fragment?
485 */
486
487 if ((flags & IP_MF) == 0)
488 qp->len = end;
489
490 /*
491 * Find out which fragments are in front and at the back of us
492 * in the chain of fragments so far. We must know where to put
493 * this fragment, right?
494 */
495
496 prev = NULL;
497 for(next = qp->fragments; next != NULL; next = next->next)
498 {
499 if (next->offset > offset)
500 break; /* bingo! */
501 prev = next;
502 }
503
504 /*
505 * We found where to put this one.
506 * Check for overlap with preceding fragment, and, if needed,
507 * align things so that any overlaps are eliminated.
508 */
509 if (prev != NULL && offset < prev->end)
510 {
511 i = prev->end - offset;
512 offset += i; /* ptr into datagram */
513 ptr += i; /* ptr into fragment data */
514 }
515
516 /*
517 * Look for overlap with succeeding segments.
518 * If we can merge fragments, do it.
519 */
520
521 for(tmp=next; tmp != NULL; tmp = tfp)
522 {
523 tfp = tmp->next;
524 if (tmp->offset >= end)
525 break; /* no overlaps at all */
526
527 i = end - next->offset; /* overlap is 'i' bytes */
528 tmp->len -= i; /* so reduce size of */
529 tmp->offset += i; /* next fragment */
530 tmp->ptr += i;
531 /*
532 * If we get a frag size of <= 0, remove it and the packet
533 * that it goes with.
534 */
535 if (tmp->len <= 0)
536 {
537 if (tmp->prev != NULL)
538 tmp->prev->next = tmp->next;
539 else
540 qp->fragments = tmp->next;
541
542 if (tfp->next != NULL)
543 tmp->next->prev = tmp->prev;
544
545 next=tfp; /* We have killed the original next frame */
546
547 frag_kfree_skb(tmp->skb,FREE_READ);
548 frag_kfree_s(tmp, sizeof(struct ipfrag));
549 }
550 }
551
552 /*
553 * Insert this fragment in the chain of fragments.
554 */
555
556 tfp = NULL;
557 tfp = ip_frag_create(offset, end, skb, ptr);
558
559 /*
560 * No memory to save the fragment - so throw the lot
561 */
562
563 if (!tfp)
564 {
565 skb->sk = NULL;
566 frag_kfree_skb(skb, FREE_READ);
567 return NULL;
568 }
569 tfp->prev = prev;
570 tfp->next = next;
571 if (prev != NULL)
572 prev->next = tfp;
573 else
574 qp->fragments = tfp;
575
576 if (next != NULL)
577 next->prev = tfp;
578
579 /*
580 * OK, so we inserted this new fragment into the chain.
581 * Check if we now have a full IP datagram which we can
582 * bump up to the IP layer...
583 */
584
585 if (ip_done(qp))
586 {
587 skb2 = ip_glue(qp); /* glue together the fragments */
588 return(skb2);
589 }
590 return(NULL);
591 }
592
593
/*
 * This IP datagram is too large to be sent in one piece. Break it up into
 * smaller pieces (each of size equal to the MAC header plus IP header plus
 * a block of the data of the original IP data part) that will yet fit in a
 * single device frame, and queue such a frame for sending by calling the
 * ip_queue_xmit(). Note that this is recursion, and bad things will happen
 * if this function causes a loop...
 *
 * Yes this is inefficient, feel free to submit a quicker one.
 *
 * is_frag bit 1 set means the input is itself a non-final fragment (so
 * every piece keeps MF); bit 2 set means its fragment offset must be
 * preserved as the starting offset.
 */

void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;

	/*
	 * Point into the IP datagram header.
	 */

	raw = skb->data;
#if 0
	iph = (struct iphdr *) (raw + dev->hard_header_len);
	skb->ip_hdr = iph;
#else
	iph = skb->ip_hdr;
#endif

	/*
	 * Setup starting values.
	 */

	hlen = iph->ihl * 4;
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	hlen += dev->hard_header_len;		/* Total header size */
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 * Check for any "DF" flag. [DF means do not fragment]
	 */

	if (ntohs(iph->frag_off) & IP_DF)
	{
		ip_statistics.IpFragFails++;
		NETDEBUG(printk("ip_queue_xmit: frag needed\n"));
		return;
	}

	/*
	 * The protocol doesn't seem to say what to do in the case that the
	 * frame + options doesn't fit the mtu. As it used to fall down dead
	 * in this case we were fortunate it didn't happen
	 */

	if(mtu<8)
	{
		/* It's wrong but it's better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 * Fragment the datagram.
	 */

	/*
	 * The initial offset is 0 for a complete frame. When
	 * fragmenting fragments it's wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	else
		offset = 0;


	/*
	 * Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary
		   (fragment offsets are expressed in 8-byte units) */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 * Allocate buffer (15 extra bytes allow for alignment).
		 */

		if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
		{
			NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 * Set up data on packet
		 */

		skb2->arp = skb->arp;
		if(skb->free==0)
			printk("IP fragmenter: BUG free!=1 in fragmenter\n");
		skb2->free = 1;
		skb_put(skb2,len + hlen);
		skb2->h.raw=(char *) skb2->data;
		/*
		 * Charge the memory for the fragment to any owner
		 * it might possess
		 */

		if (sk)
		{
			atomic_add(skb2->truesize, &sk->wmem_alloc);
			skb2->sk=sk;
		}
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 * Copy the packet header (MAC + IP) into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 * Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=dev->hard_header_len;

		/*
		 * Fill in the new header fields.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		skb2->ip_hdr = iph;

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 * Added AC : If we are fragmenting a fragment thats not the
		 *	      last fragment then keep MF on each bit
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 * Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}
776
777