1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The Internet Protocol (IP) module.
7 *
8 * Version: @(#)ip.c 1.0.16b 9/1/93
9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org>
13 * Alan Cox, <gw4pts@gw4pts.ampr.org>
14 * Richard Underwood
15 * Stefan Becker, <stefanb@yello.ping.de>
16 * Jorge Cwik, <jorge@laser.satlink.net>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 *
19 *
20 * Fixes:
21 * Alan Cox : Commented a couple of minor bits of surplus code
22 * Alan Cox : Undefining IP_FORWARD doesn't include the code
23 * (just stops a compiler warning).
24 * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes
25 * are junked rather than corrupting things.
26 * Alan Cox : Frames to bad broadcast subnets are dumped
27 * We used to process them non broadcast and
28 * boy could that cause havoc.
29 * Alan Cox : ip_forward sets the free flag on the
30 * new frame it queues. Still crap because
31 * it copies the frame but at least it
32 * doesn't eat memory too.
33 * Alan Cox : Generic queue code and memory fixes.
34 * Fred Van Kempen : IP fragment support (borrowed from NET2E)
35 * Gerhard Koerting: Forward fragmented frames correctly.
36 * Gerhard Koerting: Fixes to my fix of the above 8-).
37 * Gerhard Koerting: IP interface addressing fix.
38 * Linus Torvalds : More robustness checks
39 * Alan Cox : Even more checks: Still not as robust as it ought to be
40 * Alan Cox : Save IP header pointer for later
41 * Alan Cox : ip option setting
42 * Alan Cox : Use ip_tos/ip_ttl settings
43 * Alan Cox : Fragmentation bogosity removed
44 * (Thanks to Mark.Bush@prg.ox.ac.uk)
45 * Dmitry Gorodchanin : Send of a raw packet crash fix.
46 * Alan Cox : Silly ip bug when an overlength
47 * fragment turns up. Now frees the
48 * queue.
49 * Linus Torvalds/ : Memory leakage on fragmentation
50 * Alan Cox : handling.
51 * Gerhard Koerting: Forwarding uses IP priority hints
52 * Teemu Rantanen : Fragment problems.
53 * Alan Cox : General cleanup, comments and reformat
54 * Alan Cox : SNMP statistics
55 * Alan Cox : BSD address rule semantics. Also see
56 * UDP as there is a nasty checksum issue
57 * if you do things the wrong way.
58 * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file
59 * Alan Cox : IP options adjust sk->priority.
60 * Pedro Roque : Fix mtu/length error in ip_forward.
61 * Alan Cox : Avoid ip_chk_addr when possible.
62 * Richard Underwood : IP multicasting.
63 * Alan Cox : Cleaned up multicast handlers.
64 * Alan Cox : RAW sockets demultiplex in the BSD style.
65 * Gunther Mayer : Fix the SNMP reporting typo
66 * Alan Cox : Always in group 224.0.0.1
67 * Pauline Middelink : Fast ip_checksum update when forwarding
68 * Masquerading support.
69 * Alan Cox : Multicast loopback error for 224.0.0.1
70 * Alan Cox : IP_MULTICAST_LOOP option.
71 * Alan Cox : Use notifiers.
72 * Bjorn Ekwall : Removed ip_csum (from slhc.c too)
73 * Bjorn Ekwall : Moved ip_fast_csum to ip.h (inline!)
74 * Stefan Becker : Send out ICMP HOST REDIRECT
75 * Arnt Gulbrandsen : ip_build_xmit
76 * Alan Cox : Per socket routing cache
77 * Alan Cox : Fixed routing cache, added header cache.
78 * Alan Cox : Loopback didn't work right in original ip_build_xmit - fixed it.
79 * Alan Cox : Only send ICMP_REDIRECT if src/dest are the same net.
80 * Alan Cox : Incoming IP option handling.
81 * Alan Cox : Set saddr on raw output frames as per BSD.
82 * Alan Cox : Stopped broadcast source route explosions.
83 * Alan Cox : Can disable source routing
84 * Takeshi Sone : Masquerading didn't work.
85 * Dave Bonn,Alan Cox : Faster IP forwarding whenever possible.
86 * Alan Cox : Memory leaks, tramples, misc debugging.
87 * Alan Cox : Fixed multicast (by popular demand 8))
88 * Alan Cox : Fixed forwarding (by even more popular demand 8))
89 * Alan Cox : Fixed SNMP statistics [I think]
90 *
91 *
92 *
93 * To Fix:
94 * IP option processing is mostly not needed. ip_forward needs to know about routing rules
95 * and time stamp but that's about all. Use the route mtu field here too
96 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
97 * and could be made very efficient with the addition of some virtual memory hacks to permit
98 * the allocation of a buffer that can then be 'grown' by twiddling page tables.
99 * Output fragmentation wants updating along with the buffer management to use a single
100 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
101 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
102 * fragmentation anyway.
103 *
104 * FIXME: copy frag 0 iph to qp->iph
105 *
106 * This program is free software; you can redistribute it and/or
107 * modify it under the terms of the GNU General Public License
108 * as published by the Free Software Foundation; either version
109 * 2 of the License, or (at your option) any later version.
110 */
111
112 #include <asm/segment.h>
113 #include <asm/system.h>
114 #include <linux/types.h>
115 #include <linux/kernel.h>
116 #include <linux/sched.h>
117 #include <linux/mm.h>
118 #include <linux/string.h>
119 #include <linux/errno.h>
120 #include <linux/config.h>
121
122 #include <linux/socket.h>
123 #include <linux/sockios.h>
124 #include <linux/in.h>
125 #include <linux/inet.h>
126 #include <linux/netdevice.h>
127 #include <linux/etherdevice.h>
128
129 #include <net/snmp.h>
130 #include <net/ip.h>
131 #include <net/protocol.h>
132 #include <net/route.h>
133 #include <net/tcp.h>
134 #include <net/udp.h>
135 #include <linux/skbuff.h>
136 #include <net/sock.h>
137 #include <net/arp.h>
138 #include <net/icmp.h>
139 #include <net/raw.h>
140 #include <net/checksum.h>
141 #include <linux/igmp.h>
142 #include <linux/ip_fw.h>
143
144 #define CONFIG_IP_DEFRAG
145
146 extern int last_retran;
147 extern void sort_send(struct sock *sk);
148
149 #define min(a,b) ((a)<(b)?(a):(b))
150
151 /*
152 * SNMP management statistics
153 */
154
155 #ifdef CONFIG_IP_FORWARD
156 struct ip_mib ip_statistics={1,64,}; /* Forwarding=Yes, Default TTL=64 */
157 #else
158 struct ip_mib ip_statistics={2,64,}; /* Forwarding=No, Default TTL=64 */
159 #endif
160
/*
 * Handle an ioctl() request aimed at the IP device layer.
 * This interface is scheduled to disappear; no commands are
 * implemented, so every request is rejected.
 */

int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	/* No IP-level ioctls exist: whatever 'cmd' is, refuse it. */
	return -EINVAL;
}
175
176
177 /*
178 * Take an skb, and fill in the MAC header.
179 */
180
181 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
182 {
183 int mac = 0;
184
185 skb->dev = dev;
186 skb->arp = 1;
187 if (dev->hard_header)
188 {
189 /*
190 * Build a hardware header. Source address is our mac, destination unknown
191 * (rebuild header will sort this out)
192 */
193 skb_reserve(skb,(dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */
194 mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len);
195 if (mac < 0)
196 {
197 mac = -mac;
198 skb->arp = 0;
199 skb->raddr = daddr; /* next routing address */
200 }
201 }
202 return mac;
203 }
204
205 int ip_id_count = 0;
206
207 /*
208 * This routine builds the appropriate hardware/IP headers for
209 * the routine. It assumes that if *dev != NULL then the
210 * protocol knows what it's doing, otherwise it uses the
211 * routing/ARP tables to select a device struct.
212 */
213 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
214 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
215 {
216 struct rtable *rt;
217 unsigned long raddr;
218 int tmp;
219 unsigned long src;
220 struct iphdr *iph;
221
222 /*
223 * See if we need to look up the device.
224 */
225
226 #ifdef CONFIG_INET_MULTICAST
227 if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
228 *dev=dev_get(skb->sk->ip_mc_name);
229 #endif
230 if (*dev == NULL)
231 {
232 if(skb->localroute)
233 rt = ip_rt_local(daddr, NULL, &src);
234 else
235 rt = ip_rt_route(daddr, NULL, &src);
236 if (rt == NULL)
237 {
238 ip_statistics.IpOutNoRoutes++;
239 return(-ENETUNREACH);
240 }
241
242 *dev = rt->rt_dev;
243 /*
244 * If the frame is from us and going off machine it MUST MUST MUST
245 * have the output device ip address and never the loopback
246 */
247 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
248 saddr = src;/*rt->rt_dev->pa_addr;*/
249 raddr = rt->rt_gateway;
250
251 }
252 else
253 {
254 /*
255 * We still need the address of the first hop.
256 */
257 if(skb->localroute)
258 rt = ip_rt_local(daddr, NULL, &src);
259 else
260 rt = ip_rt_route(daddr, NULL, &src);
261 /*
262 * If the frame is from us and going off machine it MUST MUST MUST
263 * have the output device ip address and never the loopback
264 */
265 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
266 saddr = src;/*rt->rt_dev->pa_addr;*/
267
268 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
269 }
270
271 /*
272 * No source addr so make it our addr
273 */
274 if (saddr == 0)
275 saddr = src;
276
277 /*
278 * No gateway so aim at the real destination
279 */
280 if (raddr == 0)
281 raddr = daddr;
282
283 /*
284 * Now build the MAC header.
285 */
286
287 tmp = ip_send(skb, raddr, len, *dev, saddr);
288
289 /*
290 * Book keeping
291 */
292
293 skb->dev = *dev;
294 skb->saddr = saddr;
295 if (skb->sk)
296 skb->sk->saddr = saddr;
297
298 /*
299 * Now build the IP header.
300 */
301
302 /*
303 * If we are using IPPROTO_RAW, then we don't need an IP header, since
304 * one is being supplied to us by the user
305 */
306
307 if(type == IPPROTO_RAW)
308 return (tmp);
309
310 /*
311 * Build the IP addresses
312 */
313
314 iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr));
315
316 iph->version = 4;
317 iph->ihl = 5;
318 iph->tos = tos;
319 iph->frag_off = 0;
320 iph->ttl = ttl;
321 iph->daddr = daddr;
322 iph->saddr = saddr;
323 iph->protocol = type;
324 skb->ip_hdr = iph;
325
326 return(20 + tmp); /* IP header plus MAC header size */
327 }
328
329
/*
 * Generate a checksum for an outgoing IP datagram.
 *
 * The check field must be zeroed before summing (it is included in
 * the sum); ip_fast_csum takes the header length in 32-bit words,
 * which is exactly what iph->ihl holds.
 */

void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
339
340 /************************ Fragment Handlers From NET2E **********************************/
341
342
343 /*
344 * This fragment handler is a bit of a heap. On the other hand it works quite
345 * happily and handles things quite well.
346 */
347
348 static struct ipq *ipqueue = NULL; /* IP fragment queue */
349
350 /*
351 * Create a new fragment entry.
352 */
353
354 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
355 {
356 struct ipfrag *fp;
357
358 fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
359 if (fp == NULL)
360 {
361 NETDEBUG(printk("IP: frag_create: no memory left !\n"));
362 return(NULL);
363 }
364 memset(fp, 0, sizeof(struct ipfrag));
365
366 /* Fill in the structure. */
367 fp->offset = offset;
368 fp->end = end;
369 fp->len = end - offset;
370 fp->skb = skb;
371 fp->ptr = ptr;
372
373 return(fp);
374 }
375
376
377 /*
378 * Find the correct entry in the "incomplete datagrams" queue for
379 * this IP datagram, and return the queue entry address if found.
380 */
381
382 static struct ipq *ip_find(struct iphdr *iph)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
383 {
384 struct ipq *qp;
385 struct ipq *qplast;
386
387 cli();
388 qplast = NULL;
389 for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
390 {
391 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
392 iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
393 {
394 del_timer(&qp->timer); /* So it doesn't vanish on us. The timer will be reset anyway */
395 sti();
396 return(qp);
397 }
398 }
399 sti();
400 return(NULL);
401 }
402
403
404 /*
405 * Remove an entry from the "incomplete datagrams" queue, either
406 * because we completed, reassembled and processed it, or because
407 * it timed out.
408 */
409
410 static void ip_free(struct ipq *qp)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
411 {
412 struct ipfrag *fp;
413 struct ipfrag *xp;
414
415 /*
416 * Stop the timer for this entry.
417 */
418
419 del_timer(&qp->timer);
420
421 /* Remove this entry from the "incomplete datagrams" queue. */
422 cli();
423 if (qp->prev == NULL)
424 {
425 ipqueue = qp->next;
426 if (ipqueue != NULL)
427 ipqueue->prev = NULL;
428 }
429 else
430 {
431 qp->prev->next = qp->next;
432 if (qp->next != NULL)
433 qp->next->prev = qp->prev;
434 }
435
436 /* Release all fragment data. */
437
438 fp = qp->fragments;
439 while (fp != NULL)
440 {
441 xp = fp->next;
442 IS_SKB(fp->skb);
443 kfree_skb(fp->skb,FREE_READ);
444 kfree_s(fp, sizeof(struct ipfrag));
445 fp = xp;
446 }
447
448 /* Release the IP header. */
449 kfree_s(qp->iph, 64 + 8);
450
451 /* Finally, release the queue descriptor itself. */
452 kfree_s(qp, sizeof(struct ipq));
453 sti();
454 }
455
456
457 /*
458 * Oops- a fragment queue timed out. Kill it and send an ICMP reply.
459 */
460
461 static void ip_expire(unsigned long arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
462 {
463 struct ipq *qp;
464
465 qp = (struct ipq *)arg;
466
467 /*
468 * Send an ICMP "Fragment Reassembly Timeout" message.
469 */
470
471 ip_statistics.IpReasmTimeout++;
472 ip_statistics.IpReasmFails++;
473 /* This if is always true... shrug */
474 if(qp->fragments!=NULL)
475 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
476 ICMP_EXC_FRAGTIME, 0, qp->dev);
477
478 /*
479 * Nuke the fragment queue.
480 */
481 ip_free(qp);
482 }
483
484
485 /*
486 * Add an entry to the 'ipq' queue for a newly received IP datagram.
487 * We will (hopefully :-) receive all other fragments of this datagram
488 * in time, so we just create a queue for this datagram, in which we
489 * will insert the received fragments at their respective positions.
490 */
491
492 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
493 {
494 struct ipq *qp;
495 int ihlen;
496
497 qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
498 if (qp == NULL)
499 {
500 NETDEBUG(printk("IP: create: no memory left !\n"));
501 return(NULL);
502 skb->dev = qp->dev;
503 }
504 memset(qp, 0, sizeof(struct ipq));
505
506 /*
507 * Allocate memory for the IP header (plus 8 octets for ICMP).
508 */
509
510 ihlen = iph->ihl * 4;
511 qp->iph = (struct iphdr *) kmalloc(64 + 8, GFP_ATOMIC);
512 if (qp->iph == NULL)
513 {
514 NETDEBUG(printk("IP: create: no memory left !\n"));
515 kfree_s(qp, sizeof(struct ipq));
516 return(NULL);
517 }
518
519 memcpy(qp->iph, iph, ihlen + 8);
520 qp->len = 0;
521 qp->ihlen = ihlen;
522 qp->fragments = NULL;
523 qp->dev = dev;
524
525 /* Start a timer for this entry. */
526 qp->timer.expires = jiffies + IP_FRAG_TIME; /* about 30 seconds */
527 qp->timer.data = (unsigned long) qp; /* pointer to queue */
528 qp->timer.function = ip_expire; /* expire function */
529 add_timer(&qp->timer);
530
531 /* Add this entry to the queue. */
532 qp->prev = NULL;
533 cli();
534 qp->next = ipqueue;
535 if (qp->next != NULL)
536 qp->next->prev = qp;
537 ipqueue = qp;
538 sti();
539 return(qp);
540 }
541
542
543 /*
544 * See if a fragment queue is complete.
545 */
546
547 static int ip_done(struct ipq *qp)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
548 {
549 struct ipfrag *fp;
550 int offset;
551
552 /* Only possible if we received the final fragment. */
553 if (qp->len == 0)
554 return(0);
555
556 /* Check all fragment offsets to see if they connect. */
557 fp = qp->fragments;
558 offset = 0;
559 while (fp != NULL)
560 {
561 if (fp->offset > offset)
562 return(0); /* fragment(s) missing */
563 offset = fp->end;
564 fp = fp->next;
565 }
566
567 /* All fragments are present. */
568 return(1);
569 }
570
571
/*
 * Build a new IP datagram from all its fragments.
 *
 * Allocates one buffer large enough for header + payload, copies the
 * saved IP header and every fragment's data into place, frees the
 * queue entry and fixes up the header (frag_off cleared, tot_len
 * recomputed).  Returns the reassembled skb, or NULL on failure (the
 * queue entry is always consumed either way).
 *
 * FIXME: We copy here because we lack an effective way of handling
 * lists of bits on input.  Until the new skb data handling is in I'm
 * not going to touch this with a bargepole.
 */

static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 * Allocate a new buffer for the complete datagram.
	 */
	len = qp->ihlen + qp->len;

	if ((skb = dev_alloc_skb(len)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb_put(skb,len);
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original IP header into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;

	count = 0;

	/*
	 * Copy the data portion of every fragment into place.  The
	 * sanity check guards against a fragment list whose total size
	 * exceeds the buffer we just sized from qp->len.
	 */
	fp = qp->fragments;
	while(fp != NULL)
	{
		if(count+fp->len > skb->len)
		{
			NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
			ip_free(qp);
			kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header. */
	iph = skb->h.iph;
	iph->frag_off = 0;
	iph->tot_len = htons((iph->ihl * 4) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}
642
643
644 /*
645 * Process an incoming IP datagram fragment.
646 */
647
648 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
649 {
650 struct ipfrag *prev, *next, *tmp;
651 struct ipfrag *tfp;
652 struct ipq *qp;
653 struct sk_buff *skb2;
654 unsigned char *ptr;
655 int flags, offset;
656 int i, ihl, end;
657
658 ip_statistics.IpReasmReqds++;
659
660 /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
661 qp = ip_find(iph);
662
663 /* Is this a non-fragmented datagram? */
664 offset = ntohs(iph->frag_off);
665 flags = offset & ~IP_OFFSET;
666 offset &= IP_OFFSET;
667 if (((flags & IP_MF) == 0) && (offset == 0))
668 {
669 if (qp != NULL)
670 ip_free(qp); /* Huh? How could this exist?? */
671 return(skb);
672 }
673
674 offset <<= 3; /* offset is in 8-byte chunks */
675
676 /*
677 * If the queue already existed, keep restarting its timer as long
678 * as we still are receiving fragments. Otherwise, create a fresh
679 * queue entry.
680 */
681
682 if (qp != NULL)
683 {
684 del_timer(&qp->timer);
685 qp->timer.expires = jiffies + IP_FRAG_TIME; /* about 30 seconds */
686 qp->timer.data = (unsigned long) qp; /* pointer to queue */
687 qp->timer.function = ip_expire; /* expire function */
688 add_timer(&qp->timer);
689 }
690 else
691 {
692 /*
693 * If we failed to create it, then discard the frame
694 */
695 if ((qp = ip_create(skb, iph, dev)) == NULL)
696 {
697 skb->sk = NULL;
698 kfree_skb(skb, FREE_READ);
699 ip_statistics.IpReasmFails++;
700 return NULL;
701 }
702 }
703
704 /*
705 * Determine the position of this fragment.
706 */
707
708 ihl = iph->ihl * 4;
709 end = offset + ntohs(iph->tot_len) - ihl;
710
711 /*
712 * Point into the IP datagram 'data' part.
713 */
714
715 ptr = skb->data + ihl;
716
717 /*
718 * Is this the final fragment?
719 */
720
721 if ((flags & IP_MF) == 0)
722 qp->len = end;
723
724 /*
725 * Find out which fragments are in front and at the back of us
726 * in the chain of fragments so far. We must know where to put
727 * this fragment, right?
728 */
729
730 prev = NULL;
731 for(next = qp->fragments; next != NULL; next = next->next)
732 {
733 if (next->offset > offset)
734 break; /* bingo! */
735 prev = next;
736 }
737
738 /*
739 * We found where to put this one.
740 * Check for overlap with preceding fragment, and, if needed,
741 * align things so that any overlaps are eliminated.
742 */
743 if (prev != NULL && offset < prev->end)
744 {
745 i = prev->end - offset;
746 offset += i; /* ptr into datagram */
747 ptr += i; /* ptr into fragment data */
748 }
749
750 /*
751 * Look for overlap with succeeding segments.
752 * If we can merge fragments, do it.
753 */
754
755 for(tmp=next; tmp != NULL; tmp = tfp)
756 {
757 tfp = tmp->next;
758 if (tmp->offset >= end)
759 break; /* no overlaps at all */
760
761 i = end - next->offset; /* overlap is 'i' bytes */
762 tmp->len -= i; /* so reduce size of */
763 tmp->offset += i; /* next fragment */
764 tmp->ptr += i;
765 /*
766 * If we get a frag size of <= 0, remove it and the packet
767 * that it goes with.
768 */
769 if (tmp->len <= 0)
770 {
771 if (tmp->prev != NULL)
772 tmp->prev->next = tmp->next;
773 else
774 qp->fragments = tmp->next;
775
776 if (tfp->next != NULL)
777 tmp->next->prev = tmp->prev;
778
779 next=tfp; /* We have killed the original next frame */
780
781 kfree_skb(tmp->skb,FREE_READ);
782 kfree_s(tmp, sizeof(struct ipfrag));
783 }
784 }
785
786 /*
787 * Insert this fragment in the chain of fragments.
788 */
789
790 tfp = NULL;
791 tfp = ip_frag_create(offset, end, skb, ptr);
792
793 /*
794 * No memory to save the fragment - so throw the lot
795 */
796
797 if (!tfp)
798 {
799 skb->sk = NULL;
800 kfree_skb(skb, FREE_READ);
801 return NULL;
802 }
803 tfp->prev = prev;
804 tfp->next = next;
805 if (prev != NULL)
806 prev->next = tfp;
807 else
808 qp->fragments = tfp;
809
810 if (next != NULL)
811 next->prev = tfp;
812
813 /*
814 * OK, so we inserted this new fragment into the chain.
815 * Check if we now have a full IP datagram which we can
816 * bump up to the IP layer...
817 */
818
819 if (ip_done(qp))
820 {
821 skb2 = ip_glue(qp); /* glue together the fragments */
822 return(skb2);
823 }
824 return(NULL);
825 }
826
827
/*
 * This IP datagram is too large to be sent in one piece.  Break it up
 * into smaller pieces (each of size equal to the MAC header plus IP
 * header plus a block of the data of the original IP data part) that
 * will yet fit in a single device frame, and queue such a frame for
 * sending by calling ip_queue_xmit().  Note that this is recursion,
 * and bad things will happen if this function causes a loop...
 *
 * is_frag: bit 0 set when the input is itself a non-final fragment
 * (so MF must stay on every piece); bit 1 set when the input already
 * carries a fragment offset that must be preserved.
 *
 * Yes this is inefficient, feel free to submit a quicker one.
 *
 * **Protocol Violation**
 * We copy all the options to each fragment. !FIXME!
 */

void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;
	unsigned long flags;

	/*
	 * Point into the IP datagram header (which sits just past the
	 * hardware header in this buffer).
	 */

	raw = skb->data;
	iph = (struct iphdr *) (raw + dev->hard_header_len);

	skb->ip_hdr = iph;

	/*
	 * Setup starting values.
	 */

	hlen = iph->ihl * 4;
	left = ntohs(iph->tot_len) - hlen;	/* Payload bytes still to send */
	hlen += dev->hard_header_len;		/* Total header size */
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 * Check for any "DF" flag. [DF means do not fragment]
	 */

	if (ntohs(iph->frag_off) & IP_DF)
	{
		/*
		 * Reply giving the MTU of the failed hop.
		 */
		ip_statistics.IpFragFails++;
		icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
		return;
	}

	/*
	 * The protocol doesn't seem to say what to do in the case that
	 * the frame + options doesn't fit the mtu.  As it used to fall
	 * down dead in this case we were fortunate it didn't happen.
	 * (Fragment payloads below 8 bytes cannot be expressed.)
	 */

	if(mtu<8)
	{
		/* It's wrong but it's better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 * Fragment the datagram.
	 *
	 * The initial offset is 0 for a complete frame.  When
	 * fragmenting fragments it's wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	else
		offset = 0;


	/*
	 * Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 * Allocate a buffer for this piece (+15 slack for
		 * alignment).
		 */

		if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
		{
			NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 * Set up data on packet
		 */

		skb2->arp = skb->arp;
		if(skb->free==0)
			printk("IP fragmenter: BUG free!=1 in fragmenter\n");
		skb2->free = 1;
		skb_put(skb2,len + hlen);
		skb2->h.raw=(char *) skb2->data;
		/*
		 * Charge the memory for the fragment to any owner
		 * it might possess
		 */

		save_flags(flags);
		if (sk)
		{
			cli();
			sk->wmem_alloc += skb2->truesize;
			skb2->sk=sk;
		}
		restore_flags(flags);
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 * Copy the packet header (MAC + IP, options included)
		 * into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 * Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=dev->hard_header_len;

		/*
		 * Fill in the new header fields: this piece's offset (in
		 * 8-byte units) and the More-Fragments flag.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		/*
		 * Added AC : If we are fragmenting a fragment thats not the
		 * last fragment then keep MF on each bit
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 * Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}
1005
1006
1007
1008 #ifdef CONFIG_IP_FORWARD
1009
1010 /*
1011 * Forward an IP datagram to its next destination.
1012 */
1013
1014 int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1015 {
1016 struct device *dev2; /* Output device */
1017 struct iphdr *iph; /* Our header */
1018 struct sk_buff *skb2; /* Output packet */
1019 struct rtable *rt; /* Route we use */
1020 unsigned char *ptr; /* Data pointer */
1021 unsigned long raddr; /* Router IP address */
1022 #ifdef CONFIG_IP_FIREWALL
1023 int fw_res = 0; /* Forwarding result */
1024
1025 /*
1026 * See if we are allowed to forward this.
1027 * Note: demasqueraded fragments are always 'back'warded.
1028 */
1029
1030
1031 if(!(is_frag&4))
1032 {
1033 fw_res=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0);
1034 switch (fw_res) {
1035 case 1:
1036 #ifdef CONFIG_IP_MASQUERADE
1037 case 2:
1038 #endif
1039 break;
1040 case -1:
1041 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1042 /* fall thru */
1043 default:
1044 return -1;
1045 }
1046 }
1047 #endif
1048 /*
1049 * According to the RFC, we must first decrease the TTL field. If
1050 * that reaches zero, we must reply an ICMP control message telling
1051 * that the packet's lifetime expired.
1052 *
1053 * Exception:
1054 * We may not generate an ICMP for an ICMP. icmp_send does the
1055 * enforcement of this so we can forget it here. It is however
1056 * sometimes VERY important.
1057 */
1058
1059 iph = skb->h.iph;
1060 iph->ttl--;
1061
1062 /*
1063 * Re-compute the IP header checksum.
1064 * This is inefficient. We know what has happened to the header
1065 * and could thus adjust the checksum as Phil Karn does in KA9Q
1066 */
1067
1068 iph->check = ntohs(iph->check) + 0x0100;
1069 if ((iph->check & 0xFF00) == 0)
1070 iph->check++; /* carry overflow */
1071 iph->check = htons(iph->check);
1072
1073 if (iph->ttl <= 0)
1074 {
1075 /* Tell the sender its packet died... */
1076 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1077 return -1;
1078 }
1079
1080 /*
1081 * OK, the packet is still valid. Fetch its destination address,
1082 * and give it to the IP sender for further processing.
1083 */
1084
1085 rt = ip_rt_route(target_addr, NULL, NULL);
1086 if (rt == NULL)
1087 {
1088 /*
1089 * Tell the sender its packet cannot be delivered. Again
1090 * ICMP is screened later.
1091 */
1092 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1093 return -1;
1094 }
1095
1096
1097 /*
1098 * Gosh. Not only is the packet valid; we even know how to
1099 * forward it onto its final destination. Can we say this
1100 * is being plain lucky?
1101 * If the router told us that there is no GW, use the dest.
1102 * IP address itself- we seem to be connected directly...
1103 */
1104
1105 raddr = rt->rt_gateway;
1106
1107 if (raddr != 0)
1108 {
1109 /*
1110 * Strict routing permits no gatewaying
1111 */
1112
1113 if(target_strict)
1114 {
1115 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev);
1116 return -1;
1117 }
1118
1119 /*
1120 * There is a gateway so find the correct route for it.
1121 * Gateways cannot in turn be gatewayed.
1122 */
1123
1124 rt = ip_rt_route(raddr, NULL, NULL);
1125 if (rt == NULL)
1126 {
1127 /*
1128 * Tell the sender its packet cannot be delivered...
1129 */
1130 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1131 return -1;
1132 }
1133 if (rt->rt_gateway != 0)
1134 raddr = rt->rt_gateway;
1135 }
1136 else
1137 raddr = target_addr;
1138
1139 /*
1140 * Having picked a route we can now send the frame out.
1141 */
1142
1143 dev2 = rt->rt_dev;
1144
1145 /*
1146 * In IP you never have to forward a frame on the interface that it
1147 * arrived upon. We now generate an ICMP HOST REDIRECT giving the route
1148 * we calculated.
1149 */
1150 #ifndef CONFIG_IP_NO_ICMP_REDIRECT
1151 if (dev == dev2 && !((iph->saddr^iph->daddr)&dev->pa_mask) && (rt->rt_flags&RTF_MODIFIED))
1152 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1153 #endif
1154
1155 /*
1156 * We now may allocate a new buffer, and copy the datagram into it.
1157 * If the indicated interface is up and running, kick it.
1158 */
1159
1160 if (dev2->flags & IFF_UP)
1161 {
1162 #ifdef CONFIG_IP_MASQUERADE
1163 /*
1164 * If this fragment needs masquerading, make it so...
1165 * (Dont masquerade de-masqueraded fragments)
1166 */
1167 if (!(is_frag&4) && fw_res==2)
1168 ip_fw_masquerade(&skb, dev2);
1169 #endif
1170 IS_SKB(skb);
1171
1172 if(skb_headroom(skb)<dev2->hard_header_len)
1173 {
1174 skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC);
1175 IS_SKB(skb2);
1176
1177 /*
1178 * This is rare and since IP is tolerant of network failures
1179 * quite harmless.
1180 */
1181
1182 if (skb2 == NULL)
1183 {
1184 NETDEBUG(printk("\nIP: No memory available for IP forward\n"));
1185 return -1;
1186 }
1187
1188 /*
1189 * Add the physical headers.
1190 */
1191
1192 ip_send(skb2,raddr,skb->len,dev2,dev2->pa_addr);
1193
1194 /*
1195 * We have to copy the bytes over as the new header wouldn't fit
1196 * the old buffer. This should be very rare.
1197 */
1198
1199 ptr = skb_put(skb2,skb->len);
1200 skb2->free = 1;
1201 skb2->h.raw = ptr;
1202
1203 /*
1204 * Copy the packet data into the new buffer.
1205 */
1206 memcpy(ptr, skb->h.raw, skb->len);
1207 }
1208 else
1209 {
1210 /*
1211 * Build a new MAC header.
1212 */
1213
1214 skb2 = skb;
1215 skb2->dev=dev2;
1216 skb->arp=1;
1217 skb->raddr=raddr;
1218 if(dev2->hard_header)
1219 {
1220 if(dev2->hard_header(skb, dev2, ETH_P_IP, NULL, NULL, skb->len)<0)
1221 skb->arp=0;
1222 }
1223 ip_statistics.IpForwDatagrams++;
1224 }
1225 /*
1226 * See if it needs fragmenting. Note in ip_rcv we tagged
1227 * the fragment type. This must be right so that
1228 * the fragmenter does the right thing.
1229 */
1230
1231 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1232 {
1233 ip_fragment(NULL,skb2,dev2, is_frag);
1234 kfree_skb(skb2,FREE_WRITE);
1235 }
1236 else
1237 {
1238 #ifdef CONFIG_IP_ACCT
1239 /*
1240 * Count mapping we shortcut
1241 */
1242
1243 ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
1244 #endif
1245
1246 /*
1247 * Map service types to priority. We lie about
1248 * throughput being low priority, but it's a good
1249 * choice to help improve general usage.
1250 */
1251 if(iph->tos & IPTOS_LOWDELAY)
1252 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1253 else if(iph->tos & IPTOS_THROUGHPUT)
1254 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1255 else
1256 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1257 }
1258 }
1259 else
1260 return -1;
1261
1262 /*
1263 * Tell the caller if their buffer is free.
1264 */
1265
1266 if(skb==skb2)
1267 return 0;
1268 return 1;
1269 }
1270
1271
1272 #endif
1273
/*
 *	This function receives all incoming IP datagrams.
 *
 *	On entry skb->data points to the start of the IP header and
 *	the MAC header has been removed.
 *
 *	Ownership of skb passes to this function: the buffer is either
 *	delivered upward, queued for reassembly, handed to ip_forward,
 *	or freed here. Always returns 0 except for malformed options,
 *	where -EINVAL is returned after freeing the buffer.
 */

int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->h.iph;
	struct sock *raw_sk=NULL;		/* Last matching raw socket, gets the original skb */
	unsigned char hash;
	unsigned char flag = 0;			/* Set once some protocol has taken the frame */
	struct inet_protocol *ipprot;
	int brd=IS_MYADDR;			/* Address class; assumed unicast-to-us until checked */
	unsigned long target_addr;		/* Forwarding target; source routing may rewrite it */
	int target_strict=0;			/* Non-zero when a strict source route option was seen */
	int is_frag=0;				/* Bit 0: more fragments. Bit 1: offset non-zero */
#ifdef CONFIG_IP_FIREWALL
	int err;
#endif

	ip_statistics.IpInReceives++;

	/*
	 *	Tag the ip header of this packet so we can find it
	 */

	skb->ip_hdr = iph;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *	RFC1122: 3.1.2.3 MUST discard a frame with invalid source address [NEEDS FIXING].
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 *	(5.	We ought to check for IP multicast addresses and undefined types.. does this matter ?)
	 */

	if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0
		|| skb->len < ntohs(iph->tot_len))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return(0);
	}

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 *	Note this now means skb->len holds ntohs(iph->tot_len).
	 */

	skb_trim(skb,ntohs(iph->tot_len));

	/*
	 *	See if the firewall wants to dispose of the packet.
	 *	A return of -1 means "reject" (ICMP sent), anything else
	 *	below 1 is a silent drop.
	 */

#ifdef	CONFIG_IP_FIREWALL

	if ((err=ip_fw_chk(iph,dev,ip_fw_blk_chain,ip_fw_blk_policy, 0))<1)
	{
		if(err==-1)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

#endif


	/*
	 *	Next analyse the packet for options. Studies show under one packet in
	 *	a thousand have options....
	 */

	target_addr = iph->daddr;

	if (iph->ihl != 5)
	{
		/* Humph.. options. Lots of annoying fiddly bits */

		/*
		 *	This is straight from the RFC. It might even be right ;)
		 *
		 *	RFC 1122: 3.2.1.8 STREAMID option is obsolete and MUST be ignored.
		 *	RFC 1122: 3.2.1.8 MUST NOT crash on a zero length option.
		 *	RFC 1122: 3.2.1.8 MUST support acting as final destination of a source route.
		 */

		int opt_space=4*(iph->ihl-5);		/* Bytes of option data after the fixed header */
		int opt_size;
		unsigned char *opt_ptr=skb->h.raw+sizeof(struct iphdr);

		skb->ip_summed=0;		/* Our free checksum is bogus for this case */

		while(opt_space>0)
		{
			if(*opt_ptr==IPOPT_NOOP)
			{
				/* Single byte padding option */
				opt_ptr++;
				opt_space--;
				continue;
			}
			if(*opt_ptr==IPOPT_END)
				break;	/* Done */
			if(opt_space<2 || (opt_size=opt_ptr[1])<2 || opt_ptr[1]>opt_space)
			{
				/*
				 *	RFC 1122: 3.2.2.5 SHOULD send parameter problem reports.
				 */
				icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
				kfree_skb(skb, FREE_READ);
				return -EINVAL;
			}
			switch(opt_ptr[0])
			{
				case IPOPT_SEC:
					/* Should we drop this ?? */
					break;
				case IPOPT_SSRR:	/* These work almost the same way */
					target_strict=1;
					/* Fall through */
				case IPOPT_LSRR:
#ifdef CONFIG_IP_NOSR
					/* Source routing disabled by configuration: junk the frame */
					kfree_skb(skb, FREE_READ);
					return -EINVAL;
#endif
				case IPOPT_RR:
				/*
				 *	RFC 1122: 3.2.1.8 Support for RR is OPTIONAL.
				 *	Only process the route data if the frame is addressed to us.
				 */
					if (iph->daddr!=skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
						break;
					if((opt_size<3) || ( opt_ptr[0]==IPOPT_RR && opt_ptr[2] > opt_size-4 ))
					{
						if(ip_chk_addr(iph->daddr))
							icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
						kfree_skb(skb, FREE_READ);
						return -EINVAL;
					}
					if(opt_ptr[2] > opt_size-4 )
						break;		/* Route data area full: nothing to record */
					/* Bytes are [IPOPT_xxRR][Length][EntryPointer][Entry0][Entry1].... */
					/* This isn't going to be too portable - FIXME */
					if(opt_ptr[0]!=IPOPT_RR)
					{
						int t;
						target_addr=*(u32 *)(&opt_ptr[opt_ptr[2]]);	/* Get hop */
						t=ip_chk_addr(target_addr);
						if(t==IS_MULTICAST||t==IS_BROADCAST)
						{
							/* Source routing to broadcast/multicast is illegal */
							if(ip_chk_addr(iph->daddr))
								icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
							kfree_skb(skb,FREE_READ);
							return -EINVAL;
						}
					}
					*(u32 *)(&opt_ptr[opt_ptr[2]])=skb->dev->pa_addr;	/* Record hop */
					break;
				case IPOPT_TIMESTAMP:
				/*
				 *	RFC 1122: 3.2.1.8 The timestamp option is OPTIONAL but if implemented
				 *	MUST meet various rules (read the spec).
				 */
					NETDEBUG(printk("ICMP: Someone finish the timestamp routine ;)\n"));
					break;
				default:
					break;
			}
			opt_ptr+=opt_size;
			opt_space-=opt_size;
		}

	}


	/*
	 *	Remember if the frame is fragmented.
	 */

	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=1;
		/*
		 *	Last fragment ?
		 */

		if (iph->frag_off & htons(IP_OFFSET))
			is_frag|=2;
	}

	/*
	 *	Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
	 *
	 *	This is inefficient. While finding out if it is for us we could also compute
	 *	the routing table entry. This is where the great unified cache theory comes
	 *	in as and when someone implements it
	 *
	 *	For most hosts over 99% of packets match the first conditional
	 *	and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
	 *	function entry.
	 */

	if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0)
	{
#ifdef CONFIG_IP_MULTICAST

		if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
		{
			/*
			 *	Check it is for one of our groups
			 */
			struct ip_mc_list *ip_mc=dev->ip_mc_list;
			do
			{
				if(ip_mc==NULL)
				{
					/* Not joined: discard quietly */
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				if(ip_mc->multiaddr==iph->daddr)
					break;
				ip_mc=ip_mc->next;
			}
			while(1);
		}
#endif

#ifdef CONFIG_IP_MASQUERADE
		/*
		 *	Do we need to de-masquerade this fragment?
		 *	If so the frame is really for someone behind us:
		 *	push it back through ip_forward (flag bit 4 marks
		 *	it as demasqueraded so it is not re-firewalled).
		 */
		if (ip_fw_demasquerade(skb))
		{
			struct iphdr *iph=skb->h.iph;
			if(ip_forward(skb, dev, is_frag|4, iph->daddr, 0))
				kfree_skb(skb, FREE_WRITE);
			return(0);
		}
#endif

		/*
		 *	Account for the packet
		 */

#ifdef CONFIG_IP_ACCT
		ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
#endif

		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;	/* More fragments still outstanding */
			skb->dev = dev;
			iph=skb->h.iph;
		}

		/*
		 *	Point into the IP datagram, just past the header.
		 */

		skb->ip_hdr = iph;
		skb->h.raw += iph->ihl*4;

		/*
		 *	Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
		 *
		 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
		 */

		hash = iph->protocol & (SOCK_ARRAY_SIZE-1);

		/*
		 *	If there maybe a raw socket we must check - if not we don't care less
		 */

		if((raw_sk=raw_prot.sock_array[hash])!=NULL)
		{
			struct sock *sknext=NULL;
			struct sk_buff *skb1;
			raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
			if(raw_sk)	/* Any raw sockets */
			{
				do
				{
					/* Find the next */
					sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
					if(sknext)
						skb1=skb_clone(skb, GFP_ATOMIC);
					else
						break;	/* One pending raw socket left */
					if(skb1)
						raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
					raw_sk=sknext;
				}
				while(raw_sk!=NULL);

				/*
				 *	Here either raw_sk is the last raw socket, or NULL if none
				 */

				/*
				 *	We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy
				 */
			}
		}

		/*
		 *	skb->h.raw now points at the protocol beyond the IP header.
		 */

		hash = iph->protocol & (MAX_INET_PROTOS -1);
		for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
		{
			struct sk_buff *skb2;

			if (ipprot->protocol != iph->protocol)
				continue;
			/*
			 *	See if we need to make a copy of it.  This will
			 *	only be set if more than one protocol wants it.
			 *	and then not for the last one. If there is a pending
			 *	raw delivery wait for that
			 */

			if (ipprot->copy || raw_sk)
			{
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if(skb2==NULL)
					continue;	/* No memory: this protocol misses the frame */
			}
			else
			{
				skb2 = skb;
			}
			flag = 1;

			/*
			 *	Pass on the datagram to each protocol that wants it,
			 *	based on the datagram protocol.  We should really
			 *	check the protocol handler's return values here...
			 */

			ipprot->handler(skb2, dev, NULL, iph->daddr,
				(ntohs(iph->tot_len) - (iph->ihl * 4)),
				iph->saddr, 0, ipprot);

		}

		/*
		 *	All protocols checked.
		 *	If this packet was a broadcast, we may *not* reply to it, since that
		 *	causes (proven, grin) ARP storms and a leakage of memory (i.e. all
		 *	ICMP reply messages get queued up for transmission...)
		 */

		if(raw_sk!=NULL)	/* Shift to last raw user */
			raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
		else if (!flag)		/* Free and report errors */
		{
			if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
			kfree_skb(skb, FREE_WRITE);
		}

		return(0);
	}

	/*
	 *	Do any IP forwarding required.
	 */

	/*
	 *	Don't forward multicast or broadcast frames.
	 */

	if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
	{
		kfree_skb(skb,FREE_WRITE);
		return 0;
	}

	/*
	 *	The packet is for another target. Forward the frame
	 */

#ifdef CONFIG_IP_FORWARD
	if(ip_forward(skb, dev, is_frag, target_addr, target_strict))
		kfree_skb(skb, FREE_WRITE);
#else
/*	printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
			iph->saddr,iph->daddr);*/
	ip_statistics.IpInAddrErrors++;
	kfree_skb(skb, FREE_WRITE);
#endif
	return(0);
}
1684
1685
/*
 *	Loop a packet back to the sender.
 *
 *	Builds a fresh copy of the frame (headers included) on the
 *	loopback device and queues it for transmission.  The original
 *	skb is left untouched and still belongs to the caller.  On
 *	allocation failure the copy is silently not made - IP is
 *	unreliable, so this is harmless.
 */

static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
{
	extern struct device loopback_dev;
	struct device *dev=&loopback_dev;
	/* Length of the IP datagram, excluding the old link level header */
	int len=skb->len-old_dev->hard_header_len;
	struct sk_buff *newskb=dev_alloc_skb(len+dev->hard_header_len+15);

	if(newskb==NULL)
		return;

	/* Fresh buffer: clear chaining and give it sane ownership state */
	newskb->link3=NULL;
	newskb->sk=NULL;
	newskb->dev=dev;
	newskb->saddr=skb->saddr;
	newskb->daddr=skb->daddr;
	newskb->raddr=skb->raddr;
	newskb->free=1;			/* Free after transmit */
	newskb->lock=0;
	newskb->users=0;
	newskb->pkt_type=skb->pkt_type;

	/*
	 *	Put a MAC header on the packet
	 */
	ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr);
	/*
	 *	Add the rest of the data space.
	 */
	newskb->ip_hdr=(struct iphdr *)skb_put(newskb, len);
	/*
	 *	Copy the data (IP header plus payload)
	 */
	memcpy(newskb->ip_hdr,skb->ip_hdr,len);

	/* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */

	/*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
	ip_queue_xmit(NULL, dev, newskb, 1);
}
1729
1730
/*
 *	Queues a packet to be sent, and starts the transmitter
 *	if necessary.  if free = 1 then we free the block after
 *	transmit, otherwise we don't. If free==2 we not only
 *	free the block but also don't assign a new ip seq number.
 *	This routine also needs to put in the total length,
 *	and compute the checksum
 *
 *	The packet passed in must already contain room for the link
 *	level header (dev->hard_header_len bytes) followed by a
 *	partially filled-in IP header.
 */

void ip_queue_xmit(struct sock *sk, struct device *dev,
	      struct sk_buff *skb, int free)
{
	struct iphdr *iph;
	unsigned char *ptr;

	/* Sanity check */
	if (dev == NULL)
	{
		NETDEBUG(printk("IP: ip_queue_xmit dev = NULL\n"));
		return;
	}

	IS_SKB(skb);

	/*
	 *	Do some book-keeping in the packet for later
	 */


	skb->dev = dev;
	skb->when = jiffies;		/* Timestamp for retransmit timers */

	/*
	 *	Find the IP header and set the length. This is bad
	 *	but once we get the skb data handling code in the
	 *	hardware will push its header sensibly and we will
	 *	set skb->ip_hdr to avoid this mess and the fixed
	 *	header length problem
	 */

	ptr = skb->data;
	ptr += dev->hard_header_len;
	iph = (struct iphdr *)ptr;
	skb->ip_hdr = iph;
	iph->tot_len = ntohs(skb->len-dev->hard_header_len);

#ifdef CONFIG_IP_FIREWALL
	if(ip_fw_chk(iph, dev, ip_fw_blk_chain, ip_fw_blk_policy, 0) != 1)
		/* just don't send this packet */
		/* NOTE(review): the skb is neither freed nor queued here -
		   looks like a potential leak; confirm against caller contract */
		return;
#endif

	/*
	 *	No reassigning numbers to fragments...
	 */

	if(free!=2)
		iph->id      = htons(ip_id_count++);
	else
		free=1;		/* free==2 collapses to "free after transmit" from here on */

	/* All buffers without an owner socket get freed */
	if (sk == NULL)
		free = 1;

	skb->free = free;

	/*
	 *	Do we need to fragment. Again this is inefficient.
	 *	We need to somehow lock the original buffer and use
	 *	bits of it.
	 */

	if(skb->len > dev->mtu + dev->hard_header_len)
	{
		ip_fragment(sk,skb,dev,0);
		IS_SKB(skb);
		kfree_skb(skb,FREE_WRITE);	/* Fragments were copies; the original goes */
		return;
	}

	/*
	 *	Add an IP checksum
	 */

	ip_send_check(iph);

	/*
	 *	Print the frame when debugging
	 */

	/*
	 *	More debugging. You cannot queue a packet already on a list
	 *	Spot this and moan loudly.
	 */
	if (skb->next != NULL)
	{
		NETDEBUG(printk("ip_queue_xmit: next != NULL\n"));
		skb_unlink(skb);
	}

	/*
	 *	If a sender wishes the packet to remain unfreed
	 *	we add it to his send queue. This arguably belongs
	 *	in the TCP level since nobody else uses it. BUT
	 *	remember IPng might change all the rules.
	 */

	if (!free)
	{
		unsigned long flags;
		/* The socket now has more outstanding blocks */

		sk->packets_out++;

		/* Protect the list for a moment */
		save_flags(flags);
		cli();

		if (skb->link3 != NULL)
		{
			NETDEBUG(printk("ip.c: link3 != NULL\n"));
			skb->link3 = NULL;
		}
		/* Append to the socket's retransmit queue */
		if (sk->send_head == NULL)
		{
			sk->send_tail = skb;
			sk->send_head = skb;
		}
		else
		{
			sk->send_tail->link3 = skb;
			sk->send_tail = skb;
		}
		/* skb->link3 is NULL */

		/* Interrupt restore */
		restore_flags(flags);
	}
	else
		/* Remember who owns the buffer */
		skb->sk = sk;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */

	ip_statistics.IpOutRequests++;
#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
#endif

#ifdef CONFIG_IP_MULTICAST

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
	{
		if(sk==NULL || sk->ip_mc_loop)
		{
			if(iph->daddr==IGMP_ALL_HOSTS)
				ip_loopback(dev,skb);
			else
			{
				/* Loop back only if we are a member of the group */
				struct ip_mc_list *imc=dev->ip_mc_list;
				while(imc!=NULL)
				{
					if(imc->multiaddr==iph->daddr)
					{
						ip_loopback(dev,skb);
						break;
					}
					imc=imc->next;
				}
			}
		}
		/* Multicasts with ttl 0 must not go beyond the host */

		if(skb->ip_hdr->ttl==0)
		{
			kfree_skb(skb, FREE_READ);
			return;
		}
	}
#endif
	/* Local copy of broadcasts so we see our own transmissions */
	if((dev->flags&IFF_BROADCAST) && (iph->daddr==dev->pa_brdaddr||iph->daddr==0xFFFFFFFF) && !(dev->flags&IFF_LOOPBACK))
		ip_loopback(dev,skb);

	if (dev->flags & IFF_UP)
	{
		/*
		 *	If we have an owner use its priority setting,
		 *	otherwise use NORMAL
		 */

		if (sk != NULL)
		{
			dev_queue_xmit(skb, dev, sk->priority);
		}
		else
		{
			dev_queue_xmit(skb, dev, SOPRI_NORMAL);
		}
	}
	else
	{
		ip_statistics.IpOutDiscards++;
		if (free)
			kfree_skb(skb, FREE_WRITE);
	}
}
1944
1945
1946
1947 #ifdef CONFIG_IP_MULTICAST
1948
/*
 *	Write an multicast group list table for the IGMP daemon to
 *	read.
 *
 *	Standard /proc read helper: fills 'buffer' with at most 'length'
 *	bytes starting at file offset 'offset', sets *start to where the
 *	requested window begins inside the buffer and returns the number
 *	of valid bytes.  Interrupts are disabled while walking the
 *	device and group lists so they cannot change under us.
 */

int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
{
	off_t pos=0, begin=0;		/* pos: bytes generated so far; begin: file offset of buffer[0] */
	struct ip_mc_list *im;
	unsigned long flags;
	int len=0;
	struct device *dev;

	len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");
	save_flags(flags);
	cli();

	for(dev = dev_base; dev; dev = dev->next)
	{
		if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
		{
			len+=sprintf(buffer+len,"%-10s: %5d\n",
					dev->name, dev->mc_count);
			for(im = dev->ip_mc_list; im; im = im->next)
			{
				len+=sprintf(buffer+len,
					"\t\t\t%08lX %5d %d:%08lX\n",
					im->multiaddr, im->users,
					im->tm_running, im->timer.expires-jiffies);
				pos=begin+len;
				if(pos<offset)
				{
					/* Everything so far is before the window: restart the buffer */
					len=0;
					begin=pos;
				}
				if(pos>offset+length)
					break;	/* Window filled */
			}
		}
	}
	restore_flags(flags);
	/* Trim the output to exactly the requested [offset, offset+length) window */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
1996
1997
1998 /*
1999 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on
2000 * an IP socket.
2001 *
2002 * We implement IP_TOS (type of service), IP_TTL (time to live).
2003 *
2004 * Next release we will sort out IP_OPTIONS since for some people are kind of important.
2005 */
2006
2007 static struct device *ip_mc_find_devfor(unsigned long addr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2008 {
2009 struct device *dev;
2010 for(dev = dev_base; dev; dev = dev->next)
2011 {
2012 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2013 (dev->pa_addr==addr))
2014 return dev;
2015 }
2016
2017 return NULL;
2018 }
2019
2020 #endif
2021
/*
 *	Set a socket option at the IP level (the end of the line after
 *	any TCP/UDP etc. options on an IP socket).
 *
 *	Implements IP_TOS and IP_TTL, the multicast options when
 *	CONFIG_IP_MULTICAST is set, and the firewall/accounting control
 *	interface when those are configured.  'optval' is a user-space
 *	pointer; every access goes through verify_area/memcpy_fromfs.
 *	Returns 0 on success or a negative errno.
 */

int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
{
	int val,err;
	unsigned char ucval;
#if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
	struct ip_fw tmp_fw;
#endif
	if (optval == NULL)
		return(-EINVAL);

	err=verify_area(VERIFY_READ, optval, sizeof(int));
	if(err)
		return err;

	/* Fetch both int and byte views of the option value up front;
	   individual cases pick whichever they need */
	val = get_user((int *) optval);
	ucval=get_user((unsigned char *) optval);

	if(level!=SOL_IP)
		return -EOPNOTSUPP;

	switch(optname)
	{
		case IP_TOS:
			if(val<0||val>255)
				return -EINVAL;
			sk->ip_tos=val;
			/* Keep the queueing priority consistent with the TOS */
			if(val==IPTOS_LOWDELAY)
				sk->priority=SOPRI_INTERACTIVE;
			if(val==IPTOS_THROUGHPUT)
				sk->priority=SOPRI_BACKGROUND;
			return 0;
		case IP_TTL:
			if(val<1||val>255)
				return -EINVAL;
			sk->ip_ttl=val;
			return 0;
#ifdef CONFIG_IP_MULTICAST
		case IP_MULTICAST_TTL:
		{
			sk->ip_mc_ttl=(int)ucval;
	                return 0;
		}
		case IP_MULTICAST_LOOP:
		{
			if(ucval!=0 && ucval!=1)
				 return -EINVAL;
			sk->ip_mc_loop=(int)ucval;
			return 0;
		}
		case IP_MULTICAST_IF:
		{
			struct in_addr addr;
			struct device *dev=NULL;

			/*
			 *	Check the arguments are allowable
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(addr));
			if(err)
				return err;

			memcpy_fromfs(&addr,optval,sizeof(addr));


			/*
			 *	What address has been requested
			 */

			if(addr.s_addr==INADDR_ANY)	/* Default */
			{
				sk->ip_mc_name[0]=0;
				return 0;
			}

			/*
			 *	Find the device
			 */

			dev=ip_mc_find_devfor(addr.s_addr);

			/*
			 *	Did we find one
			 */

			if(dev)
			{
				strcpy(sk->ip_mc_name,dev->name);
				return 0;
			}
			return -EADDRNOTAVAIL;
		}

		case IP_ADD_MEMBERSHIP:
		{

/*
 *	FIXME: Add/Del membership should have a semaphore protecting them from re-entry
 */
			struct ip_mreq mreq;
			unsigned long route_src;
			struct rtable *rt;
			struct device *dev=NULL;

			/*
			 *	Check the arguments.
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/*
			 *	Get device for use later
			 */

			if(mreq.imr_interface.s_addr==INADDR_ANY)
			{
				/*
				 *	Not set so scan: route towards the group and
				 *	use whatever device the route picks.
				 */
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,NULL, &route_src))!=NULL)
				{
					dev=rt->rt_dev;
					rt->rt_use--;	/* Drop the reference ip_rt_route took */
				}
			}
			else
			{
				/*
				 *	Find a suitable device.
				 */

				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}

			/*
			 *	No device, no cookies.
			 */

			if(!dev)
				return -ENODEV;

			/*
			 *	Join group.
			 */

			return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}

		case IP_DROP_MEMBERSHIP:
		{
			struct ip_mreq mreq;
			struct rtable *rt;
			unsigned long route_src;
			struct device *dev=NULL;

			/*
			 *	Check the arguments
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/*
			 *	Get device for use later
			 */

			if(mreq.imr_interface.s_addr==INADDR_ANY)
			{
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,NULL, &route_src))!=NULL)
			        {
					dev=rt->rt_dev;
					rt->rt_use--;	/* Drop the reference ip_rt_route took */
				}
			}
			else
			{

				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}

			/*
			 *	Did we find a suitable device.
			 */

			if(!dev)
				return -ENODEV;

			/*
			 *	Leave group
			 */

			return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}
#endif
#ifdef CONFIG_IP_FIREWALL
		case IP_FW_ADD_BLK:
		case IP_FW_DEL_BLK:
		case IP_FW_ADD_FWD:
		case IP_FW_DEL_FWD:
		case IP_FW_CHK_BLK:
		case IP_FW_CHK_FWD:
		case IP_FW_FLUSH_BLK:
		case IP_FW_FLUSH_FWD:
		case IP_FW_ZERO_BLK:
		case IP_FW_ZERO_FWD:
		case IP_FW_POLICY_BLK:
		case IP_FW_POLICY_FWD:
			/* Firewall manipulation is root only */
			if(!suser())
				return -EPERM;
			if(optlen>sizeof(tmp_fw) || optlen<1)
				return -EINVAL;
			err=verify_area(VERIFY_READ,optval,optlen);
			if(err)
				return err;
			memcpy_fromfs(&tmp_fw,optval,optlen);
			err=ip_fw_ctl(optname, &tmp_fw,optlen);
			return -err;	/* -0 is 0 after all */

#endif
#ifdef CONFIG_IP_ACCT
		case IP_ACCT_DEL:
		case IP_ACCT_ADD:
		case IP_ACCT_FLUSH:
		case IP_ACCT_ZERO:
			/* Accounting manipulation is root only */
			if(!suser())
				return -EPERM;
			if(optlen>sizeof(tmp_fw) || optlen<1)
				return -EINVAL;
			err=verify_area(VERIFY_READ,optval,optlen);
			if(err)
				return err;
			memcpy_fromfs(&tmp_fw, optval,optlen);
			err=ip_acct_ctl(optname, &tmp_fw,optlen);
			return -err;	/* -0 is 0 after all */
#endif
		/* IP_OPTIONS and friends go here eventually */
		default:
			return(-ENOPROTOOPT);
	}
}
2269
/*
 *	Get the options. Note for future reference. The GET of IP options gets the
 *	_received_ ones. The set sets the _sent_ ones.
 *
 *	'optval' and 'optlen' are user-space pointers; all writes go
 *	through verify_area/put_user/memcpy_tofs.  Returns 0 on success
 *	or a negative errno.
 */

int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
{
	int val,err;
#ifdef CONFIG_IP_MULTICAST
	int len;
#endif

	if(level!=SOL_IP)
		return -EOPNOTSUPP;

	switch(optname)
	{
		case IP_TOS:
			val=sk->ip_tos;
			break;
		case IP_TTL:
			val=sk->ip_ttl;
			break;
#ifdef CONFIG_IP_MULTICAST
		case IP_MULTICAST_TTL:
			val=sk->ip_mc_ttl;
			break;
		case IP_MULTICAST_LOOP:
			val=sk->ip_mc_loop;
			break;
		case IP_MULTICAST_IF:
			/* This option returns the interface *name*, not an int,
			   so it copies out and returns directly */
			err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
			if(err)
  				return err;
  			len=strlen(sk->ip_mc_name);
  			err=verify_area(VERIFY_WRITE, optval, len);
		  	if(err)
  				return err;
  			put_user(len,(int *) optlen);
			memcpy_tofs((void *)optval,sk->ip_mc_name, len);
			return 0;
#endif
		default:
			return(-ENOPROTOOPT);
	}
	/* Common exit for the integer-valued options */
	err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
	if(err)
		return err;
	put_user(sizeof(int),(int *) optlen);

	err=verify_area(VERIFY_WRITE, optval, sizeof(int));
	if(err)
		return err;
	put_user(val,(int *) optval);

	return(0);
}
2327
2328 /*
2329 * Build and send a packet, with as little as one copy
2330 *
2331 * Doesn't care much about ip options... option length can be
2332 * different for fragment at 0 and other fragments.
2333 *
2334 * Note that the fragment at the highest offset is sent first,
2335 * so the getfrag routine can fill in the TCP/UDP checksum header
2336 * field in the last fragment it sends... actually it also helps
2337 * the reassemblers, they can put most packets in at the head of
2338 * the fragment queue, and they know the total size in advance. This
2339 * last feature will measurable improve the Linux fragment handler.
2340 *
2341 * The callback has five args, an arbitrary pointer (copy of frag),
2342 * the source IP address (may depend on the routing table), the
2343 * destination adddress (char *), the offset to copy from, and the
2344 * length to be copied.
2345 *
2346 */
2347
2348 int ip_build_xmit(struct sock *sk,
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2349 void getfrag (const void *,
2350 int,
2351 char *,
2352 unsigned int,
2353 unsigned int),
2354 const void *frag,
2355 unsigned short int length,
2356 int daddr,
2357 int flags,
2358 int type)
2359 {
2360 struct rtable *rt;
2361 unsigned int fraglen, maxfraglen, fragheaderlen;
2362 int offset, mf;
2363 unsigned long saddr;
2364 unsigned short id;
2365 struct iphdr *iph;
2366 int local=0;
2367 struct device *dev;
2368 int nfrags=0;
2369
2370 ip_statistics.IpOutRequests++;
2371
2372
2373 #ifdef CONFIG_INET_MULTICAST
2374 if(sk && MULTICAST(daddr) && *sk->ip_mc_name)
2375 {
2376 dev=dev_get(skb->ip_mc_name);
2377 if(!dev)
2378 return -ENODEV;
2379 rt=NULL;
2380 }
2381 else
2382 {
2383 #endif
2384 /*
2385 * Perform the IP routing decisions
2386 */
2387
2388 if(sk->localroute || flags&MSG_DONTROUTE)
2389 local=1;
2390
2391 rt = sk->ip_route_cache;
2392
2393 /*
2394 * See if the routing cache is outdated. We need to clean this up once we are happy it is reliable
2395 * by doing the invalidation actively in the route change and header change.
2396 */
2397
2398 saddr=sk->ip_route_saddr;
2399 if(!rt || sk->ip_route_stamp != rt_stamp || daddr!=sk->ip_route_daddr || sk->ip_route_local!=local || sk->saddr!=sk->ip_route_saddr)
2400 {
2401 if(local)
2402 rt = ip_rt_local(daddr, NULL, &saddr);
2403 else
2404 rt = ip_rt_route(daddr, NULL, &saddr);
2405 sk->ip_route_local=local;
2406 sk->ip_route_daddr=daddr;
2407 sk->ip_route_saddr=saddr;
2408 sk->ip_route_stamp=rt_stamp;
2409 sk->ip_route_cache=rt;
2410 sk->ip_hcache_ver=NULL;
2411 sk->ip_hcache_state= 0;
2412 }
2413 else if(rt)
2414 {
2415 /*
2416 * Attempt header caches only if the cached route is being reused. Header cache
2417 * is not ultra cheap to set up. This means we only set it up on the second packet,
2418 * so one shot communications are not slowed. We assume (seems reasonable) that 2 is
2419 * probably going to be a stream of data.
2420 */
2421 if(rt->rt_dev->header_cache && sk->ip_hcache_state!= -1)
2422 {
2423 if(sk->ip_hcache_ver==NULL || sk->ip_hcache_stamp!=*sk->ip_hcache_ver)
2424 rt->rt_dev->header_cache(rt->rt_dev,sk,saddr,daddr);
2425 else
2426 /* Can't cache. Remember this */
2427 sk->ip_hcache_state= -1;
2428 }
2429 }
2430
2431 if (rt == NULL)
2432 {
2433 ip_statistics.IpOutNoRoutes++;
2434 return(-ENETUNREACH);
2435 }
2436
2437 if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
2438 saddr = sk->saddr;
2439
2440 dev=rt->rt_dev;
2441 #ifdef CONFIG_INET_MULTICAST
2442 }
2443 #endif
2444
2445 /*
2446 * Now compute the buffer space we require
2447 */
2448
2449 /*
2450 * Try the simple case first. This leaves broadcast, multicast, fragmented frames, and by
2451 * choice RAW frames within 20 bytes of maximum size(rare) to the long path
2452 */
2453
2454 if(length+20 <= dev->mtu && !MULTICAST(daddr) && daddr!=0xFFFFFFFF && daddr!=dev->pa_brdaddr)
2455 {
2456 int error;
2457 struct sk_buff *skb=sock_alloc_send_skb(sk, length+20+15+dev->hard_header_len,0,&error);
2458 if(skb==NULL)
2459 {
2460 ip_statistics.IpOutDiscards++;
2461 return error;
2462 }
2463 skb->dev=dev;
2464 skb->free=1;
2465 skb->when=jiffies;
2466 skb->sk=sk;
2467 skb->arp=0;
2468 skb->saddr=saddr;
2469 length+=20; /* We do this twice so the subtract once is quicker */
2470 skb->raddr=(rt&&rt->rt_gateway)?rt->rt_gateway:daddr;
2471 skb_reserve(skb,(dev->hard_header_len+15)&~15);
2472 if(sk->ip_hcache_state>0)
2473 {
2474 memcpy(skb_push(skb,dev->hard_header_len),sk->ip_hcache_data,dev->hard_header_len);
2475 skb->arp=1;
2476 }
2477 else if(dev->hard_header)
2478 {
2479 if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0)
2480 skb->arp=1;
2481 }
2482 skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length);
2483 if(type!=IPPROTO_RAW)
2484 {
2485 iph->version=4;
2486 iph->ihl=5;
2487 iph->tos=sk->ip_tos;
2488 iph->tot_len = htons(length);
2489 iph->id=htons(ip_id_count++);
2490 iph->frag_off = 0;
2491 iph->ttl=sk->ip_ttl;
2492 iph->protocol=type;
2493 iph->saddr=saddr;
2494 iph->daddr=daddr;
2495 iph->check=0;
2496 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
2497 getfrag(frag,saddr,(void *)(iph+1),0, length-20);
2498 }
2499 else
2500 getfrag(frag,saddr,(void *)iph,0,length);
2501 #ifdef CONFIG_IP_ACCT
2502 ip_fw_chk((void *)skb->data,dev,ip_acct_chain, IP_FW_F_ACCEPT,1);
2503 #endif
2504 if(dev->flags&IFF_UP)
2505 dev_queue_xmit(skb,dev,sk->priority);
2506 else
2507 {
2508 ip_statistics.IpOutDiscards++;
2509 kfree_skb(skb, FREE_WRITE);
2510 }
2511 return 0;
2512 }
2513
2514
2515 fragheaderlen = dev->hard_header_len;
2516 if(type != IPPROTO_RAW)
2517 fragheaderlen += 20;
2518
2519 /*
2520 * Fragheaderlen is the size of 'overhead' on each buffer. Now work
2521 * out the size of the frames to send.
2522 */
2523
2524 maxfraglen = ((dev->mtu-20) & ~7) + fragheaderlen;
2525
2526 /*
2527 * Start at the end of the frame by handling the remainder.
2528 */
2529
2530 offset = length - (length % (maxfraglen - fragheaderlen));
2531
2532 /*
2533 * Amount of memory to allocate for final fragment.
2534 */
2535
2536 fraglen = length - offset + fragheaderlen;
2537
2538 if(fraglen==0)
2539 {
2540 fraglen = maxfraglen;
2541 offset -= maxfraglen-fragheaderlen;
2542 }
2543
2544
2545 /*
2546 * The last fragment will not have MF (more fragments) set.
2547 */
2548
2549 mf = 0;
2550
2551 /*
2552 * Can't fragment raw packets
2553 */
2554
2555 if (type == IPPROTO_RAW && offset > 0)
2556 return(-EMSGSIZE);
2557
2558 /*
2559 * Get an identifier
2560 */
2561
2562 id = htons(ip_id_count++);
2563
2564 /*
2565 * Being outputting the bytes.
2566 */
2567
2568 do
2569 {
2570 struct sk_buff * skb;
2571 int error;
2572 char *data;
2573
2574 /*
2575 * Get the memory we require with some space left for alignment.
2576 */
2577
2578 skb = sock_alloc_send_skb(sk, fraglen+15, 0, &error);
2579 if (skb == NULL)
2580 {
2581 ip_statistics.IpOutDiscards++;
2582 if(nfrags>1)
2583 ip_statistics.IpFragCreates++;
2584 return(error);
2585 }
2586
2587 /*
2588 * Fill in the control structures
2589 */
2590
2591 skb->next = skb->prev = NULL;
2592 skb->dev = dev;
2593 skb->when = jiffies;
2594 skb->free = 1; /* dubious, this one */
2595 skb->sk = sk;
2596 skb->arp = 0;
2597 skb->saddr = saddr;
2598 skb->raddr = (rt&&rt->rt_gateway) ? rt->rt_gateway : daddr;
2599 skb_reserve(skb,(dev->hard_header_len+15)&~15);
2600 data = skb_put(skb, fraglen-dev->hard_header_len);
2601
2602 /*
2603 * Save us ARP and stuff. In the optimal case we do no route lookup (route cache ok)
2604 * no ARP lookup (arp cache ok) and output. The cache checks are still too slow but
2605 * this can be fixed later. For gateway routes we ought to have a rt->.. header cache
2606 * pointer to speed header cache builds for identical targets.
2607 */
2608
2609 if(sk->ip_hcache_state>0)
2610 {
2611 memcpy(skb_push(skb,dev->hard_header_len),sk->ip_hcache_data, dev->hard_header_len);
2612 skb->arp=1;
2613 }
2614 else if (dev->hard_header)
2615 {
2616 if(dev->hard_header(skb, dev, ETH_P_IP,
2617 NULL, NULL, 0)>0)
2618 skb->arp=1;
2619 }
2620
2621 /*
2622 * Find where to start putting bytes.
2623 */
2624
2625 skb->ip_hdr = iph = (struct iphdr *)data;
2626
2627 /*
2628 * Only write IP header onto non-raw packets
2629 */
2630
2631 if(type != IPPROTO_RAW)
2632 {
2633
2634 iph->version = 4;
2635 iph->ihl = 5; /* ugh */
2636 iph->tos = sk->ip_tos;
2637 iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
2638 iph->id = id;
2639 iph->frag_off = htons(offset>>3);
2640 iph->frag_off |= mf;
2641 #ifdef CONFIG_IP_MULTICAST
2642 if (MULTICAST(daddr))
2643 iph->ttl = sk->ip_mc_ttl;
2644 else
2645 #endif
2646 iph->ttl = sk->ip_ttl;
2647 iph->protocol = type;
2648 iph->check = 0;
2649 iph->saddr = saddr;
2650 iph->daddr = daddr;
2651 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
2652 data += iph->ihl*4;
2653
2654 /*
2655 * Any further fragments will have MF set.
2656 */
2657
2658 mf = htons(IP_MF);
2659 }
2660
2661 /*
2662 * User data callback
2663 */
2664
2665 getfrag(frag, saddr, data, offset, fraglen-fragheaderlen);
2666
2667 /*
2668 * Account for the fragment.
2669 */
2670
2671 #ifdef CONFIG_IP_ACCT
2672 if(!offset)
2673 ip_fw_chk(iph, dev, ip_acct_chain, IP_FW_F_ACCEPT, 1);
2674 #endif
2675 offset -= (maxfraglen-fragheaderlen);
2676 fraglen = maxfraglen;
2677
2678 #ifdef CONFIG_IP_MULTICAST
2679
2680 /*
2681 * Multicasts are looped back for other local users
2682 */
2683
2684 if (MULTICAST(daddr) && !(dev->flags&IFF_LOOPBACK))
2685 {
2686 /*
2687 * Loop back any frames. The check for IGMP_ALL_HOSTS is because
2688 * you are always magically a member of this group.
2689 */
2690
2691 if(sk==NULL || sk->ip_mc_loop)
2692 {
2693 if(skb->daddr==IGMP_ALL_HOSTS)
2694 ip_loopback(rt->rt_dev,skb);
2695 else
2696 {
2697 struct ip_mc_list *imc=rt->rt_dev->ip_mc_list;
2698 while(imc!=NULL)
2699 {
2700 if(imc->multiaddr==daddr)
2701 {
2702 ip_loopback(rt->rt_dev,skb);
2703 break;
2704 }
2705 imc=imc->next;
2706 }
2707 }
2708 }
2709
2710 /*
2711 * Multicasts with ttl 0 must not go beyond the host. Fixme: avoid the
2712 * extra clone.
2713 */
2714
2715 if(skb->ip_hdr->ttl==0)
2716 kfree_skb(skb, FREE_READ);
2717 }
2718 #endif
2719
2720 nfrags++;
2721
2722 /*
2723 * BSD loops broadcasts
2724 */
2725
2726 if((dev->flags&IFF_BROADCAST) && (daddr==0xFFFFFFFF || daddr==dev->pa_brdaddr) && !(dev->flags&IFF_LOOPBACK))
2727 ip_loopback(dev,skb);
2728
2729 /*
2730 * Now queue the bytes into the device.
2731 */
2732
2733 if (dev->flags & IFF_UP)
2734 {
2735 dev_queue_xmit(skb, dev, sk->priority);
2736 }
2737 else
2738 {
2739 /*
2740 * Whoops...
2741 *
2742 * FIXME: There is a small nasty here. During the ip_build_xmit we could
2743 * page fault between the route lookup and device send, the device might be
2744 * removed and unloaded.... We need to add device locks on this.
2745 */
2746
2747 ip_statistics.IpOutDiscards++;
2748 if(nfrags>1)
2749 ip_statistics.IpFragCreates+=nfrags;
2750 kfree_skb(skb, FREE_WRITE);
2751 return(0); /* lose rest of fragments */
2752 }
2753 }
2754 while (offset >= 0);
2755 if(nfrags>1)
2756 ip_statistics.IpFragCreates+=nfrags;
2757 return(0);
2758 }
2759
2760
/*
 *	IP protocol layer initialiser
 */

/*
 *	Packet type binding IP to the device layer. The type field is
 *	left 0 here and filled in with htons(ETH_P_IP) by ip_init().
 */
static struct packet_type ip_packet_type =
{
	0,		/* type: set to htons(ETH_P_IP) at run time in ip_init() */
	NULL,		/* All devices */
	ip_rcv,		/* handler invoked for received IP frames */
	NULL,
	NULL,		/* remaining members unused at initialisation */
};
2773
2774 /*
2775 * Device notifier
2776 */
2777
2778 static int ip_rt_event(unsigned long event, void *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2779 {
2780 if(event==NETDEV_DOWN)
2781 ip_rt_flush(ptr);
2782 return NOTIFY_DONE;
2783 }
2784
/*
 *	Notifier block hooking ip_rt_event() into device state changes.
 *	Registered with register_netdevice_notifier() in ip_init().
 */
struct notifier_block ip_rt_notifier={
	ip_rt_event,	/* callback: flushes routes on NETDEV_DOWN */
	NULL,
	0		/* remaining members filled in by the notifier code */
};
2790
2791 /*
2792 * IP registers the packet type and then calls the subprotocol initialisers
2793 */
2794
2795 void ip_init(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/n_right.png)
![[first]](../icons/first.png)
![[last]](../icons/n_last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2796 {
2797 ip_packet_type.type=htons(ETH_P_IP);
2798 dev_add_pack(&ip_packet_type);
2799
2800 /* So we flush routes when a device is downed */
2801 register_netdevice_notifier(&ip_rt_notifier);
2802 /* ip_raw_init();
2803 ip_packet_init();
2804 ip_tcp_init();
2805 ip_udp_init();*/
2806 }
2807