/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Version:	@(#)ip.c	1.0.16b	9/1/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped.
 *					We used to process them non-broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E).
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks.
 *		Alan Cox	:	Even more checks: still not as robust as it ought to be.
 *		Alan Cox	:	Save IP header pointer for later.
 *		Alan Cox	:	IP option setting.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk).
 *		Dmitry Gorodchanin:	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly IP bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/	:	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints.
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat.
 *		Alan Cox	:	SNMP statistics.
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file.
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *		Richard Underwood:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo.
 *		Alan Cox	:	Always in group 224.0.0.1.
 *		Pauline Middelink:	Fast ip_checksum update when forwarding.
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1.
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too).
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!).
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT.
 *		Arnt Gulbrandsen:	ip_build_xmit.
 *		Alan Cox	:	Per socket routing cache.
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing.
 *
 * To Fix:
 *		IP option processing is mostly not needed. ip_forward needs to know about routing rules
 *		and time stamps but that's about all. Use the route mtu field here too.
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 *
 *		FIXME: copy frag 0 iph to qp->iph
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/segment.h>
#include <asm/system.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <linux/igmp.h>
#include <linux/ip_fw.h>

#define CONFIG_IP_DEFRAG

extern int last_retran;
extern void sort_send(struct sock *sk);

#define min(a,b)	((a)<(b)?(a):(b))
#define LOOPBACK(x)	(((x) & htonl(0xff000000)) == htonl(0x7f000000))
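/*
 * Example: LOOPBACK() works on network byte order addresses, so
 * LOOPBACK(htonl(0x7f000001)) - i.e. 127.0.0.1 - is true, while
 * LOOPBACK(htonl(0xc0a80001)) - 192.168.0.1 - is false. Only the
 * top octet (127) is compared.
 */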

/*
 *	SNMP management statistics
 */

#ifdef CONFIG_IP_FORWARD
struct ip_mib ip_statistics={1,64,};	/* Forwarding=Yes, Default TTL=64 */
#else
struct ip_mib ip_statistics={0,64,};	/* Forwarding=No, Default TTL=64 */
#endif

/*
 *	Handle the issuing of an ioctl() request
 *	for the ip device. This is scheduled to
 *	disappear.
 */

int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch(cmd)
	{
		default:
			return(-EINVAL);
	}
}


/*
 *	Take an skb, and fill in the MAC header.
 */

static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
{
	int mac = 0;

	skb->dev = dev;
	skb->arp = 1;
	if (dev->hard_header)
	{
		/*
		 *	Build a hardware header. Source address is our mac, destination unknown
		 *	(rebuild header will sort this out).
		 */
		mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
		if (mac < 0)
		{
			mac = -mac;
			skb->arp = 0;
			skb->raddr = daddr;	/* next routing address */
		}
	}
	return mac;
}

int ip_id_count = 0;

/*
 *	This routine builds the appropriate hardware/IP headers for
 *	the outgoing frame. It assumes that if *dev != NULL then the
 *	protocol knows what it's doing, otherwise it uses the
 *	routing/ARP tables to select a device struct.
 */
int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
		struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
{
	struct rtable *rt;
	unsigned char *buff;
	unsigned long raddr;
	int tmp;
	unsigned long src;
	struct iphdr *iph;

	buff = skb->data;

	/*
	 *	See if we need to look up the device.
	 */

#ifdef CONFIG_INET_MULTICAST
	if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
		*dev=dev_get(skb->sk->ip_mc_name);
#endif
	if (*dev == NULL)
	{
		if(skb->localroute)
			rt = ip_rt_local(daddr, NULL, &src);
		else
			rt = ip_rt_route(daddr, NULL, &src);
		if (rt == NULL)
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}

		*dev = rt->rt_dev;
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback.
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;	/*rt->rt_dev->pa_addr;*/
		raddr = rt->rt_gateway;
	}
	else
	{
		/*
		 *	We still need the address of the first hop.
		 */
		if(skb->localroute)
			rt = ip_rt_local(daddr, NULL, &src);
		else
			rt = ip_rt_route(daddr, NULL, &src);
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback.
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;	/*rt->rt_dev->pa_addr;*/

		raddr = (rt == NULL) ? 0 : rt->rt_gateway;
	}

	/*
	 *	No source addr so make it our addr.
	 */
	if (saddr == 0)
		saddr = src;

	/*
	 *	No gateway so aim at the real destination.
	 */
	if (raddr == 0)
		raddr = daddr;

	/*
	 *	Now build the MAC header.
	 */

	tmp = ip_send(skb, raddr, len, *dev, saddr);
	buff += tmp;
	len -= tmp;

	/*
	 *	Book keeping.
	 */

	skb->dev = *dev;
	skb->saddr = saddr;
	if (skb->sk)
		skb->sk->saddr = saddr;

	/*
	 *	Now build the IP header.
	 */

	/*
	 *	If we are using IPPROTO_RAW, then we don't need an IP header, since
	 *	one is being supplied to us by the user.
	 */

	if(type == IPPROTO_RAW)
		return (tmp);

	/*
	 *	Build the IP addresses.
	 */

	iph=(struct iphdr *)buff;

	iph->version  = 4;
	iph->tos      = tos;
	iph->frag_off = 0;
	iph->ttl      = ttl;
	iph->daddr    = daddr;
	iph->saddr    = saddr;
	iph->protocol = type;
	iph->ihl      = 5;
	skb->ip_hdr   = iph;

	return(20 + tmp);	/* IP header plus MAC header size */
}


/*
 *	Generate a checksum for an outgoing IP datagram.
 */

void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
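
/*
 * For reference, ip_fast_csum computes the standard 16-bit one's
 * complement checksum over the header (RFC 1071); the fast inline
 * version lives in ip.h. The sketch below is an illustrative,
 * unoptimised C equivalent (not part of the build) - 'ihl' is the
 * header length in 32-bit words, exactly as passed above.
 */
#if 0
static unsigned short ip_slow_csum(unsigned char *iph, unsigned int ihl)
{
	unsigned long sum = 0;
	int words = ihl * 2;			/* 16-bit words in the header */

	while (words-- > 0)
	{
		sum += (iph[0] << 8) | iph[1];	/* big-endian 16-bit word */
		iph += 2;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold in end-around carry */
	return htons((unsigned short) ~sum);	/* one's complement, network order */
}
#endif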

/************************ Fragment Handlers From NET2E **********************************/


/*
 *	This fragment handler is a bit of a heap. On the other hand it works quite
 *	happily and handles things quite well.
 */

static struct ipq *ipqueue = NULL;	/* IP fragment queue */

/*
 *	Create a new fragment entry.
 */

static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
{
	struct ipfrag *fp;

	fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
	if (fp == NULL)
	{
		NETDEBUG(printk("IP: frag_create: no memory left !\n"));
		return(NULL);
	}
	memset(fp, 0, sizeof(struct ipfrag));

	/* Fill in the structure. */
	fp->offset = offset;
	fp->end = end;
	fp->len = end - offset;
	fp->skb = skb;
	fp->ptr = ptr;

	return(fp);
}


/*
 *	Find the correct entry in the "incomplete datagrams" queue for
 *	this IP datagram, and return the queue entry address if found.
 */

static struct ipq *ip_find(struct iphdr *iph)
{
	struct ipq *qp;
	struct ipq *qplast;

	cli();
	qplast = NULL;
	for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
	{
		if (iph->id == qp->iph->id && iph->saddr == qp->iph->saddr &&
			iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
		{
			del_timer(&qp->timer);	/* So it doesn't vanish on us. The timer will be reset anyway */
			sti();
			return(qp);
		}
	}
	sti();
	return(NULL);
}


/*
 *	Remove an entry from the "incomplete datagrams" queue, either
 *	because we completed, reassembled and processed it, or because
 *	it timed out.
 */

static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 *	Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;
		IS_SKB(fp->skb);
		kfree_skb(fp->skb,FREE_READ);
		kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the MAC header. */
	kfree_s(qp->mac, qp->maclen);

	/* Release the IP header. */
	kfree_s(qp->iph, 64 + 8);

	/* Finally, release the queue descriptor itself. */
	kfree_s(qp, sizeof(struct ipq));
	sti();
}


/*
 *	Oops - a fragment queue timed out. Kill it and send an ICMP reply.
 */

static void ip_expire(unsigned long arg)
{
	struct ipq *qp;

	qp = (struct ipq *)arg;

	/*
	 *	Send an ICMP "Fragment Reassembly Timeout" message.
	 */

	ip_statistics.IpReasmTimeout++;
	ip_statistics.IpReasmFails++;
	/* This if is always true... shrug */
	if(qp->fragments!=NULL)
		icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
				ICMP_EXC_FRAGTIME, 0, qp->dev);

	/*
	 *	Nuke the fragment queue.
	 */
	ip_free(qp);
}


/*
 *	Add an entry to the 'ipq' queue for a newly received IP datagram.
 *	We will (hopefully :-) receive all other fragments of this datagram
 *	in time, so we just create a queue for this datagram, in which we
 *	will insert the received fragments at their respective positions.
 */

static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
{
	struct ipq *qp;
	int maclen;
	int ihlen;

	qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
	if (qp == NULL)
	{
		NETDEBUG(printk("IP: create: no memory left !\n"));
		return(NULL);
	}
	memset(qp, 0, sizeof(struct ipq));

	/*
	 *	Allocate memory for the MAC header.
	 *
	 *	FIXME: We have a maximum MAC address size limit and define
	 *	elsewhere. We should use it here and avoid the 3 kmalloc() calls.
	 */

	maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
	qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
	if (qp->mac == NULL)
	{
		NETDEBUG(printk("IP: create: no memory left !\n"));
		kfree_s(qp, sizeof(struct ipq));
		return(NULL);
	}

	/*
	 *	Allocate memory for the IP header (plus 8 octets for ICMP).
	 */

	ihlen = (iph->ihl * sizeof(unsigned long));
	qp->iph = (struct iphdr *) kmalloc(64 + 8, GFP_ATOMIC);
	if (qp->iph == NULL)
	{
		NETDEBUG(printk("IP: create: no memory left !\n"));
		kfree_s(qp->mac, maclen);
		kfree_s(qp, sizeof(struct ipq));
		return(NULL);
	}

	/* Fill in the structure. */
	memcpy(qp->mac, skb->data, maclen);
	memcpy(qp->iph, iph, ihlen + 8);
	qp->len = 0;
	qp->ihlen = ihlen;
	qp->maclen = maclen;
	qp->fragments = NULL;
	qp->dev = dev;

	/* Start a timer for this entry. */
	qp->timer.expires = IP_FRAG_TIME;	/* about 30 seconds	*/
	qp->timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->timer.function = ip_expire;		/* expire function	*/
	add_timer(&qp->timer);

	/* Add this entry to the queue. */
	qp->prev = NULL;
	cli();
	qp->next = ipqueue;
	if (qp->next != NULL)
		qp->next->prev = qp;
	ipqueue = qp;
	sti();
	return(qp);
}


/*
 *	See if a fragment queue is complete.
 */

static int ip_done(struct ipq *qp)
{
	struct ipfrag *fp;
	int offset;

	/* Only possible if we received the final fragment. */
	if (qp->len == 0)
		return(0);

	/* Check all fragment offsets to see if they connect. */
	fp = qp->fragments;
	offset = 0;
	while (fp != NULL)
	{
		if (fp->offset > offset)
			return(0);	/* fragment(s) missing */
		offset = fp->end;
		fp = fp->next;
	}

	/* All fragments are present. */
	return(1);
}
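
/*
 * Illustration of the contiguity walk above: with a 4000 byte datagram
 * fragmented at 1480 byte boundaries the list should read
 *	[0,1480) [1480,2960) [2960,4000)
 * and qp->len is 4000 once the final (MF=0) fragment arrives. If the
 * middle fragment is missing the walk sees fp->offset 2960 > offset 1480
 * and reports the datagram incomplete.
 */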

/*
 *	Build a new IP datagram from all its fragments.
 *
 *	FIXME: We copy here because we lack an effective way of handling lists
 *	of bits on input. Until the new skb data handling is in I'm not going
 *	to touch this with a bargepole. This also causes a 4Kish limit on
 *	packet sizes.
 */

static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 *	Allocate a new buffer for the datagram.
	 */

	len = qp->maclen + qp->ihlen + qp->len;

	if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb->len = (len - qp->maclen);
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original MAC and IP headers into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
	ptr += qp->maclen;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;
	skb->h.raw += qp->maclen;

	count = 0;

	/* Copy the data portions of all fragments into the new buffer. */
	fp = qp->fragments;
	while(fp != NULL)
	{
		if(count+fp->len > skb->len)
		{
			NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
			ip_free(qp);
			kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header. */
	iph = skb->h.iph;
	iph->frag_off = 0;
	iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}


/*
 *	Process an incoming IP datagram fragment.
 */

static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
{
	struct ipfrag *prev, *next, *tmp;
	struct ipfrag *tfp;
	struct ipq *qp;
	struct sk_buff *skb2;
	unsigned char *ptr;
	int flags, offset;
	int i, ihl, end;

	ip_statistics.IpReasmReqds++;

	/* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
	qp = ip_find(iph);

	/* Is this a non-fragmented datagram? */
	offset = ntohs(iph->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	if (((flags & IP_MF) == 0) && (offset == 0))
	{
		if (qp != NULL)
			ip_free(qp);	/* Huh? How could this exist?? */
		return(skb);
	}

	offset <<= 3;	/* offset is in 8-byte chunks */

	/*
	 *	If the queue already existed, keep restarting its timer as long
	 *	as we still are receiving fragments. Otherwise, create a fresh
	 *	queue entry.
	 */

	if (qp != NULL)
	{
		del_timer(&qp->timer);
		qp->timer.expires = IP_FRAG_TIME;	/* about 30 seconds */
		qp->timer.data = (unsigned long) qp;	/* pointer to queue */
		qp->timer.function = ip_expire;		/* expire function */
		add_timer(&qp->timer);
	}
	else
	{
		/*
		 *	If we failed to create it, then discard the frame.
		 */
		if ((qp = ip_create(skb, iph, dev)) == NULL)
		{
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
	}

	/*
	 *	Determine the position of this fragment.
	 */

	ihl = (iph->ihl * sizeof(unsigned long));
	end = offset + ntohs(iph->tot_len) - ihl;

	/*
	 *	Point into the IP datagram 'data' part.
	 */

	ptr = skb->data + dev->hard_header_len + ihl;

	/*
	 *	Is this the final fragment?
	 */

	if ((flags & IP_MF) == 0)
		qp->len = end;

	/*
	 *	Find out which fragments are in front and at the back of us
	 *	in the chain of fragments so far. We must know where to put
	 *	this fragment, right?
	 */

	prev = NULL;
	for(next = qp->fragments; next != NULL; next = next->next)
	{
		if (next->offset > offset)
			break;	/* bingo! */
		prev = next;
	}

	/*
	 *	We found where to put this one.
	 *	Check for overlap with preceding fragment, and, if needed,
	 *	align things so that any overlaps are eliminated.
	 */
	if (prev != NULL && offset < prev->end)
	{
		i = prev->end - offset;
		offset += i;	/* ptr into datagram */
		ptr += i;	/* ptr into fragment data */
	}
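
	/*
	 * For example, if the previous fragment covers [0,100) and this one
	 * arrives claiming [80,200), the 20 overlapping bytes are skipped:
	 * offset becomes 100 and ptr advances 20 bytes, so only [100,200)
	 * of the new fragment is kept.
	 */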

	/*
	 *	Look for overlap with succeeding segments.
	 *	If we can merge fragments, do it.
	 */

	for(tmp=next; tmp != NULL; tmp = tfp)
	{
		tfp = tmp->next;
		if (tmp->offset >= end)
			break;		/* no overlaps at all */

		i = end - tmp->offset;	/* overlap is 'i' bytes */
		tmp->len -= i;		/* so reduce size of	*/
		tmp->offset += i;	/* next fragment	*/
		tmp->ptr += i;
		/*
		 *	If we get a frag size of <= 0, remove it and the packet
		 *	that it goes with.
		 */
		if (tmp->len <= 0)
		{
			if (tmp->prev != NULL)
				tmp->prev->next = tmp->next;
			else
				qp->fragments = tmp->next;

			if (tmp->next != NULL)
				tmp->next->prev = tmp->prev;

			next=tfp;	/* We have killed the original next frame */

			kfree_skb(tmp->skb,FREE_READ);
			kfree_s(tmp, sizeof(struct ipfrag));
		}
	}

	/*
	 *	Insert this fragment in the chain of fragments.
	 */

	tfp = ip_frag_create(offset, end, skb, ptr);

	/*
	 *	No memory to save the fragment - so throw the lot.
	 */

	if (!tfp)
	{
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return NULL;
	}
	tfp->prev = prev;
	tfp->next = next;
	if (prev != NULL)
		prev->next = tfp;
	else
		qp->fragments = tfp;

	if (next != NULL)
		next->prev = tfp;

	/*
	 *	OK, so we inserted this new fragment into the chain.
	 *	Check if we now have a full IP datagram which we can
	 *	bump up to the IP layer...
	 */

	if (ip_done(qp))
	{
		skb2 = ip_glue(qp);	/* glue together the fragments */
		return(skb2);
	}
	return(NULL);
}


/*
 *	This IP datagram is too large to be sent in one piece. Break it up into
 *	smaller pieces (each of size equal to the MAC header plus IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending by calling
 *	ip_queue_xmit(). Note that this is recursion, and bad things will happen
 *	if this function causes a loop...
 *
 *	Yes this is inefficient, feel free to submit a quicker one.
 *
 *	**Protocol Violation**
 *	We copy all the options to each fragment. !FIXME!
 */
void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;
	unsigned long flags;

	/*
	 *	Point into the IP datagram header.
	 */

	raw = skb->data;
	iph = (struct iphdr *) (raw + dev->hard_header_len);

	skb->ip_hdr = iph;

	/*
	 *	Setup starting values.
	 */

	hlen = (iph->ihl * sizeof(unsigned long));
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	hlen += dev->hard_header_len;		/* Total header size */
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 *	Check for any "DF" flag. [DF means do not fragment]
	 */

	if (ntohs(iph->frag_off) & IP_DF)
	{
		/*
		 *	Reply giving the MTU of the failed hop.
		 */
		ip_statistics.IpFragFails++;
		icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
		return;
	}

	/*
	 *	The protocol doesn't seem to say what to do in the case that the
	 *	frame + options doesn't fit the mtu. As it used to fall down dead
	 *	in this case we were fortunate it didn't happen.
	 */

	if(mtu<8)
	{
		/* It's wrong but it's better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 *	Fragment the datagram.
	 */

	/*
	 *	The initial offset is 0 for a complete frame. When
	 *	fragmenting fragments it's wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
	else
		offset = 0;

	/*
	 *	Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
		{
			NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 *	Set up data on packet.
		 */

		skb2->arp = skb->arp;
		if(skb->free==0)
			printk("IP fragmenter: BUG free!=1 in fragmenter\n");
		skb2->free = 1;
		skb2->len = len + hlen;
		skb2->h.raw=(char *) skb2->data;
		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess.
		 */

		save_flags(flags);
		if (sk)
		{
			cli();
			sk->wmem_alloc += skb2->mem_len;
			skb2->sk=sk;
		}
		restore_flags(flags);
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=dev->hard_header_len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *	last fragment then keep MF set on each piece.
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}
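
/*
 * Worked example of the arithmetic above: on an Ethernet (mtu 1500,
 * 20 byte IP header, so 1480 bytes of data space) a 4000 byte payload
 * leaves in three pieces. 1480 is already a multiple of 8, so the
 * fragments carry bytes [0,1480), [1480,2960) and [2960,4000), with
 * frag_off values of 0, 185 and 370 (offsets in 8-byte units) and MF
 * set on all but the last.
 */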


#ifdef CONFIG_IP_FORWARD

/*
 *	Forward an IP datagram to its next destination.
 */

void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict)
{
	struct device *dev2;	/* Output device */
	struct iphdr *iph;	/* Our header */
	struct sk_buff *skb2;	/* Output packet */
	struct rtable *rt;	/* Route we use */
	unsigned char *ptr;	/* Data pointer */
	unsigned long raddr;	/* Router IP address */
#ifdef CONFIG_IP_FIREWALL
	int fw_res = 0;		/* Forwarding result */

	/*
	 *	See if we are allowed to forward this.
	 *	Note: demasqueraded fragments are always 'back'warded.
	 */

	if(!(is_frag&4) && (fw_res=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0))!=1)
	{
		if(fw_res==-1)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
		return;
	}
#endif
	/*
	 *	According to the RFC, we must first decrease the TTL field. If
	 *	that reaches zero, we must reply an ICMP control message telling
	 *	that the packet's lifetime expired.
	 *
	 *	Exception:
	 *	We may not generate an ICMP for an ICMP. icmp_send does the
	 *	enforcement of this so we can forget it here. It is however
	 *	sometimes VERY important.
	 */

	iph = skb->h.iph;
	iph->ttl--;

	/*
	 *	Re-compute the IP header checksum.
	 *	This is inefficient. We know what has happened to the header
	 *	and could thus adjust the checksum as Phil Karn does in KA9Q.
	 */

	iph->check = ntohs(iph->check) + 0x0100;
	if ((iph->check & 0xFF00) == 0)
		iph->check++;		/* carry overflow */
	iph->check = htons(iph->check);
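
	/*
	 * The three lines above are an incremental update of the one's
	 * complement checksum: decrementing TTL decreases the 16-bit word
	 * holding TTL/protocol by 0x0100, so the stored complement must
	 * increase by 0x0100, with the end-around carry folded back in by
	 * hand - essentially the short-cut RFC 1141 describes.
	 */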

	if (iph->ttl <= 0)
	{
		/* Tell the sender its packet died... */
		icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
		return;
	}

	/*
	 *	OK, the packet is still valid. Fetch its destination address,
	 *	and give it to the IP sender for further processing.
	 */

	rt = ip_rt_route(target_addr, NULL, NULL);
	if (rt == NULL)
	{
		/*
		 *	Tell the sender its packet cannot be delivered. Again
		 *	ICMP is screened later.
		 */
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
		return;
	}


	/*
	 *	Gosh. Not only is the packet valid; we even know how to
	 *	forward it onto its final destination. Can we say this
	 *	is being plain lucky?
	 *	If the router told us that there is no GW, use the dest.
	 *	IP address itself - we seem to be connected directly...
	 */

	raddr = rt->rt_gateway;

	if (raddr != 0)
	{
		/*
		 *	Strict routing permits no gatewaying.
		 */

		if(target_strict)
		{
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev);
			kfree_skb(skb, FREE_READ);
			return;
		}

		/*
		 *	There is a gateway so find the correct route for it.
		 *	Gateways cannot in turn be gatewayed.
		 */

		rt = ip_rt_route(raddr, NULL, NULL);
		if (rt == NULL)
		{
			/*
			 *	Tell the sender its packet cannot be delivered...
			 */
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
			return;
		}
		if (rt->rt_gateway != 0)
			raddr = rt->rt_gateway;
	}
	else
		raddr = target_addr;

	/*
	 *	Having picked a route we can now send the frame out.
	 */

	dev2 = rt->rt_dev;

	/*
	 *	In IP you never have to forward a frame on the interface that it
	 *	arrived upon. We now generate an ICMP HOST REDIRECT giving the route
	 *	we calculated.
	 */
#ifndef CONFIG_IP_NO_ICMP_REDIRECT
	if (dev == dev2 && !((iph->saddr^iph->daddr)&dev->pa_mask) && rt->rt_flags&RTF_MODIFIED)
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
#endif

	/*
	 *	We now allocate a new buffer, and copy the datagram into it.
	 *	If the indicated interface is up and running, kick it.
	 */

	if (dev2->flags & IFF_UP)
	{
#ifdef CONFIG_IP_MASQUERADE
		/*
		 *	If this fragment needs masquerading, make it so...
		 *	(Don't masquerade de-masqueraded fragments.)
		 */
		if (!(is_frag&4) && fw_res==2)
			ip_fw_masquerade(&skb, dev2);
#endif

		/*
		 *	Current design decrees we copy the packet. For identical header
		 *	lengths we could avoid it. The new skb code will let us push
		 *	data so the problem goes away then.
		 */

		skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
		/*
		 *	This is rare and since IP is tolerant of network failures
		 *	quite harmless.
		 */
		if (skb2 == NULL)
		{
			NETDEBUG(printk("\nIP: No memory available for IP forward\n"));
			return;
		}
		ptr = skb2->data;
		skb2->free = 1;
		skb2->len = skb->len + dev2->hard_header_len;
		skb2->h.raw = ptr;

		/*
		 *	Copy the packet data into the new buffer.
		 */
		memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);

		/* Now build the MAC header. */
		(void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);

		ip_statistics.IpForwDatagrams++;

		/*
		 *	See if it needs fragmenting. Note in ip_rcv we tagged
		 *	the fragment type. This must be right so that
		 *	the fragmenter does the right thing.
		 */

		if(skb2->len > dev2->mtu + dev2->hard_header_len)
		{
			ip_fragment(NULL,skb2,dev2, is_frag);
			kfree_skb(skb2,FREE_WRITE);
		}
		else
		{
#ifdef CONFIG_IP_ACCT
			/*
			 *	Count mapping we shortcut.
			 */

			ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
#endif

			/*
			 *	Map service types to priority. We lie about
			 *	throughput being low priority, but it's a good
			 *	choice to help improve general usage.
			 */
			if(iph->tos & IPTOS_LOWDELAY)
				dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
			else if(iph->tos & IPTOS_THROUGHPUT)
				dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
			else
				dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
		}
	}
}


#endif

/*
 *	This function receives all incoming IP datagrams.
 */

int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->h.iph;
	struct sock *raw_sk=NULL;
	unsigned char hash;
	unsigned char flag = 0;
	struct inet_protocol *ipprot;
	int brd=IS_MYADDR;
	unsigned long target_addr;
	int target_strict=0;
	int is_frag=0;
#ifdef CONFIG_IP_FIREWALL
	int err;
#endif

	ip_statistics.IpInReceives++;

	/*
	 *	Tag the ip header of this packet so we can find it.
	 */

	skb->ip_hdr = iph;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *	RFC1122: 3.1.2.3 MUST discard a frame with invalid source address [NEEDS FIXING].
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 *	(5.	We ought to check for IP multicast addresses and undefined types... does this matter?)
	 */

	if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4
		|| ip_fast_csum((unsigned char *)iph, iph->ihl) != 0
		|| skb->len < ntohs(iph->tot_len))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return(0);
	}

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 */

	skb->len=ntohs(iph->tot_len);

	/*
	 *	See if the firewall wants to dispose of the packet.
	 */

#ifdef CONFIG_IP_FIREWALL

	if ((err=ip_fw_chk(iph,dev,ip_fw_blk_chain,ip_fw_blk_policy, 0))<1)
	{
		if(err==-1)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

#endif


	/*
	 *	Next analyse the packet for options. Studies show under one packet in
	 *	a thousand has options....
	 */

	target_addr = iph->daddr;

	if (iph->ihl != 5)
	{
		/* Humph.. options. Lots of annoying fiddly bits. */

		/*
		 *	This is straight from the RFC. It might even be right ;)
		 *
		 *	RFC 1122: 3.2.1.8 STREAMID option is obsolete and MUST be ignored.
		 *	RFC 1122: 3.2.1.8 MUST NOT crash on a zero length option.
		 *	RFC 1122: 3.2.1.8 MUST support acting as final destination of a source route.
		 */

		int opt_space=4*(iph->ihl-5);
		int opt_size;
		unsigned char *opt_ptr=skb->h.raw+sizeof(struct iphdr);

		while(opt_space>0)
		{
			if(*opt_ptr==IPOPT_NOOP)
			{
				opt_ptr++;
				opt_space--;
				continue;
			}
			if(*opt_ptr==IPOPT_END)
				break;	/* Done */
			if(opt_space<2 || (opt_size=opt_ptr[1])<2 || opt_ptr[1]>opt_space)
			{
				/*
				 *	RFC 1122: 3.2.2.5 SHOULD send parameter problem reports.
				 */
				icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
				kfree_skb(skb, FREE_READ);
				return -EINVAL;
			}
			switch(opt_ptr[0])
			{
				case IPOPT_SEC:
					/* Should we drop this ?? */
					break;
				case IPOPT_SSRR:	/* These work almost the same way */
					target_strict=1;
					/* Fall through */
				case IPOPT_LSRR:
#ifdef CONFIG_IP_NOSR
					kfree_skb(skb, FREE_READ);
					return -EINVAL;
#endif
				case IPOPT_RR:
					/*
					 *	RFC 1122: 3.2.1.8 Support for RR is OPTIONAL.
					 */
					if (iph->daddr!=skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
						break;
					if((opt_size<3) || ( opt_ptr[0]==IPOPT_RR && opt_ptr[2] > opt_size-4 ))
					{
						if(ip_chk_addr(iph->daddr))
							icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
						kfree_skb(skb, FREE_READ);
						return -EINVAL;
					}
					if(opt_ptr[2] > opt_size-4 )
						break;
					/* Bytes are [IPOPT_xxRR][Length][EntryPointer][Entry0][Entry1].... */
					/* This isn't going to be too portable - FIXME */
					if(opt_ptr[0]!=IPOPT_RR)
					{
						int t;
						target_addr=*(long *)(&opt_ptr[opt_ptr[2]]);	/* Get hop */
						t=ip_chk_addr(target_addr);
						if(t==IS_MULTICAST||t==IS_BROADCAST)
						{
							if(ip_chk_addr(iph->daddr))
								icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
							kfree_skb(skb,FREE_READ);
							return -EINVAL;
						}
					}
					*(long *)(&opt_ptr[opt_ptr[2]])=skb->dev->pa_addr;	/* Record hop */
					break;
				case IPOPT_TIMESTAMP:
					/*
					 *	RFC 1122: 3.2.1.8 The timestamp option is OPTIONAL but if implemented
					 *	MUST meet various rules (read the spec).
					 */
					NETDEBUG(printk("ICMP: Someone finish the timestamp routine ;)\n"));
					break;
				default:
					break;
			}
			opt_ptr+=opt_size;
			opt_space-=opt_size;
		}

	}
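
	/*
	 * Option layout example (record route): a maximally empty RR option
	 * reads 0x07 0x27 0x04 followed by nine 4-byte slots - type 7,
	 * length 39, pointer 4 (the first free slot, counting from 1).
	 * Each hop that records itself writes its address at the pointer
	 * and bumps the pointer by 4, which is what the code above does.
	 */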

	/*
	 *	Remember if the frame is fragmented.
	 */

	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=1;
		/*
		 *	A fragment other than the first?
		 */

		if (ntohs(iph->frag_off) & 0x1fff)
			is_frag|=2;
	}

	/*
	 *	Do any IP forwarding required. chk_addr() is expensive -- avoid it someday.
	 *
	 *	This is inefficient. While finding out if it is for us we could also compute
	 *	the routing table entry. This is where the great unified cache theory comes
	 *	in as and when someone implements it.
	 *
	 *	For most hosts over 99% of packets match the first conditional
	 *	and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
	 *	function entry.
	 */

	if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0)
	{
#ifdef CONFIG_IP_MULTICAST

		if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
		{
			/*
			 *	Check it is for one of our groups.
			 */
			struct ip_mc_list *ip_mc=dev->ip_mc_list;
			do
			{
				if(ip_mc==NULL)
				{
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				if(ip_mc->multiaddr==iph->daddr)
					break;
				ip_mc=ip_mc->next;
			}
			while(1);
		}
#endif

#ifdef CONFIG_IP_MASQUERADE
		/*
		 *	Do we need to de-masquerade this fragment?
		 */
		if (ip_fw_demasquerade(skb))
		{
			struct iphdr *iph=skb->h.iph;
			ip_forward(skb, dev, is_frag|4, iph->daddr, 0);
			kfree_skb(skb, FREE_WRITE);
			return(0);
		}
#endif

		/*
		 *	Account for the packet.
		 */

#ifdef CONFIG_IP_ACCT
		ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
#endif

		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one. */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;
			skb->dev = dev;
			iph=skb->h.iph;
		}

		/*
		 *	Point into the IP datagram, just past the header.
		 */

		skb->ip_hdr = iph;
		skb->h.raw += iph->ihl*4;

		/*
		 *	Deliver to raw sockets. This is fun: to keep things fast we
		 *	want to make no surplus copies.
		 *
		 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
		 */

		hash = iph->protocol & (SOCK_ARRAY_SIZE-1);

		/*
		 *	If there may be a raw socket we must check - if not we couldn't care less.
		 */

		if((raw_sk=raw_prot.sock_array[hash])!=NULL)
		{
			struct sock *sknext=NULL;
			struct sk_buff *skb1;
			raw_sk=get_sock_raw(raw_sk, hash, iph->saddr, iph->daddr);
			if(raw_sk)	/* Any raw sockets */
			{
				do
				{
					/* Find the next */
					sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
					if(sknext)
						skb1=skb_clone(skb, GFP_ATOMIC);
					else
						break;	/* One pending raw socket left */
					if(skb1)
						raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
					raw_sk=sknext;
				}
				while(raw_sk!=NULL);

				/*
				 *	Here either raw_sk is the last raw socket, or NULL if none.
				 */

				/*
				 *	We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy.
				 */
			}
		}

		/*
		 *	skb->h.raw now points at the protocol beyond the IP header.
		 */

		hash = iph->protocol & (MAX_INET_PROTOS -1);
		for (ipprot = (struct inet_protocol *)inet_protos[hash]; ipprot != NULL; ipprot = (struct inet_protocol *)ipprot->next)
		{
			struct sk_buff *skb2;

			if (ipprot->protocol != iph->protocol)
				continue;
			/*
			 *	See if we need to make a copy of it. This will
			 *	only be set if more than one protocol wants it,
			 *	and then not for the last one. If there is a pending
			 *	raw delivery wait for that.
			 */

			if (ipprot->copy || raw_sk)
			{
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if(skb2==NULL)
					continue;
			}
			else
			{
				skb2 = skb;
			}
			flag = 1;

			/*
			 *	Pass on the datagram to each protocol that wants it,
			 *	based on the datagram protocol. We should really
			 *	check the protocol handler's return values here...
			 */

			ipprot->handler(skb2, dev, NULL, iph->daddr,
					(ntohs(iph->tot_len) - (iph->ihl * 4)),
					iph->saddr, 0, ipprot);
		}

		/*
		 *	All protocols checked.
		 *	If this packet was a broadcast, we may *not* reply to it, since that
		 *	causes (proven, grin) ARP storms and a leakage of memory (i.e. all
		 *	ICMP reply messages get queued up for transmission...)
		 */

		if(raw_sk!=NULL)	/* Shift to last raw user */
			raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
		else if (!flag)		/* Free and report errors */
		{
			if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
			kfree_skb(skb, FREE_WRITE);
		}

		return(0);
	}

	/*
	 *	Don't forward multicast or broadcast frames.
	 */

	if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
	{
		kfree_skb(skb,FREE_WRITE);
		return 0;
	}

	/*
	 *	The packet is for another target. Forward the frame.
	 */

#ifdef CONFIG_IP_FORWARD
	ip_forward(skb, dev, is_frag, target_addr, target_strict);
#else
/*	printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
		iph->saddr,iph->daddr);*/
	ip_statistics.IpInAddrErrors++;
#endif
	/*
	 *	The forwarder is inefficient and copies the packet. We
	 *	free the original now.
	 */

	kfree_skb(skb, FREE_WRITE);
	return(0);
}


/*
 *	Loop a packet back to the sender.
 */

static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
{
	extern struct device loopback_dev;
	struct device *dev=&loopback_dev;
	int len=skb->len-old_dev->hard_header_len;
	struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);

	if(newskb==NULL)
		return;

	newskb->link3=NULL;
	newskb->sk=NULL;
	newskb->dev=dev;
	newskb->saddr=skb->saddr;
	newskb->daddr=skb->daddr;
	newskb->raddr=skb->raddr;
	newskb->free=1;
	newskb->lock=0;
	newskb->users=0;
	newskb->pkt_type=skb->pkt_type;
	newskb->len=len+dev->hard_header_len;

	newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
	memcpy(newskb->ip_hdr,skb->ip_hdr,len);

	/* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion. */

	/*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
	ip_queue_xmit(NULL, dev, newskb, 1);
}


/*
 *	Queues a packet to be sent, and starts the transmitter
 *	if necessary. if free = 1 then we free the block after
 *	transmit, otherwise we don't. If free==2 we not only
 *	free the block but also don't assign a new ip seq number.
 *	This routine also needs to put in the total length,
 *	and compute the checksum.
 */

void ip_queue_xmit(struct sock *sk, struct device *dev,
		   struct sk_buff *skb, int free)
{
	struct iphdr *iph;
	unsigned char *ptr;

	/* Sanity check */
	if (dev == NULL)
	{
		NETDEBUG(printk("IP: ip_queue_xmit dev = NULL\n"));
		return;
	}

	IS_SKB(skb);

	/*
	 *	Do some book-keeping in the packet for later.
	 */

	skb->dev = dev;
	skb->when = jiffies;

	/*
	 *	Find the IP header and set the length. This is bad
	 *	but once we get the skb data handling code in the
	 *	hardware will push its header sensibly and we will
	 *	set skb->ip_hdr to avoid this mess and the fixed
	 *	header length problem.
	 */

	ptr = skb->data;
	ptr += dev->hard_header_len;
	iph = (struct iphdr *)ptr;
	skb->ip_hdr = iph;
	iph->tot_len = htons(skb->len-dev->hard_header_len);

#ifdef CONFIG_IP_FIREWALL
	if(ip_fw_chk(iph, dev, ip_fw_blk_chain, ip_fw_blk_policy, 0) != 1)
		/* just don't send this packet */
		return;
#endif

	/*
	 *	No reassigning numbers to fragments...
	 */

	if(free!=2)
		iph->id = htons(ip_id_count++);
	else
		free=1;

	/* All buffers without an owner socket get freed */
	if (sk == NULL)
		free = 1;

	skb->free = free;

	/*
	 *	Do we need to fragment? Again this is inefficient.
	 *	We need to somehow lock the original buffer and use
	 *	bits of it.
	 */

	if(skb->len > dev->mtu + dev->hard_header_len)
	{
		ip_fragment(sk,skb,dev,0);
		IS_SKB(skb);
		kfree_skb(skb,FREE_WRITE);
		return;
	}

	/*
	 *	Add an IP checksum.
	 */

	ip_send_check(iph);

	/*
	 *	Print the frame when debugging.
	 */

	/*
	 *	More debugging. You cannot queue a packet already on a list.
	 *	Spot this and moan loudly.
	 */
	if (skb->next != NULL)
	{
		NETDEBUG(printk("ip_queue_xmit: next != NULL\n"));
		skb_unlink(skb);
	}

	/*
	 *	If a sender wishes the packet to remain unfreed
	 *	we add it to his send queue. This arguably belongs
	 *	in the TCP level since nobody else uses it. BUT
	 *	remember IPng might change all the rules.
	 */

	if (!free)
	{
		unsigned long flags;
		/* The socket now has more outstanding blocks */

		sk->packets_out++;

		/* Protect the list for a moment */
		save_flags(flags);
		cli();

		if (skb->link3 != NULL)
		{
			NETDEBUG(printk("ip.c: link3 != NULL\n"));
			skb->link3 = NULL;
		}
		if (sk->send_head == NULL)
		{
			sk->send_tail = skb;
			sk->send_head = skb;
		}
		else
		{
			sk->send_tail->link3 = skb;
			sk->send_tail = skb;
		}
		/* skb->link3 is NULL */

		/* Interrupt restore */
		restore_flags(flags);
	}
	else
		/* Remember who owns the buffer */
		skb->sk = sk;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */

	ip_statistics.IpOutRequests++;
#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,ip_acct_chain,IP_FW_F_ACCEPT,1);
#endif

#ifdef CONFIG_IP_MULTICAST

	/*
	 *	Multicasts are looped back for other local users.
	 */

	if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
	{
		if(sk==NULL || sk->ip_mc_loop)
		{
			if(iph->daddr==IGMP_ALL_HOSTS)
				ip_loopback(dev,skb);
			else
			{
				struct ip_mc_list *imc=dev->ip_mc_list;
				while(imc!=NULL)
				{
					if(imc->multiaddr==iph->daddr)
					{
						ip_loopback(dev,skb);
						break;
					}
					imc=imc->next;
				}
			}
		}
		/* Multicasts with ttl 0 must not go beyond the host */

		if(skb->ip_hdr->ttl==0)
		{
			kfree_skb(skb, FREE_READ);
			return;
		}
	}
#endif
	if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
		ip_loopback(dev,skb);

	if (dev->flags & IFF_UP)
	{
		/*
		 *	If we have an owner use its priority setting,
		 *	otherwise use NORMAL.
		 */

		if (sk != NULL)
		{
			dev_queue_xmit(skb, dev, sk->priority);
		}
		else
		{
			dev_queue_xmit(skb, dev, SOPRI_NORMAL);
		}
	}
	else
	{
		ip_statistics.IpOutDiscards++;
		if (free)
			kfree_skb(skb, FREE_WRITE);
	}
}


#ifdef CONFIG_IP_MULTICAST

/*
 *	Write a multicast group list table for the IGMP daemon to
 *	read.
 */

int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
{
	off_t pos=0, begin=0;
	struct ip_mc_list *im;
	unsigned long flags;
	int len=0;
	struct device *dev;

	len=sprintf(buffer,"Device : Count\tGroup Users Timer\n");
	save_flags(flags);
	cli();

	for(dev = dev_base; dev; dev = dev->next)
	{
		if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
		{
			len+=sprintf(buffer+len,"%-10s: %5d\n",
					dev->name, dev->mc_count);
			for(im = dev->ip_mc_list; im; im = im->next)
			{
				len+=sprintf(buffer+len,
					"\t\t\t%08lX %5d %d:%08lX\n",
					im->multiaddr, im->users,
					im->tm_running, im->timer.expires);
				pos=begin+len;
				if(pos<offset)
				{
					len=0;
					begin=pos;
				}
				if(pos>offset+length)
					break;
			}
		}
	}
	restore_flags(flags);
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
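
/*
 * The table this emits looks roughly like (illustrative output only,
 * the group address is printed as the raw hex word as stored):
 *
 *	Device : Count	Group Users Timer
 *	eth0      :     1
 *				E0000001     1 0:00000000
 *
 * i.e. one line per multicast-capable interface, then one line per
 * joined group with its user count and timer state.
 */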


#endif
/*
 *	Socket option code for IP. This is the end of the line after any TCP,UDP etc options on
 *	an IP socket.
 *
 *	We implement IP_TOS (type of service), IP_TTL (time to live).
 *
 *	Next release we will sort out IP_OPTIONS since for some people they are kind of important.
 */

static struct device *ip_mc_find_devfor(unsigned long addr)
{
	struct device *dev;
	for(dev = dev_base; dev; dev = dev->next)
	{
		if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
			(dev->pa_addr==addr))
			return dev;
	}

	return NULL;
}

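/*
 * For orientation, this is how the multicast options below are reached
 * from user space - an illustrative (not compiled) fragment; the group
 * address 224.0.0.9 is just an example:
 */
#if 0
	struct ip_mreq mreq;
	int sock = socket(AF_INET, SOCK_DGRAM, 0);

	mreq.imr_multiaddr.s_addr = htonl(0xE0000009);	/* 224.0.0.9 */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);	/* let the kernel pick */
	setsockopt(sock, SOL_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
#endif
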
int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
{
	int val,err;
	unsigned char ucval;
#if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
	struct ip_fw tmp_fw;
#endif
	if (optval == NULL)
		return(-EINVAL);

	err=verify_area(VERIFY_READ, optval, sizeof(int));
	if(err)
		return err;

	val = get_fs_long((unsigned long *)optval);
	ucval=get_fs_byte((unsigned char *)optval);

	if(level!=SOL_IP)
		return -EOPNOTSUPP;

	switch(optname)
	{
		case IP_TOS:
			if(val<0||val>255)
				return -EINVAL;
			sk->ip_tos=val;
			if(val==IPTOS_LOWDELAY)
				sk->priority=SOPRI_INTERACTIVE;
			if(val==IPTOS_THROUGHPUT)
				sk->priority=SOPRI_BACKGROUND;
			return 0;
		case IP_TTL:
			if(val<1||val>255)
				return -EINVAL;
			sk->ip_ttl=val;
			return 0;
#ifdef CONFIG_IP_MULTICAST
		case IP_MULTICAST_TTL:
		{
			sk->ip_mc_ttl=(int)ucval;
			return 0;
		}
		case IP_MULTICAST_LOOP:
		{
			if(ucval!=0 && ucval!=1)
				return -EINVAL;
			sk->ip_mc_loop=(int)ucval;
			return 0;
		}
		case IP_MULTICAST_IF:
		{
			struct in_addr addr;
			struct device *dev=NULL;

			/*
			 *	Check the arguments are allowable.
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(addr));
			if(err)
				return err;

			memcpy_fromfs(&addr,optval,sizeof(addr));

			/*
			 *	What address has been requested?
			 */

			if(addr.s_addr==INADDR_ANY)	/* Default */
			{
				sk->ip_mc_name[0]=0;
				return 0;
			}

			/*
			 *	Find the device.
			 */

			dev=ip_mc_find_devfor(addr.s_addr);

			/*
			 *	Did we find one?
			 */

			if(dev)
			{
				strcpy(sk->ip_mc_name,dev->name);
				return 0;
			}
			return -EADDRNOTAVAIL;
		}

		case IP_ADD_MEMBERSHIP:
		{
			/*
			 *	FIXME: Add/Del membership should have a semaphore protecting them from re-entry.
			 */
			struct ip_mreq mreq;
			unsigned long route_src;
			struct rtable *rt;
			struct device *dev=NULL;

			/*
			 *	Check the arguments.
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/*
			 *	Get device for use later.
			 */

			if(mreq.imr_interface.s_addr==INADDR_ANY)
			{
				/*
				 *	Not set so scan.
				 */
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,NULL, &route_src))!=NULL)
				{
					dev=rt->rt_dev;
					rt->rt_use--;
				}
			}
			else
			{
				/*
				 *	Find a suitable device.
				 */

				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}

			/*
			 *	No device, no cookies.
			 */

			if(!dev)
				return -ENODEV;

			/*
			 *	Join group.
			 */

			return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}

		case IP_DROP_MEMBERSHIP:
		{
			struct ip_mreq mreq;
			struct rtable *rt;
			unsigned long route_src;
			struct device *dev=NULL;

			/*
			 *	Check the arguments.
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/*
			 *	Get device for use later.
			 */

			if(mreq.imr_interface.s_addr==INADDR_ANY)
			{
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,NULL, &route_src))!=NULL)
				{
					dev=rt->rt_dev;
					rt->rt_use--;
				}
			}
			else
			{
				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}

			/*
			 *	Did we find a suitable device?
			 */

			if(!dev)
				return -ENODEV;

			/*
			 *	Leave group.
			 */

			return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}
#endif
#ifdef CONFIG_IP_FIREWALL
		case IP_FW_ADD_BLK:
		case IP_FW_DEL_BLK:
		case IP_FW_ADD_FWD:
		case IP_FW_DEL_FWD:
		case IP_FW_CHK_BLK:
		case IP_FW_CHK_FWD:
2212 case IP_FW_FLUSH_BLK:
2213 case IP_FW_FLUSH_FWD:
2214 case IP_FW_ZERO_BLK:
2215 case IP_FW_ZERO_FWD:
2216 case IP_FW_POLICY_BLK:
2217 case IP_FW_POLICY_FWD:
2218 if(!suser())
2219 return -EPERM;
2220 if(optlen>sizeof(tmp_fw) || optlen<1)
2221 return -EINVAL;
2222 err=verify_area(VERIFY_READ,optval,optlen);
2223 if(err)
2224 return err;
2225 memcpy_fromfs(&tmp_fw,optval,optlen);
2226 err=ip_fw_ctl(optname, &tmp_fw,optlen);
2227 return -err; /* -0 is 0 after all */
2228
2229 #endif
2230 #ifdef CONFIG_IP_ACCT
2231 case IP_ACCT_DEL:
2232 case IP_ACCT_ADD:
2233 case IP_ACCT_FLUSH:
2234 case IP_ACCT_ZERO:
2235 if(!suser())
2236 return -EPERM;
2237 if(optlen>sizeof(tmp_fw) || optlen<1)
2238 return -EINVAL;
2239 err=verify_area(VERIFY_READ,optval,optlen);
2240 if(err)
2241 return err;
2242 memcpy_fromfs(&tmp_fw, optval,optlen);
2243 err=ip_acct_ctl(optname, &tmp_fw,optlen);
2244 return -err; /* -0 is 0 after all */
2245 #endif
2246 /* IP_OPTIONS and friends go here eventually */
2247 default:
2248 return(-ENOPROTOOPT);
2249 }
2250 }
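/*
 *	Editor's note: a minimal user-space sketch exercising the option
 *	code above, assuming the standard BSD socket API. The group
 *	224.1.1.1 is an arbitrary illustrative address; INADDR_ANY for
 *	the interface triggers the routing-table scan above.
 */
#if 0	/* illustrative only, not part of the kernel build */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

int main(void)
{
	int tos = IPTOS_LOWDELAY;	/* also raises sk->priority above */
	int ttl = 64;
	struct ip_mreq mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		perror("IP_TOS");
	if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
		perror("IP_TTL");

	/* Join a group; INADDR_ANY lets the kernel pick the device. */
	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("224.1.1.1");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_MEMBERSHIP");
	return 0;
}
#endif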
2251
2252 /*
2253  *	Get the options. Note for future reference: the GET of IP options gets the
2254  *	_received_ ones. The SET sets the _sent_ ones.
2255 */
2256
2257 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2258 {
2259 int val,err;
2260 #ifdef CONFIG_IP_MULTICAST
2261 int len;
2262 #endif
2263
2264 if(level!=SOL_IP)
2265 return -EOPNOTSUPP;
2266
2267 switch(optname)
2268 {
2269 case IP_TOS:
2270 val=sk->ip_tos;
2271 break;
2272 case IP_TTL:
2273 val=sk->ip_ttl;
2274 break;
2275 #ifdef CONFIG_IP_MULTICAST
2276 case IP_MULTICAST_TTL:
2277 val=sk->ip_mc_ttl;
2278 break;
2279 case IP_MULTICAST_LOOP:
2280 val=sk->ip_mc_loop;
2281 break;
2282 case IP_MULTICAST_IF:
2283 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2284 if(err)
2285 return err;
2286 len=strlen(sk->ip_mc_name);
2287 err=verify_area(VERIFY_WRITE, optval, len);
2288 if(err)
2289 return err;
2290 put_fs_long(len,(unsigned long *) optlen);
2291 memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2292 return 0;
2293 #endif
2294 default:
2295 return(-ENOPROTOOPT);
2296 }
2297 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2298 if(err)
2299 return err;
2300 put_fs_long(sizeof(int),(unsigned long *) optlen);
2301
2302 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2303 if(err)
2304 return err;
2305 put_fs_long(val,(unsigned long *)optval);
2306
2307 return(0);
2308 }
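/*
 *	Editor's note: the matching user-space read path, as a minimal
 *	sketch. For the scalar options the function above writes
 *	sizeof(int) into *optlen and the value into *optval. The int
 *	optlen matches the kernel prototype above; later libcs use
 *	socklen_t.
 */
#if 0	/* illustrative only, not part of the kernel build */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int ttl = 0;
	int len = sizeof(ttl);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len) == 0)
		printf("default ttl %d (len %d)\n", ttl, len);
	return 0;
}
#endif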
2309
2310 /*
2311 * Build and send a packet, with as little as one copy
2312 *
2313 * Doesn't care much about ip options... option length can be
2314 * different for fragment at 0 and other fragments.
2315 *
2316 * Note that the fragment at the highest offset is sent first,
2317 * so the getfrag routine can fill in the TCP/UDP checksum header
2318 * field in the last fragment it sends... actually it also helps
2319 * the reassemblers, they can put most packets in at the head of
2320  *	the reassemblers: they can put most packets in at the head of
2321  *	the fragment queue, and they know the total size in advance. This
2322  *	last feature will measurably improve the Linux fragment handler.
2323  *	The callback has five args: an arbitrary pointer (a copy of frag),
2324  *	the source IP address (may depend on the routing table), the
2325  *	destination buffer to copy into (char *), the offset to copy from,
2326  *	and the length to be copied.
2327 *
2328 */
2329
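/*
 *	Editor's note: a minimal sketch of such a callback under the
 *	signature below. It copies from a flat buffer handed through the
 *	opaque pointer; a real transport (UDP, say) would also fold the
 *	copied bytes into its running checksum here.
 */
#if 0	/* illustrative only, not part of the kernel build */
static void example_getfrag(void *p, int saddr, char *to,
	    unsigned int offset, unsigned int fraglen)
{
	/* p is the pointer passed to ip_build_xmit as 'frag' */
	memcpy(to, ((char *) p) + offset, fraglen);
}
#endif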
2330 int ip_build_xmit(struct sock *sk,
2331 void getfrag (void *,
2332 int,
2333 char *,
2334 unsigned int,
2335 unsigned int),
2336 void *frag,
2337 unsigned short int length,
2338 int daddr,
2339 int flags,
2340 int type)
2341 {
2342 struct rtable *rt;
2343 unsigned int fraglen, maxfraglen, fragheaderlen;
2344 int offset, mf;
2345 unsigned long saddr;
2346 unsigned short id;
2347 struct iphdr *iph;
2348 int local=0;
2349 struct device *dev;
2350
2351
2352 #ifdef CONFIG_IP_MULTICAST
2353 if(sk && MULTICAST(daddr) && *sk->ip_mc_name)
2354 {
2355 		dev=dev_get(sk->ip_mc_name);
2356 if(!dev)
2357 return -ENODEV;
2358 rt=NULL;
2359 }
2360 else
2361 {
2362 #endif
2363 /*
2364 * Perform the IP routing decisions
2365 */
2366
2367 if(sk->localroute || flags&MSG_DONTROUTE)
2368 local=1;
2369
2370 rt = sk->ip_route_cache;
2371
2372 /*
2373 	 *	See if the routing cache is outdated. Once we are happy it is reliable we should
2374 	 *	clean this up, doing the invalidation actively on route changes and header changes.
2375 */
2376
2377 saddr=sk->ip_route_saddr;
2378 if(!rt || sk->ip_route_stamp != rt_stamp || daddr!=sk->ip_route_daddr || sk->ip_route_local!=local || sk->saddr!=sk->ip_route_saddr)
2379 {
2380 if(local)
2381 rt = ip_rt_local(daddr, NULL, &saddr);
2382 else
2383 rt = ip_rt_route(daddr, NULL, &saddr);
2384 sk->ip_route_local=local;
2385 sk->ip_route_daddr=daddr;
2386 sk->ip_route_saddr=saddr;
2387 sk->ip_route_stamp=rt_stamp;
2388 sk->ip_route_cache=rt;
2389 sk->ip_hcache_ver=NULL;
2390 sk->ip_hcache_state= 0;
2391 }
2392 else if(rt)
2393 {
2394 /*
2395 * Attempt header caches only if the cached route is being reused. Header cache
2396 * is not ultra cheap to set up. This means we only set it up on the second packet,
2397 		 *	so one shot communications are not slowed. We assume (reasonably enough) that a
2398 		 *	second packet is probably the start of a stream of data.
2399 */
2400 if(rt->rt_dev->header_cache && sk->ip_hcache_state!= -1)
2401 {
2402 if(sk->ip_hcache_ver==NULL || sk->ip_hcache_stamp!=*sk->ip_hcache_ver)
2403 rt->rt_dev->header_cache(rt->rt_dev,sk,saddr,daddr);
2404 else
2405 /* Can't cache. Remember this */
2406 sk->ip_hcache_state= -1;
2407 }
2408 }
2409
2410 if (rt == NULL)
2411 {
2412 ip_statistics.IpOutNoRoutes++;
2413 return(-ENETUNREACH);
2414 }
2415
2416 if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
2417 saddr = sk->saddr;
2418
2419 dev=rt->rt_dev;
2420 #ifdef CONFIG_IP_MULTICAST
2421 }
2422 #endif
2423
2424 /*
2425 * Now compute the buffer space we require
2426 */
2427
2428 fragheaderlen = dev->hard_header_len;
2429 if(type != IPPROTO_RAW)
2430 fragheaderlen += 20;
2431
2432 /*
2433 * Fragheaderlen is the size of 'overhead' on each buffer. Now work
2434 * out the size of the frames to send.
2435 */
2436
2437 maxfraglen = ((dev->mtu-20) & ~7) + fragheaderlen;
2438
2439 /*
2440 * Start at the end of the frame by handling the remainder.
2441 */
2442
2443 offset = length - (length % (maxfraglen - fragheaderlen));
2444
2445 /*
2446 * Amount of memory to allocate for final fragment.
2447 */
2448
2449 fraglen = length - offset + fragheaderlen;
2450
2451 if(fraglen==0)
2452 {
2453 fraglen = maxfraglen;
2454 offset -= maxfraglen-fragheaderlen;
2455 }
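/*
 *	Editor's note: a worked example of the arithmetic above, assuming
 *	an Ethernet device (mtu 1500, hard_header_len 14) and a non-raw
 *	4000 byte datagram:
 *
 *		fragheaderlen = 14 + 20			=   34
 *		maxfraglen    = ((1500 - 20) & ~7) + 34	= 1514
 *		payload/frame = 1514 - 34		= 1480
 *		offset        = 4000 - (4000 % 1480)	= 2960
 *		fraglen       = 4000 - 2960 + 34	= 1074
 *
 *	so the tail fragment carries 1040 bytes of data and the loop
 *	below steps offset down by 1480 for each full-sized fragment.
 */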
2456
2457
2458 /*
2459 * The last fragment will not have MF (more fragments) set.
2460 */
2461
2462 mf = 0;
2463
2464 /*
2465 * Can't fragment raw packets
2466 */
2467
2468 if (type == IPPROTO_RAW && offset > 0)
2469 return(-EMSGSIZE);
2470
2471 /*
2472 * Get an identifier
2473 */
2474
2475 id = htons(ip_id_count++);
2476
2477 /*
2478 	 *	Begin outputting the bytes.
2479 */
2480
2481 do
2482 {
2483 struct sk_buff * skb;
2484 int error;
2485 char *data;
2486
2487 /*
2488 * Get the memory we require.
2489 */
2490
2491 skb = sock_alloc_send_skb(sk, fraglen, 0, &error);
2492 if (skb == NULL)
2493 return(error);
2494
2495 /*
2496 * Fill in the control structures
2497 */
2498
2499 skb->next = skb->prev = NULL;
2500 skb->dev = dev;
2501 skb->when = jiffies;
2502 skb->free = 1; /* dubious, this one */
2503 skb->sk = sk;
2504 skb->arp = 0;
2505 skb->saddr = saddr;
2506 skb->raddr = (rt&&rt->rt_gateway) ? rt->rt_gateway : daddr;
2507 skb->len = fraglen;
2508
2509 /*
2510 		 *	This saves us ARP and stuff. In the optimal case we do no route lookup (route cache ok),
2511 		 *	no ARP lookup (arp cache ok) and just output. The cache checks are still too slow but
2512 * this can be fixed later. For gateway routes we ought to have a rt->.. header cache
2513 * pointer to speed header cache builds for identical targets.
2514 */
2515
2516 if(sk->ip_hcache_state>0)
2517 {
2518 memcpy(skb->data,sk->ip_hcache_data, dev->hard_header_len);
2519 skb->arp=1;
2520 }
2521 else if (dev->hard_header)
2522 {
2523 if(dev->hard_header(skb->data, dev, ETH_P_IP,
2524 NULL, NULL, 0, NULL)>0)
2525 skb->arp=1;
2526 }
2527
2528 /*
2529 * Find where to start putting bytes.
2530 */
2531
2532 data = (char *)skb->data + dev->hard_header_len;
2533 iph = (struct iphdr *)data;
2534
2535 /*
2536 * Only write IP header onto non-raw packets
2537 */
2538
2539 if(type != IPPROTO_RAW)
2540 {
2541
2542 iph->version = 4;
2543 iph->ihl = 5; /* ugh */
2544 iph->tos = sk->ip_tos;
2545 iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
2546 iph->id = id;
2547 iph->frag_off = htons(offset>>3);
2548 iph->frag_off |= mf;
2549 #ifdef CONFIG_IP_MULTICAST
2550 if (MULTICAST(daddr))
2551 iph->ttl = sk->ip_mc_ttl;
2552 else
2553 #endif
2554 iph->ttl = sk->ip_ttl;
2555 iph->protocol = type;
2556 iph->check = 0;
2557 iph->saddr = saddr;
2558 iph->daddr = daddr;
2559 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
2560 data += iph->ihl*4;
2561
2562 /*
2563 * Any further fragments will have MF set.
2564 */
2565
2566 mf = htons(IP_MF);
2567 }
2568
2569 /*
2570 * User data callback
2571 */
2572
2573 getfrag(frag, saddr, data, offset, fraglen-fragheaderlen);
2574
2575 /*
2576 * Account for the fragment.
2577 */
2578
2579 #ifdef CONFIG_IP_ACCT
2580 if(!offset)
2581 ip_fw_chk(iph, dev, ip_acct_chain, IP_FW_F_ACCEPT, 1);
2582 #endif
2583 offset -= (maxfraglen-fragheaderlen);
2584 fraglen = maxfraglen;
2585
2586 #ifdef CONFIG_IP_MULTICAST
2587
2588 /*
2589 * Multicasts are looped back for other local users
2590 */
2591
2592 if (MULTICAST(daddr) && !(dev->flags&IFF_LOOPBACK))
2593 {
2594 /*
2595 * Loop back any frames. The check for IGMP_ALL_HOSTS is because
2596 * you are always magically a member of this group.
2597 */
2598
2599 if(sk==NULL || sk->ip_mc_loop)
2600 {
2601 if(skb->daddr==IGMP_ALL_HOSTS)
2602 ip_loopback(rt->rt_dev,skb);
2603 else
2604 {
2605 struct ip_mc_list *imc=rt->rt_dev->ip_mc_list;
2606 while(imc!=NULL)
2607 {
2608 if(imc->multiaddr==daddr)
2609 {
2610 ip_loopback(rt->rt_dev,skb);
2611 break;
2612 }
2613 imc=imc->next;
2614 }
2615 }
2616 }
2617
2618 /*
2619 * Multicasts with ttl 0 must not go beyond the host. Fixme: avoid the
2620 * extra clone.
2621 */
2622
2623 if(skb->ip_hdr->ttl==0)
2624 kfree_skb(skb, FREE_READ);
2625 }
2626 #endif
2627 /*
2628 * Now queue the bytes into the device.
2629 */
2630
2631 if (dev->flags & IFF_UP)
2632 {
2633 dev_queue_xmit(skb, dev, sk->priority);
2634 }
2635 else
2636 {
2637 /*
2638 * Whoops...
2639 *
2640 * FIXME: There is a small nasty here. During the ip_build_xmit we could
2641 * page fault between the route lookup and device send, the device might be
2642 * removed and unloaded.... We need to add device locks on this.
2643 */
2644
2645 ip_statistics.IpOutDiscards++;
2646 kfree_skb(skb, FREE_WRITE);
2647 return(0); /* lose rest of fragments */
2648 }
2649 }
2650 while (offset >= 0);
2651
2652 return(0);
2653 }
2654
2655
2656 /*
2657 * IP protocol layer initialiser
2658 */
2659
2660 static struct packet_type ip_packet_type =
2661 {
2662 0, /* MUTTER ntohs(ETH_P_IP),*/
2663 NULL, /* All devices */
2664 	ip_rcv,		/* Receive handler */
2665 	NULL,		/* Private data */
2666 	NULL,		/* Next packet type in the list */
2667 };
2668
2669 /*
2670 * Device notifier
2671 */
2672
2673 static int ip_rt_event(unsigned long event, void *ptr)
2674 {
2675 if(event==NETDEV_DOWN)
2676 ip_rt_flush(ptr);
2677 return NOTIFY_DONE;
2678 }
2679
2680 struct notifier_block ip_rt_notifier={
2681 	ip_rt_event,	/* Handler */
2682 	NULL,		/* Next notifier */
2683 	0		/* Priority */
2684 };
2685
2686 /*
2687 * IP registers the packet type and then calls the subprotocol initialisers
2688 */
2689
2690 void ip_init(void)
2691 {
2692 ip_packet_type.type=htons(ETH_P_IP);
2693 dev_add_pack(&ip_packet_type);
2694
2695 /* So we flush routes when a device is downed */
2696 register_netdevice_notifier(&ip_rt_notifier);
2697 /* ip_raw_init();
2698 ip_packet_init();
2699 ip_tcp_init();
2700 ip_udp_init();*/
2701 }
2702