1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The Internet Protocol (IP) module.
7 *
8 * Version: @(#)ip.c 1.0.16b 9/1/93
9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org>
13 * Alan Cox, <gw4pts@gw4pts.ampr.org>
14 * Richard Underwood
15 * Stefan Becker, <stefanb@yello.ping.de>
16 *
17 *
18 * Fixes:
19 * Alan Cox : Commented a couple of minor bits of surplus code
20 * Alan Cox : Undefining IP_FORWARD doesn't include the code
21 * (just stops a compiler warning).
22 * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes
23 * are junked rather than corrupting things.
24 * Alan Cox : Frames to bad broadcast subnets are dumped
25 * We used to process them non broadcast and
26 * boy could that cause havoc.
27 * Alan Cox : ip_forward sets the free flag on the
28 * new frame it queues. Still crap because
29 * it copies the frame but at least it
30 * doesn't eat memory too.
31 * Alan Cox : Generic queue code and memory fixes.
32 * Fred Van Kempen : IP fragment support (borrowed from NET2E)
33 * Gerhard Koerting: Forward fragmented frames correctly.
34 * Gerhard Koerting: Fixes to my fix of the above 8-).
35 * Gerhard Koerting: IP interface addressing fix.
36 * Linus Torvalds : More robustness checks
37 * Alan Cox : Even more checks: Still not as robust as it ought to be
38 * Alan Cox : Save IP header pointer for later
39 * Alan Cox : ip option setting
40 * Alan Cox : Use ip_tos/ip_ttl settings
41 * Alan Cox : Fragmentation bogosity removed
42 * (Thanks to Mark.Bush@prg.ox.ac.uk)
43 * Dmitry Gorodchanin : Send of a raw packet crash fix.
44 * Alan Cox : Silly ip bug when an overlength
45 * fragment turns up. Now frees the
46 * queue.
47 * Linus Torvalds/ : Memory leakage on fragmentation
48 * Alan Cox : handling.
49 * Gerhard Koerting: Forwarding uses IP priority hints
50 * Teemu Rantanen : Fragment problems.
51 * Alan Cox : General cleanup, comments and reformat
52 * Alan Cox : SNMP statistics
53 * Alan Cox : BSD address rule semantics. Also see
54 * UDP as there is a nasty checksum issue
55 * if you do things the wrong way.
56 * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file
57 * Alan Cox : IP options adjust sk->priority.
58 * Pedro Roque : Fix mtu/length error in ip_forward.
59 * Alan Cox : Avoid ip_chk_addr when possible.
60 * Richard Underwood : IP multicasting.
61 * Alan Cox : Cleaned up multicast handlers.
62 * Alan Cox : RAW sockets demultiplex in the BSD style.
63 * Gunther Mayer : Fix the SNMP reporting typo
64 * Alan Cox : Always in group 224.0.0.1
65 * Alan Cox : Multicast loopback error for 224.0.0.1
66 * Alan Cox : IP_MULTICAST_LOOP option.
67 * Alan Cox : Use notifiers.
68 * Bjorn Ekwall : Removed ip_csum (from slhc.c too)
69 * Bjorn Ekwall : Moved ip_fast_csum to ip.h (inline!)
70 * Stefan Becker : Send out ICMP HOST REDIRECT
71 * Alan Cox : Only send ICMP_REDIRECT if src/dest are the same net.
72 *
73 *
74 * To Fix:
75 * IP option processing is mostly not needed. ip_forward needs to know about routing rules
76 * and time stamp but that's about all. Use the route mtu field here too
77 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
78 * and could be made very efficient with the addition of some virtual memory hacks to permit
79 * the allocation of a buffer that can then be 'grown' by twiddling page tables.
80 * Output fragmentation wants updating along with the buffer management to use a single
81 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
82 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
83 * fragmentation anyway.
84 *
85 * This program is free software; you can redistribute it and/or
86 * modify it under the terms of the GNU General Public License
87 * as published by the Free Software Foundation; either version
88 * 2 of the License, or (at your option) any later version.
89 */
90
91 #include <asm/segment.h>
92 #include <asm/system.h>
93 #include <linux/types.h>
94 #include <linux/kernel.h>
95 #include <linux/sched.h>
96 #include <linux/mm.h>
97 #include <linux/string.h>
98 #include <linux/errno.h>
99 #include <linux/config.h>
100
101 #include <linux/socket.h>
102 #include <linux/sockios.h>
103 #include <linux/in.h>
104 #include <linux/inet.h>
105 #include <linux/netdevice.h>
106 #include <linux/etherdevice.h>
107
108 #include "snmp.h"
109 #include "ip.h"
110 #include "protocol.h"
111 #include "route.h"
112 #include "tcp.h"
113 #include "udp.h"
114 #include <linux/skbuff.h>
115 #include "sock.h"
116 #include "arp.h"
117 #include "icmp.h"
118 #include "raw.h"
119 #include <linux/igmp.h>
120 #include <linux/ip_fw.h>
121
122 #define CONFIG_IP_DEFRAG
123
124 extern int last_retran;
125 extern void sort_send(struct sock *sk);
126
127 #define min(a,b) ((a)<(b)?(a):(b))
128 #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
129
130 /*
131 * SNMP management statistics
132 */
133
/* Global SNMP counters for the IP layer, updated throughout this file
   (IpOutNoRoutes, IpReasm*, IpFrag*...). First field is the Forwarding
   flag, second the default TTL. */
#ifdef CONFIG_IP_FORWARD
struct ip_mib ip_statistics={1,64,};	/* Forwarding=Yes, Default TTL=64 */
#else
struct ip_mib ip_statistics={0,64,};	/* Forwarding=No, Default TTL=64 */
#endif
139
140 /*
141 * Handle the issuing of an ioctl() request
142 * for the ip device. This is scheduled to
143 * disappear
144 */
145
/*
 *	Handle the issuing of an ioctl() request for the ip device.
 *	This is scheduled to disappear. No commands are implemented,
 *	so every request is rejected with -EINVAL.
 */
int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	/* No IP-level ioctls are currently supported. */
	return -EINVAL;
}
154
155
156 /* these two routines will do routing. */
157
/*
 *	Strict source routing hook. Currently a stub: the routing
 *	decision for a strict-routed frame would be taken here.
 */
static void
strict_route(struct iphdr *iph, struct options *opt)
{
}
162
163
/*
 *	Loose source routing hook. Currently a stub: the routing
 *	decision for a loose-routed frame would be taken here.
 */
static void
loose_route(struct iphdr *iph, struct options *opt)
{
}
168
169
170
171
/*
 *	This routine will check to see if we have lost a gateway.
 *	Currently a stub — no checking is performed.
 */
void
ip_route_check(unsigned long daddr)
{
}
177
178
#if 0
/*
 *	This routine puts the options at the end of an ip header.
 *	Compiled out (#if 0): option building is not yet supported,
 *	so only a single end-of-option-list octet would be written.
 */
static int
build_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *ptr;
	/* currently we don't support any options. */
	ptr = (unsigned char *)(iph+1);	/* options start right after the fixed header */
	*ptr = 0;			/* IPOPT_END terminates the (empty) list */
	return (4);			/* options area is padded to a 4-byte multiple */
}
#endif
191
192
193 /*
194 * Take an skb, and fill in the MAC header.
195 */
196
197 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
198 {
199 int mac = 0;
200
201 skb->dev = dev;
202 skb->arp = 1;
203 if (dev->hard_header)
204 {
205 /*
206 * Build a hardware header. Source address is our mac, destination unknown
207 * (rebuild header will sort this out)
208 */
209 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
210 if (mac < 0)
211 {
212 mac = -mac;
213 skb->arp = 0;
214 skb->raddr = daddr; /* next routing address */
215 }
216 }
217 return mac;
218 }
219
int ip_id_count = 0;	/* identification field for the next outgoing IP datagram */
221
222 /*
223 * This routine builds the appropriate hardware/IP headers for
224 * the routine. It assumes that if *dev != NULL then the
225 * protocol knows what it's doing, otherwise it uses the
226 * routing/ARP tables to select a device struct.
227 */
/*
 *	This routine builds the appropriate hardware/IP headers for
 *	the routine. It assumes that if *dev != NULL then the
 *	protocol knows what it's doing, otherwise it uses the
 *	routing/ARP tables to select a device struct.
 *
 *	Returns the number of header bytes written into skb->data
 *	(MAC header, plus 20 for the IP header unless type is
 *	IPPROTO_RAW), or -ENETUNREACH when no route exists.
 */
int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
		struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
{
	static struct options optmem;	/* scratch options filled by the route lookup */
	struct iphdr *iph;
	struct rtable *rt;
	unsigned char *buff;
	unsigned long raddr;		/* next hop: gateway or final destination */
	int tmp;
	unsigned long src;		/* source address suggested by the route */

	buff = skb->data;

	/*
	 *	See if we need to look up the device.
	 */

#ifdef CONFIG_INET_MULTICAST
	/* Multicast with an explicit outgoing interface chosen by the socket. */
	if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
		*dev=dev_get(skb->sk->ip_mc_name);
#endif
	if (*dev == NULL)
	{
		if(skb->localroute)
			rt = ip_rt_local(daddr, &optmem, &src);
		else
			rt = ip_rt_route(daddr, &optmem, &src);
		if (rt == NULL)
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}

		*dev = rt->rt_dev;
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;/*rt->rt_dev->pa_addr;*/
		raddr = rt->rt_gateway;

		/* The caller's options are replaced by those from the route. */
		opt = &optmem;
	}
	else
	{
		/*
		 *	We still need the address of the first hop.
		 */
		if(skb->localroute)
			rt = ip_rt_local(daddr, &optmem, &src);
		else
			rt = ip_rt_route(daddr, &optmem, &src);
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;/*rt->rt_dev->pa_addr;*/

		/* NOTE(review): when rt == NULL here, src is left uninitialized
		   and could be copied into saddr below if the caller passed
		   saddr == 0 — confirm callers with an explicit *dev always
		   supply a source address. */
		raddr = (rt == NULL) ? 0 : rt->rt_gateway;
	}

	/*
	 *	No source addr so make it our addr
	 */
	if (saddr == 0)
		saddr = src;

	/*
	 *	No gateway so aim at the real destination
	 */
	if (raddr == 0)
		raddr = daddr;

	/*
	 *	Now build the MAC header.
	 */

	tmp = ip_send(skb, raddr, len, *dev, saddr);
	buff += tmp;
	len -= tmp;

	/*
	 *	Book keeping
	 */

	skb->dev = *dev;
	skb->saddr = saddr;
	if (skb->sk)
		skb->sk->saddr = saddr;

	/*
	 *	Now build the IP header.
	 */

	/*
	 *	If we are using IPPROTO_RAW, then we don't need an IP header, since
	 *	one is being supplied to us by the user
	 */

	if(type == IPPROTO_RAW)
		return (tmp);

	iph = (struct iphdr *)buff;
	iph->version = 4;
	iph->tos = tos;
	iph->frag_off = 0;
	iph->ttl = ttl;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->protocol = type;
	iph->ihl = 5;		/* no options: 5 * 4 = 20 byte header */
	skb->ip_hdr = iph;

	/* Setup the IP options. */
#ifdef Not_Yet_Avail
	build_options(iph, opt);
#endif

	return(20 + tmp);	/* IP header plus MAC header size */
}
350
351
/*
 *	Parse the options of an IP header into *opt. Returns 0 on
 *	success, 1 on a malformed option.
 *
 *	NOTE: the unconditional return(0) immediately after the
 *	zeroing below deliberately disables option parsing — every
 *	statement after it is currently dead code, kept for when
 *	option processing is re-enabled.
 */
static int
do_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *buff;
	int done = 0;
	int i, len = sizeof(struct iphdr);

	/* Zero out the options. */
	opt->record_route.route_size = 0;
	opt->loose_route.route_size = 0;
	opt->strict_route.route_size = 0;
	opt->tstamp.ptr = 0;
	opt->security = 0;
	opt->compartment = 0;
	opt->handling = 0;
	opt->stream = 0;
	opt->tcc = 0;
	return(0);	/* option processing disabled — see note above */

	/* Advance the pointer to start at the options. */
	buff = (unsigned char *)(iph + 1);

	/* Now start the processing. */
	while (!done && len < iph->ihl*4) switch(*buff) {
		case IPOPT_END:		/* end of option list */
			done = 1;
			break;
		case IPOPT_NOOP:	/* single byte of padding */
			buff++;
			len++;
			break;
		case IPOPT_SEC:		/* security option: fixed length 11 */
			buff++;
			if (*buff != 11) return(1);
			buff++;
			opt->security = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->compartment = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->handling = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
			buff += 3;
			len += 11;
			break;
		case IPOPT_LSRR:	/* loose source and record route */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);	/* length must be 3 + 4n */
			len += *buff;
			opt->loose_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);		/* pointer must be word aligned */
			opt->loose_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->loose_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);	/* junk over-long routes */
				opt->loose_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SSRR:	/* strict source and record route */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->strict_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->strict_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->strict_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);
				opt->strict_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_RR:		/* record route */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->record_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->record_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->record_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return 1;
				opt->record_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SID:		/* stream identifier */
			len += 4;
			buff +=2;
			opt->stream = *(unsigned short *)buff;
			buff += 2;
			break;
		case IPOPT_TIMESTAMP:	/* internet timestamp */
			buff++;
			len += *buff;
			if (*buff % 4 != 0) return(1);
			opt->tstamp.len = *buff / 4 - 1;
			buff++;
			if ((*buff - 1) % 4 != 0) return(1);
			opt->tstamp.ptr = (*buff-1)/4;
			buff++;
			opt->tstamp.x.full_char = *buff;
			buff++;
			for (i = 0; i < opt->tstamp.len; i++) {
				opt->tstamp.data[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		default:
			return(1);	/* unknown option: reject the frame */
	}

	/* If no record-route option was present, borrow the route list
	   from a strict or loose source route option. */
	if (opt->record_route.route_size == 0) {
		if (opt->strict_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->strict_route),
			       sizeof(opt->record_route));
		} else if (opt->loose_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->loose_route),
			       sizeof(opt->record_route));
		}
	}

	/* Source route not yet fully traversed: let the hooks decide. */
	if (opt->strict_route.route_size != 0 &&
	    opt->strict_route.route_size != opt->strict_route.pointer) {
		strict_route(iph, opt);
		return(0);
	}

	if (opt->loose_route.route_size != 0 &&
	    opt->loose_route.route_size != opt->loose_route.pointer) {
		loose_route(iph, opt);
		return(0);
	}

	return(0);
}
498
499 /*
500 * This routine does all the checksum computations that don't
501 * require anything special (like copying or special headers).
502 */
503
/*
 *	This routine does all the checksum computations that don't
 *	require anything special (like copying or special headers).
 *
 *	Computes the 16-bit one's-complement Internet checksum of the
 *	len bytes at buff using i386 inline assembly: 32-bit words
 *	first, then a trailing 16-bit word and/or a final odd byte.
 */
unsigned short ip_compute_csum(unsigned char * buff, int len)
{
	unsigned long sum = 0;

	/* Do the first multiple of 4 bytes and convert to 16 bits. */
	if (len > 3)
	{
		__asm__("clc\n"
		"1:\t"
		"lodsl\n\t"
		"adcl %%eax, %%ebx\n\t"	/* add with carry, 32 bits at a time */
		"loop 1b\n\t"
		"adcl $0, %%ebx\n\t"
		"movl %%ebx, %%eax\n\t"
		"shrl $16, %%eax\n\t"	/* fold the 32-bit sum into 16 bits */
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum) , "=S" (buff)
		: "0" (sum), "c" (len >> 2) ,"1" (buff)
		: "ax", "cx", "si", "bx" );
	}
	if (len & 2)
	{
		/* One remaining 16-bit word. */
		__asm__("lodsw\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	if (len & 1)
	{
		/* One trailing byte, zero-extended to 16 bits. */
		__asm__("lodsb\n\t"
		"movb $0, %%ah\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	sum =~sum;		/* one's complement of the folded sum */
	return(sum & 0xffff);
}
547
548 /*
549 * Generate a checksum for an outgoing IP datagram.
550 */
551
/*
 *	Generate a checksum for an outgoing IP datagram.
 */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;		/* checksum field must be zero while summing */
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
557
558 /************************ Fragment Handlers From NET2E **********************************/
559
560
561 /*
562 * This fragment handler is a bit of a heap. On the other hand it works quite
563 * happily and handles things quite well.
564 */
565
static struct ipq *ipqueue = NULL;		/* IP fragment queue: head of the doubly linked
						   list of incomplete datagrams, walked and
						   modified with interrupts disabled */
567
568 /*
569 * Create a new fragment entry.
570 */
571
572 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
573 {
574 struct ipfrag *fp;
575
576 fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
577 if (fp == NULL)
578 {
579 printk("IP: frag_create: no memory left !\n");
580 return(NULL);
581 }
582 memset(fp, 0, sizeof(struct ipfrag));
583
584 /* Fill in the structure. */
585 fp->offset = offset;
586 fp->end = end;
587 fp->len = end - offset;
588 fp->skb = skb;
589 fp->ptr = ptr;
590
591 return(fp);
592 }
593
594
595 /*
596 * Find the correct entry in the "incomplete datagrams" queue for
597 * this IP datagram, and return the queue entry address if found.
598 */
599
/*
 *	Find the correct entry in the "incomplete datagrams" queue for
 *	this IP datagram, and return the queue entry address if found.
 *	Fragments belong to the same datagram when id, saddr, daddr
 *	and protocol all match. The list walk runs with interrupts
 *	disabled; the matched entry's timer is stopped before return.
 */
static struct ipq *ip_find(struct iphdr *iph)
{
	struct ipq *qp;
	struct ipq *qplast;

	cli();
	qplast = NULL;
	for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
	{
		if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
			iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
		{
			del_timer(&qp->timer);	/* So it doesn't vanish on us. The timer will be reset anyway */
			sti();
			return(qp);
		}
	}
	sti();
	return(NULL);
}
620
621
622 /*
623 * Remove an entry from the "incomplete datagrams" queue, either
624 * because we completed, reassembled and processed it, or because
625 * it timed out.
626 */
627
/*
 *	Remove an entry from the "incomplete datagrams" queue, either
 *	because we completed, reassembled and processed it, or because
 *	it timed out. Frees every fragment skb and all memory owned
 *	by the entry; list surgery and freeing run with interrupts
 *	disabled.
 */
static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 *	Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		/* Entry was at the head of the queue. */
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;	/* save the link before freeing the node */
		IS_SKB(fp->skb);
		kfree_skb(fp->skb,FREE_READ);
		kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the MAC header. */
	kfree_s(qp->mac, qp->maclen);

	/* Release the IP header. */
	kfree_s(qp->iph, qp->ihlen + 8);

	/* Finally, release the queue descriptor itself. */
	kfree_s(qp, sizeof(struct ipq));
	sti();
}
676
677
678 /*
679 * Oops- a fragment queue timed out. Kill it and send an ICMP reply.
680 */
681
/*
 *	Oops- a fragment queue timed out. Kill it and send an ICMP reply.
 *	Timer callback: arg is the struct ipq * installed in
 *	qp->timer.data by ip_create()/ip_defrag().
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;

	qp = (struct ipq *)arg;

	/*
	 *	Send an ICMP "Fragment Reassembly Timeout" message.
	 */

	ip_statistics.IpReasmTimeout++;
	ip_statistics.IpReasmFails++;
	/* This if is always true... shrug */
	if(qp->fragments!=NULL)
		icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
				ICMP_EXC_FRAGTIME, 0, qp->dev);

	/*
	 *	Nuke the fragment queue.
	 */
	ip_free(qp);
}
704
705
706 /*
707 * Add an entry to the 'ipq' queue for a newly received IP datagram.
708 * We will (hopefully :-) receive all other fragments of this datagram
709 * in time, so we just create a queue for this datagram, in which we
710 * will insert the received fragments at their respective positions.
711 */
712
713 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
714 {
715 struct ipq *qp;
716 int maclen;
717 int ihlen;
718
719 qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
720 if (qp == NULL)
721 {
722 printk("IP: create: no memory left !\n");
723 return(NULL);
724 skb->dev = qp->dev;
725 }
726 memset(qp, 0, sizeof(struct ipq));
727
728 /*
729 * Allocate memory for the MAC header.
730 *
731 * FIXME: We have a maximum MAC address size limit and define
732 * elsewhere. We should use it here and avoid the 3 kmalloc() calls
733 */
734
735 maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
736 qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
737 if (qp->mac == NULL)
738 {
739 printk("IP: create: no memory left !\n");
740 kfree_s(qp, sizeof(struct ipq));
741 return(NULL);
742 }
743
744 /*
745 * Allocate memory for the IP header (plus 8 octets for ICMP).
746 */
747
748 ihlen = (iph->ihl * sizeof(unsigned long));
749 qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
750 if (qp->iph == NULL)
751 {
752 printk("IP: create: no memory left !\n");
753 kfree_s(qp->mac, maclen);
754 kfree_s(qp, sizeof(struct ipq));
755 return(NULL);
756 }
757
758 /* Fill in the structure. */
759 memcpy(qp->mac, skb->data, maclen);
760 memcpy(qp->iph, iph, ihlen + 8);
761 qp->len = 0;
762 qp->ihlen = ihlen;
763 qp->maclen = maclen;
764 qp->fragments = NULL;
765 qp->dev = dev;
766
767 /* Start a timer for this entry. */
768 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */
769 qp->timer.data = (unsigned long) qp; /* pointer to queue */
770 qp->timer.function = ip_expire; /* expire function */
771 add_timer(&qp->timer);
772
773 /* Add this entry to the queue. */
774 qp->prev = NULL;
775 cli();
776 qp->next = ipqueue;
777 if (qp->next != NULL)
778 qp->next->prev = qp;
779 ipqueue = qp;
780 sti();
781 return(qp);
782 }
783
784
785 /*
786 * See if a fragment queue is complete.
787 */
788
789 static int ip_done(struct ipq *qp)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
790 {
791 struct ipfrag *fp;
792 int offset;
793
794 /* Only possible if we received the final fragment. */
795 if (qp->len == 0)
796 return(0);
797
798 /* Check all fragment offsets to see if they connect. */
799 fp = qp->fragments;
800 offset = 0;
801 while (fp != NULL)
802 {
803 if (fp->offset > offset)
804 return(0); /* fragment(s) missing */
805 offset = fp->end;
806 fp = fp->next;
807 }
808
809 /* All fragments are present. */
810 return(1);
811 }
812
813
814 /*
815 * Build a new IP datagram from all its fragments.
816 *
817 * FIXME: We copy here because we lack an effective way of handling lists
818 * of bits on input. Until the new skb data handling is in I'm not going
819 * to touch this with a bargepole. This also causes a 4Kish limit on
820 * packet sizes.
821 */
822
/*
 *	Build a new IP datagram from all its fragments.
 *
 *	FIXME: We copy here because we lack an effective way of handling lists
 *	of bits on input. Until the new skb data handling is in I'm not going
 *	to touch this with a bargepole. This also causes a 4Kish limit on
 *	packet sizes.
 *
 *	Consumes the queue entry: qp is freed on every path. Returns
 *	the reassembled skb, or NULL on allocation failure or a
 *	corrupt fragment list.
 */
static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 *	Allocate a new buffer for the datagram.
	 */

	len = qp->maclen + qp->ihlen + qp->len;

	if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb->len = (len - qp->maclen);	/* length seen by IP: header + data */
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original MAC and IP headers into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
	ptr += qp->maclen;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;		/* ptr now at the start of the data area */
	skb->h.raw += qp->maclen;	/* h.raw now points at the IP header */

	count = 0;

	/* Copy the data portions of all fragments into the new buffer. */
	fp = qp->fragments;
	while(fp != NULL)
	{
		if(count+fp->len > skb->len)
		{
			/* Defensive check: the chain claims more data than allocated. */
			printk("Invalid fragment list: Fragment over size.\n");
			ip_free(qp);
			kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header. */
	iph = skb->h.iph;
	iph->frag_off = 0;		/* reassembled: no longer a fragment */
	iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}
889
890
891 /*
892 * Process an incoming IP datagram fragment.
893 */
894
895 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
896 {
897 struct ipfrag *prev, *next;
898 struct ipfrag *tfp;
899 struct ipq *qp;
900 struct sk_buff *skb2;
901 unsigned char *ptr;
902 int flags, offset;
903 int i, ihl, end;
904
905 ip_statistics.IpReasmReqds++;
906
907 /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
908 qp = ip_find(iph);
909
910 /* Is this a non-fragmented datagram? */
911 offset = ntohs(iph->frag_off);
912 flags = offset & ~IP_OFFSET;
913 offset &= IP_OFFSET;
914 if (((flags & IP_MF) == 0) && (offset == 0))
915 {
916 if (qp != NULL)
917 ip_free(qp); /* Huh? How could this exist?? */
918 return(skb);
919 }
920
921 offset <<= 3; /* offset is in 8-byte chunks */
922
923 /*
924 * If the queue already existed, keep restarting its timer as long
925 * as we still are receiving fragments. Otherwise, create a fresh
926 * queue entry.
927 */
928
929 if (qp != NULL)
930 {
931 del_timer(&qp->timer);
932 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */
933 qp->timer.data = (unsigned long) qp; /* pointer to queue */
934 qp->timer.function = ip_expire; /* expire function */
935 add_timer(&qp->timer);
936 }
937 else
938 {
939 /*
940 * If we failed to create it, then discard the frame
941 */
942 if ((qp = ip_create(skb, iph, dev)) == NULL)
943 {
944 skb->sk = NULL;
945 kfree_skb(skb, FREE_READ);
946 ip_statistics.IpReasmFails++;
947 return NULL;
948 }
949 }
950
951 /*
952 * Determine the position of this fragment.
953 */
954
955 ihl = (iph->ihl * sizeof(unsigned long));
956 end = offset + ntohs(iph->tot_len) - ihl;
957
958 /*
959 * Point into the IP datagram 'data' part.
960 */
961
962 ptr = skb->data + dev->hard_header_len + ihl;
963
964 /*
965 * Is this the final fragment?
966 */
967
968 if ((flags & IP_MF) == 0)
969 qp->len = end;
970
971 /*
972 * Find out which fragments are in front and at the back of us
973 * in the chain of fragments so far. We must know where to put
974 * this fragment, right?
975 */
976
977 prev = NULL;
978 for(next = qp->fragments; next != NULL; next = next->next)
979 {
980 if (next->offset > offset)
981 break; /* bingo! */
982 prev = next;
983 }
984
985 /*
986 * We found where to put this one.
987 * Check for overlap with preceding fragment, and, if needed,
988 * align things so that any overlaps are eliminated.
989 */
990 if (prev != NULL && offset < prev->end)
991 {
992 i = prev->end - offset;
993 offset += i; /* ptr into datagram */
994 ptr += i; /* ptr into fragment data */
995 }
996
997 /*
998 * Look for overlap with succeeding segments.
999 * If we can merge fragments, do it.
1000 */
1001
1002 for(; next != NULL; next = tfp)
1003 {
1004 tfp = next->next;
1005 if (next->offset >= end)
1006 break; /* no overlaps at all */
1007
1008 i = end - next->offset; /* overlap is 'i' bytes */
1009 next->len -= i; /* so reduce size of */
1010 next->offset += i; /* next fragment */
1011 next->ptr += i;
1012
1013 /*
1014 * If we get a frag size of <= 0, remove it and the packet
1015 * that it goes with.
1016 */
1017 if (next->len <= 0)
1018 {
1019 if (next->prev != NULL)
1020 next->prev->next = next->next;
1021 else
1022 qp->fragments = next->next;
1023
1024 if (tfp->next != NULL)
1025 next->next->prev = next->prev;
1026
1027 kfree_skb(next->skb,FREE_READ);
1028 kfree_s(next, sizeof(struct ipfrag));
1029 }
1030 }
1031
1032 /*
1033 * Insert this fragment in the chain of fragments.
1034 */
1035
1036 tfp = NULL;
1037 tfp = ip_frag_create(offset, end, skb, ptr);
1038
1039 /*
1040 * No memory to save the fragment - so throw the lot
1041 */
1042
1043 if (!tfp)
1044 {
1045 skb->sk = NULL;
1046 kfree_skb(skb, FREE_READ);
1047 return NULL;
1048 }
1049 tfp->prev = prev;
1050 tfp->next = next;
1051 if (prev != NULL)
1052 prev->next = tfp;
1053 else
1054 qp->fragments = tfp;
1055
1056 if (next != NULL)
1057 next->prev = tfp;
1058
1059 /*
1060 * OK, so we inserted this new fragment into the chain.
1061 * Check if we now have a full IP datagram which we can
1062 * bump up to the IP layer...
1063 */
1064
1065 if (ip_done(qp))
1066 {
1067 skb2 = ip_glue(qp); /* glue together the fragments */
1068 return(skb2);
1069 }
1070 return(NULL);
1071 }
1072
1073
1074 /*
1075 * This IP datagram is too large to be sent in one piece. Break it up into
1076 * smaller pieces (each of size equal to the MAC header plus IP header plus
1077 * a block of the data of the original IP data part) that will yet fit in a
1078 * single device frame, and queue such a frame for sending by calling the
1079 * ip_queue_xmit(). Note that this is recursion, and bad things will happen
1080 * if this function causes a loop...
1081 *
1082 * Yes this is inefficient, feel free to submit a quicker one.
1083 *
1084 * **Protocol Violation**
1085 * We copy all the options to each fragment. !FIXME!
1086 */
1087 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
/*
 * Split an over-MTU IP datagram held in 'skb' into pieces and hand
 * each piece to ip_queue_xmit() for transmission on 'dev'.
 *
 *	sk	owning socket (may be NULL); charged for the copies' memory
 *	skb	the full datagram, including the device hard header
 *	dev	output device whose MTU we must respect
 *	is_frag	flags from ip_rcv(): bit 0 = more fragments follow this
 *		datagram, bit 1 = this datagram is itself a fragment, so
 *		the starting offset comes from its own header
 *
 * The caller keeps ownership of 'skb'; only the copies are queued.
 */
1088 {
1089 struct iphdr *iph;
1090 unsigned char *raw;
1091 unsigned char *ptr;
1092 struct sk_buff *skb2;
1093 int left, mtu, hlen, len;
1094 int offset;
1095 unsigned long flags;
1096
1097 /*
1098 * Point into the IP datagram header.
1099 */
1100
1101 raw = skb->data;
1102 iph = (struct iphdr *) (raw + dev->hard_header_len);
1103
1104 skb->ip_hdr = iph;
1105
1106 /*
1107 * Setup starting values.
1108 */
1109
/* NOTE(review): ihl counts 32-bit words; this assumes
 * sizeof(unsigned long)==4, which is false on 64-bit targets -- confirm. */
1110 hlen = (iph->ihl * sizeof(unsigned long));
1111 left = ntohs(iph->tot_len) - hlen; /* Space per frame */
1112 hlen += dev->hard_header_len; /* Total header size */
1113 mtu = (dev->mtu - hlen); /* Size of data space */
1114 ptr = (raw + hlen); /* Where to start from */
1115
1116 /*
1117 * Check for any "DF" flag. [DF means do not fragment]
1118 */
1119
1120 if (ntohs(iph->frag_off) & IP_DF)
1121 {
1122 /*
1123 * Reply giving the MTU of the failed hop.
1124 */
1125 ip_statistics.IpFragFails++;
1126 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
1127 return;
1128 }
1129
1130 /*
1131 * The protocol doesn't seem to say what to do in the case that the
1132 * frame + options doesn't fit the mtu. As it used to fall down dead
1133 * in this case we were fortunate it didn't happen
1134 */
1135
/* A fragment must carry at least 8 data bytes: offsets count 8-byte units. */
1136 if(mtu<8)
1137 {
1138 /* It's wrong but it's better than nothing */
1139 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
1140 ip_statistics.IpFragFails++;
1141 return;
1142 }
1143
1144 /*
1145 * Fragment the datagram.
1146 */
1147
1148 /*
1149 * The initial offset is 0 for a complete frame. When
1150 * fragmenting fragments it's wherever this one starts.
1151 */
1152
1153 if (is_frag & 2)
1154 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1155 else
1156 offset = 0;
1157
1158
1159 /*
1160 * Keep copying data until we run out.
1161 */
1162
1163 while(left > 0)
1164 {
1165 len = left;
1166 /* IF: it doesn't fit, use 'mtu' - the data space left */
1167 if (len > mtu)
1168 len = mtu;
1169 /* IF: we are not sending upto and including the packet end
1170 then align the next start on an eight byte boundary */
1171 if (len < left)
1172 {
/* Round down to a multiple of 8 -- fragment offsets are in 8-byte units. */
1173 len/=8;
1174 len*=8;
1175 }
1176 /*
1177 * Allocate buffer.
1178 */
1179
1180 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1181 {
/* NOTE(review): fragments already queued stay queued; the rest of the
 * datagram is silently dropped here. */
1182 printk("IP: frag: no memory for new fragment!\n");
1183 ip_statistics.IpFragFails++;
1184 return;
1185 }
1186
1187 /*
1188 * Set up data on packet
1189 */
1190
1191 skb2->arp = skb->arp;
1192 if(skb->free==0)
1193 printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1194 skb2->free = 1;
1195 skb2->len = len + hlen;
1196 skb2->h.raw=(char *) skb2->data;
1197 /*
1198 * Charge the memory for the fragment to any owner
1199 * it might possess
1200 */
1201
1202 save_flags(flags);
1203 if (sk)
1204 {
1205 cli();
1206 sk->wmem_alloc += skb2->mem_len;
1207 skb2->sk=sk;
1208 }
1209 restore_flags(flags);
1210 skb2->raddr = skb->raddr; /* For rebuild_header - must be here */
1211
1212 /*
1213 * Copy the packet header into the new buffer.
1214 */
1215
1216 memcpy(skb2->h.raw, raw, hlen);
1217
1218 /*
1219 * Copy a block of the IP datagram.
1220 */
1221 memcpy(skb2->h.raw + hlen, ptr, len);
1222 left -= len;
1223
1224 skb2->h.raw+=dev->hard_header_len;
1225
1226 /*
1227 * Fill in the new header fields. tot_len and the header checksum
1228 * of each fragment are filled in later by ip_queue_xmit().
1229 */
1229 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1230 iph->frag_off = htons((offset >> 3));
1231 /*
1232 * Added AC : If we are fragmenting a fragment that's not the
1233 * last fragment then keep MF on each bit
1234 */
1235 if (left > 0 || (is_frag & 1))
1236 iph->frag_off |= htons(IP_MF);
1237 ptr += len;
1238 offset += len;
1239
1240 /*
1241 * Put this fragment into the sending queue.
1242 */
1243
1244 ip_statistics.IpFragCreates++;
1245
/* free==2: tells ip_queue_xmit not to assign a fresh IP id to this piece. */
1246 ip_queue_xmit(sk, dev, skb2, 2);
1247 }
1248 ip_statistics.IpFragOKs++;
1249 }
1250
1251
1252
1253 #ifdef CONFIG_IP_FORWARD
1254
1255 /*
1256 * Forward an IP datagram to its next destination.
1257 */
1258
1259 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
/*
 * Forward an IP datagram towards its next hop.
 *
 *	skb	the received datagram (caller retains ownership; this
 *		function always copies before transmitting)
 *	dev	device the datagram arrived on
 *	is_frag	fragment flags computed by ip_rcv(), passed through to
 *		ip_fragment() if the output MTU forces refragmentation
 *
 * Silently drops (by just returning) on firewall denial, expired TTL,
 * no route, or allocation failure; ICMP errors are sent where the
 * protocol requires them.
 */
1260 {
1261 struct device *dev2; /* Output device */
1262 struct iphdr *iph; /* Our header */
1263 struct sk_buff *skb2; /* Output packet */
1264 struct rtable *rt; /* Route we use */
1265 unsigned char *ptr; /* Data pointer */
1266 unsigned long raddr; /* Router IP address */
1267
1268 /*
1269 * See if we are allowed to forward this.
1270 */
1271
1272 #ifdef CONFIG_IP_FIREWALL
1273 int err;
1274
1275 if((err=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0))!=1)
1276 {
1277 if(err==-1)
1278 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1279 return;
1280 }
1281 #endif
1282 /*
1283 * According to the RFC, we must first decrease the TTL field. If
1284 * that reaches zero, we must reply an ICMP control message telling
1285 * that the packet's lifetime expired.
1286 *
1287 * Exception:
1288 * We may not generate an ICMP for an ICMP. icmp_send does the
1289 * enforcement of this so we can forget it here. It is however
1290 * sometimes VERY important.
1291 */
1292
1293 iph = skb->h.iph;
/* NOTE(review): if ttl is an unsigned byte and arrives as 0, the
 * decrement wraps to 255 and the <=0 test misses it -- confirm that
 * ttl==0 input cannot reach here. */
1294 iph->ttl--;
1295 if (iph->ttl <= 0)
1296 {
1297 /* Tell the sender its packet died... */
1298 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1299 return;
1300 }
1301
1302 /*
1303 * Re-compute the IP header checksum.
1304 * This is inefficient. We know what has happened to the header
1305 * and could thus adjust the checksum as Phil Karn does in KA9Q
1306 */
1307
1308 ip_send_check(iph);
1309
1310 /*
1311 * OK, the packet is still valid. Fetch its destination address,
1312 * and give it to the IP sender for further processing.
1313 */
1314
1315 rt = ip_rt_route(iph->daddr, NULL, NULL);
1316 if (rt == NULL)
1317 {
1318 /*
1319 * Tell the sender its packet cannot be delivered. Again
1320 * ICMP is screened later.
1321 */
1322 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1323 return;
1324 }
1325
1326
1327 /*
1328 * Gosh. Not only is the packet valid; we even know how to
1329 * forward it onto its final destination. Can we say this
1330 * is being plain lucky?
1331 * If the router told us that there is no GW, use the dest.
1332 * IP address itself- we seem to be connected directly...
1333 */
1334
1335 raddr = rt->rt_gateway;
1336
1337 if (raddr != 0)
1338 {
1339 /*
1340 * There is a gateway so find the correct route for it.
1341 * Gateways cannot in turn be gatewayed.
1342 */
1343 rt = ip_rt_route(raddr, NULL, NULL);
1344 if (rt == NULL)
1345 {
1346 /*
1347 * Tell the sender its packet cannot be delivered...
1348 */
1349 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1350 return;
1351 }
1352 if (rt->rt_gateway != 0)
1353 raddr = rt->rt_gateway;
1354 }
1355 else
1356 raddr = iph->daddr;
1357
1358 /*
1359 * Having picked a route we can now send the frame out.
1360 */
1361
1362 dev2 = rt->rt_dev;
1363
1364 /*
1365 * In IP you never have to forward a frame on the interface that it
1366 * arrived upon. We now generate an ICMP HOST REDIRECT giving the route
1367 * we calculated.
1368 */
1369 #ifdef CONFIG_IP_NO_ICMP_REDIRECT
1370 if (dev == dev2)
1371 return;
1372 #else
/* Note: the redirect is advisory only -- we still forward the frame below. */
1373 if (dev == dev2 && (iph->saddr&dev->pa_mask) == (iph->daddr & dev->pa_mask))
1374 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1375 #endif
1376
1377 /*
1378 * We now allocate a new buffer, and copy the datagram into it.
1379 * If the indicated interface is up and running, kick it.
1380 */
1381
1382 if (dev2->flags & IFF_UP)
1383 {
1384
1385 /*
1386 * Current design decrees we copy the packet. For identical header
1387 * lengths we could avoid it. The new skb code will let us push
1388 * data so the problem goes away then.
1389 */
1390
1391 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1392 /*
1393 * This is rare and since IP is tolerant of network failures
1394 * quite harmless.
1395 */
1396 if (skb2 == NULL)
1397 {
1398 printk("\nIP: No memory available for IP forward\n");
1399 return;
1400 }
1401 ptr = skb2->data;
1402 skb2->free = 1;
1403 skb2->len = skb->len + dev2->hard_header_len;
1404 skb2->h.raw = ptr;
1405
1406 /*
1407 * Copy the packet data into the new buffer.
1408 */
1409 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1410
1411 /* Now build the MAC header. */
1412 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1413
1414 ip_statistics.IpForwDatagrams++;
1415
1416 /*
1417 * See if it needs fragmenting. Note in ip_rcv we tagged
1418 * the fragment type. This must be right so that
1419 * the fragmenter does the right thing.
1420 */
1421
1422 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1423 {
/* ip_fragment() queues copies, so the intermediate skb2 is freed here. */
1424 ip_fragment(NULL,skb2,dev2, is_frag);
1425 kfree_skb(skb2,FREE_WRITE);
1426 }
1427 else
1428 {
1429 #ifdef CONFIG_IP_ACCT
1430 /*
1431 * Count mapping we shortcut
1432 */
1433
1434 ip_acct_cnt(iph,dev,ip_acct_chain);
1435 #endif
1436
1437 /*
1438 * Map service types to priority. We lie about
1439 * throughput being low priority, but it's a good
1440 * choice to help improve general usage.
1441 */
1442 if(iph->tos & IPTOS_LOWDELAY)
1443 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1444 else if(iph->tos & IPTOS_THROUGHPUT)
1445 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1446 else
1447 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1448 }
1449 }
1450 }
1451
1452
1453 #endif
1454
1455 /*
1456 * This function receives all incoming IP datagrams.
1457 */
1458
1459 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
/*
 * Entry point for every incoming IP datagram.
 *
 * Validates the header, runs the firewall, handles options, decides
 * local-delivery vs. forwarding, reassembles fragments, then delivers
 * to raw sockets and to each registered transport protocol. Always
 * returns 0; the skb is either consumed here, handed to a protocol,
 * or freed.
 */
1460 {
1461 struct iphdr *iph = skb->h.iph;
1462 struct sock *raw_sk=NULL;
1463 unsigned char hash;
1464 unsigned char flag = 0;
1465 unsigned char opts_p = 0; /* Set iff the packet has options. */
1466 struct inet_protocol *ipprot;
1467 static struct options opt; /* since we don't use these yet, and they
1468 take up stack space. */
1469 int brd=IS_MYADDR;
1470 int is_frag=0;
1471 #ifdef CONFIG_IP_FIREWALL
1472 int err;
1473 #endif
1474
1475 ip_statistics.IpInReceives++;
1476
1477 /*
1478 * Tag the ip header of this packet so we can find it
1479 */
1480
1481 skb->ip_hdr = iph;
1482
1483 /*
1484 * Is the datagram acceptable?
1485 *
1486 * 1. Length at least the size of an ip header
1487 * 2. Version of 4
1488 * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]
1489 * (4. We ought to check for IP multicast addresses and undefined types.. does this matter ?)
1490 */
1491
1492 if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 ||
1493 skb->len<ntohs(iph->tot_len) || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1494 {
1495 ip_statistics.IpInHdrErrors++;
1496 kfree_skb(skb, FREE_WRITE);
1497 return(0);
1498 }
1499
1500 /*
1501 * See if the firewall wants to dispose of the packet.
1502 */
1503
1504 #ifdef CONFIG_IP_FIREWALL
1505
1506 if ((err=ip_fw_chk(iph,dev,ip_fw_blk_chain,ip_fw_blk_policy, 0))!=1)
1507 {
1508 if(err==-1)
1509 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
1510 kfree_skb(skb, FREE_WRITE);
1511 return 0;
1512 }
1513
1514 #endif
1515
1516 /*
1517 * Our transport medium may have padded the buffer out. Now we know it
1518 * is IP we can trim to the true length of the frame.
1519 */
1520
1521 skb->len=ntohs(iph->tot_len);
1522
1523 /*
1524 * Next analyse the packet for options. Studies show under one packet in
1525 * a thousand have options....
1526 */
1527
1528 if (iph->ihl != 5)
1529 { /* Fast path for the typical optionless IP packet. */
1530 memset((char *) &opt, 0, sizeof(opt));
/* NOTE(review): on option-processing failure we return without freeing
 * skb -- looks like a leak unless do_options() frees it; confirm. */
1531 if (do_options(iph, &opt) != 0)
1532 return 0;
1533 opts_p = 1;
1534 }
1535
1536 /*
1537 * Remember if the frame is fragmented.
1538 */
1539
1540 if(iph->frag_off)
1541 {
/* NOTE(review): 0x0020 tests the MF bit in the raw network-order field;
 * this matches htons(IP_MF) only on little-endian hosts -- confirm. */
1542 if (iph->frag_off & 0x0020)
1543 is_frag|=1;
1544 /*
1545 * Last fragment ?
1546 */
1547
/* Non-zero fragment offset: this frame is itself a later fragment. */
1548 if (ntohs(iph->frag_off) & 0x1fff)
1549 is_frag|=2;
1550 }
1551
1552 /*
1553 * Do any IP forwarding required. chk_addr() is expensive -- avoid it someday.
1554 *
1555 * This is inefficient. While finding out if it is for us we could also compute
1556 * the routing table entry. This is where the great unified cache theory comes
1557 * in as and when someone implements it
1558 *
1559 * For most hosts over 99% of packets match the first conditional
1560 * and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1561 * function entry.
1562 */
1563
1564 if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1565 {
1566 /*
1567 * Don't forward multicast or broadcast frames.
1568 */
1569
1570 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1571 {
1572 kfree_skb(skb,FREE_WRITE);
1573 return 0;
1574 }
1575
1576 /*
1577 * The packet is for another target. Forward the frame
1578 */
1579
1580 #ifdef CONFIG_IP_FORWARD
1581 ip_forward(skb, dev, is_frag);
1582 #else
1583 /* printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1584 iph->saddr,iph->daddr);*/
1585 ip_statistics.IpInAddrErrors++;
1586 #endif
1587 /*
1588 * The forwarder is inefficient and copies the packet. We
1589 * free the original now.
1590 */
1591
1592 kfree_skb(skb, FREE_WRITE);
1593 return(0);
1594 }
1595
1596 #ifdef CONFIG_IP_MULTICAST
1597
1598 if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1599 {
1600 /*
1601 * Check it is for one of our groups
1602 */
1603 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1604 do
1605 {
/* End of list without a match: not one of our groups, drop it. */
1606 if(ip_mc==NULL)
1607 {
1608 kfree_skb(skb, FREE_WRITE);
1609 return 0;
1610 }
1611 if(ip_mc->multiaddr==iph->daddr)
1612 break;
1613 ip_mc=ip_mc->next;
1614 }
1615 while(1);
1616 }
1617 #endif
1618 /*
1619 * Account for the packet
1620 */
1621
1622 #ifdef CONFIG_IP_ACCT
1623 ip_acct_cnt(iph,dev, ip_acct_chain);
1624 #endif
1625
1626 /*
1627 * Reassemble IP fragments.
1628 */
1629
1630 if(is_frag)
1631 {
1632 /* Defragment. Obtain the complete packet if there is one */
1633 skb=ip_defrag(iph,skb,dev);
1634 if(skb==NULL)
1635 return 0;
1636 skb->dev = dev;
1637 iph=skb->h.iph;
1638 }
1639
1640
1641
1642 /*
1643 * Point into the IP datagram, just past the header.
1644 */
1645
1646 skb->ip_hdr = iph;
1647 skb->h.raw += iph->ihl*4;
1648
1649 /*
1650 * Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
1651 */
1652
1653 hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1654
1655 /* If there maybe a raw socket we must check - if not we don't care less */
1656 if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1657 {
1658 struct sock *sknext=NULL;
1659 struct sk_buff *skb1;
1660 raw_sk=get_sock_raw(raw_sk, hash, iph->saddr, iph->daddr);
1661 if(raw_sk) /* Any raw sockets */
1662 {
1663 do
1664 {
1665 /* Find the next */
1666 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1667 if(sknext)
1668 skb1=skb_clone(skb, GFP_ATOMIC);
1669 else
1670 break; /* One pending raw socket left */
/* A failed clone simply skips this raw listener -- best effort. */
1671 if(skb1)
1672 raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1673 raw_sk=sknext;
1674 }
1675 while(raw_sk!=NULL);
1676 /* Here either raw_sk is the last raw socket, or NULL if none */
1677 /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1678 }
1679 }
1680
1681 /*
1682 * skb->h.raw now points at the protocol beyond the IP header.
1683 */
1684
1685 hash = iph->protocol & (MAX_INET_PROTOS -1);
1686 for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1687 {
1688 struct sk_buff *skb2;
1689
1690 if (ipprot->protocol != iph->protocol)
1691 continue;
1692 /*
1693 * See if we need to make a copy of it. This will
1694 * only be set if more than one protocol wants it.
1695 * and then not for the last one. If there is a pending
1696 * raw delivery wait for that
1697 */
1698 if (ipprot->copy || raw_sk)
1699 {
1700 skb2 = skb_clone(skb, GFP_ATOMIC);
1701 if(skb2==NULL)
1702 continue;
1703 }
1704 else
1705 {
1706 skb2 = skb;
1707 }
/* flag records that at least one protocol accepted the datagram. */
1708 flag = 1;
1709
1710 /*
1711 * Pass on the datagram to each protocol that wants it,
1712 * based on the datagram protocol. We should really
1713 * check the protocol handler's return values here...
1714 */
1715 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1716 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1717 iph->saddr, 0, ipprot);
1718
1719 }
1720
1721 /*
1722 * All protocols checked.
1723 * If this packet was a broadcast, we may *not* reply to it, since that
1724 * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1725 * ICMP reply messages get queued up for transmission...)
1726 */
1727
1728 if(raw_sk!=NULL) /* Shift to last raw user */
1729 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1730 else if (!flag) /* Free and report errors */
1731 {
1732 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1733 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
1734 kfree_skb(skb, FREE_WRITE);
1735 }
1736
1737 return(0);
1738 }
1739
1740 /*
1741 * Loop a packet back to the sender.
1742 */
1743
1744 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
/*
 * Copy an outgoing datagram onto the loopback device so local
 * listeners (e.g. other multicast group members on this host) see it.
 *
 *	old_dev	device the original is being sent on; its hard header
 *		is stripped and replaced by the loopback one
 *	skb	the original packet (left untouched; we queue a copy)
 *
 * Allocation failure is silently ignored -- loopback delivery is
 * best effort.
 */
1745 {
1746 extern struct device loopback_dev;
1747 struct device *dev=&loopback_dev;
/* len = size of the IP datagram alone (old hard header excluded). */
1748 int len=skb->len-old_dev->hard_header_len;
1749 struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1750
1751 if(newskb==NULL)
1752 return;
1753
1754 newskb->link3=NULL;
1755 newskb->sk=NULL;
1756 newskb->dev=dev;
1757 newskb->saddr=skb->saddr;
1758 newskb->daddr=skb->daddr;
1759 newskb->raddr=skb->raddr;
1760 newskb->free=1;
1761 newskb->lock=0;
1762 newskb->users=0;
1763 newskb->pkt_type=skb->pkt_type;
1764 newskb->len=len+dev->hard_header_len;
1765
1766
/* ip_send() builds the loopback MAC header and (presumably) returns its
 * length, so ip_hdr lands just past it -- TODO confirm return contract. */
1767 newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1768 memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1769
1770 /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1771
1772 /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1773 ip_queue_xmit(NULL, dev, newskb, 1);
1774 }
1775
1776
1777 /*
1778 * Queues a packet to be sent, and starts the transmitter
1779 * if necessary. if free = 1 then we free the block after
1780 * transmit, otherwise we don't. If free==2 we not only
1781 * free the block but also don't assign a new ip seq number.
1782 * This routine also needs to put in the total length,
1783 * and compute the checksum
1784 */
1785
1786 void ip_queue_xmit(struct sock *sk, struct device *dev,
1787 struct sk_buff *skb, int free)
/*
 * Queue a fully-built IP datagram for transmission on 'dev'.
 *
 *	sk	owning socket, or NULL for internally generated frames
 *	skb	the packet (hard header space + IP header + payload)
 *	free	1 = free after transmit, 0 = keep on sk's send queue
 *		(TCP retransmission), 2 = like 1 but keep the existing
 *		IP id (used by ip_fragment for the pieces)
 *
 * Fills in tot_len, the IP id and the header checksum, fragments if
 * the frame exceeds the device MTU, loops back multicasts/broadcasts
 * for local listeners, then hands the frame to dev_queue_xmit().
 */
1788 {
1789 struct iphdr *iph;
1790 unsigned char *ptr;
1791
1792 /* Sanity check */
1793 if (dev == NULL)
1794 {
1795 printk("IP: ip_queue_xmit dev = NULL\n");
1796 return;
1797 }
1798
1799 IS_SKB(skb);
1800
1801 /*
1802 * Do some book-keeping in the packet for later
1803 */
1804
1805
1806 skb->dev = dev;
1807 skb->when = jiffies;
1808
1809 /*
1810 * Find the IP header and set the length. This is bad
1811 * but once we get the skb data handling code in the
1812 * hardware will push its header sensibly and we will
1813 * set skb->ip_hdr to avoid this mess and the fixed
1814 * header length problem
1815 */
1816
1817 ptr = skb->data;
1818 ptr += dev->hard_header_len;
1819 iph = (struct iphdr *)ptr;
1820 skb->ip_hdr = iph;
/* Host-to-network conversion: this is htons, not ntohs (the two are
 * the same byte swap on real machines, but the intent is host->net). */
1821 iph->tot_len = htons(skb->len-dev->hard_header_len);
1822
1823 #ifdef CONFIG_IP_FIREWALL
/* NOTE(review): dropping here without freeing skb looks like a leak
 * for free!=0 callers -- confirm ownership before changing. */
1824 if(ip_fw_chk(iph, dev, ip_fw_blk_chain, ip_fw_blk_policy, 0) != 1)
1825 /* just don't send this packet */
1826 return;
1827 #endif
1828
1829 /*
1830 * No reassigning numbers to fragments...
1831 */
1832
1833 if(free!=2)
1834 iph->id = htons(ip_id_count++);
1835 else
1836 free=1;
1837
1838 /* All buffers without an owner socket get freed */
1839 if (sk == NULL)
1840 free = 1;
1841
1842 skb->free = free;
1843
1844 /*
1845 * Do we need to fragment. Again this is inefficient.
1846 * We need to somehow lock the original buffer and use
1847 * bits of it.
1848 */
1849
1850 if(skb->len > dev->mtu + dev->hard_header_len)
1851 {
1852 ip_fragment(sk,skb,dev,0);
1853 IS_SKB(skb);
1854 kfree_skb(skb,FREE_WRITE);
1855 return;
1856 }
1857
1858 /*
1859 * Add an IP checksum
1860 */
1861
1862 ip_send_check(iph);
1863
1864 /*
1865 * Print the frame when debugging
1866 */
1867
1868 /*
1869 * More debugging. You cannot queue a packet already on a list
1870 * Spot this and moan loudly.
1871 */
1872 if (skb->next != NULL)
1873 {
1874 printk("ip_queue_xmit: next != NULL\n");
1875 skb_unlink(skb);
1876 }
1877
1878 /*
1879 * If a sender wishes the packet to remain unfreed
1880 * we add it to his send queue. This arguably belongs
1881 * in the TCP level since nobody else uses it. BUT
1882 * remember IPng might change all the rules.
1883 */
1884
1885 if (!free)
1886 {
1887 unsigned long flags;
1888 /* The socket now has more outstanding blocks */
1889
1890 sk->packets_out++;
1891
1892 /* Protect the list for a moment */
1893 save_flags(flags);
1894 cli();
1895
1896 if (skb->link3 != NULL)
1897 {
1898 printk("ip.c: link3 != NULL\n");
1899 skb->link3 = NULL;
1900 }
1901 if (sk->send_head == NULL)
1902 {
1903 sk->send_tail = skb;
1904 sk->send_head = skb;
1905 }
1906 else
1907 {
1908 sk->send_tail->link3 = skb;
1909 sk->send_tail = skb;
1910 }
1911 /* skb->link3 is NULL */
1912
1913 /* Interrupt restore */
1914 restore_flags(flags);
1915 }
1916 else
1917 /* Remember who owns the buffer */
1918 skb->sk = sk;
1919
1920 /*
1921 * If the indicated interface is up and running, send the packet.
1922 */
1923
1924 ip_statistics.IpOutRequests++;
1925 #ifdef CONFIG_IP_ACCT
1926 ip_acct_cnt(iph,dev, ip_acct_chain);
1927 #endif
1928
1929 #ifdef CONFIG_IP_MULTICAST
1930
1931 /*
1932 * Multicasts are looped back for other local users
1933 */
1934
1935 if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1936 {
1937 if(sk==NULL || sk->ip_mc_loop)
1938 {
1939 if(iph->daddr==IGMP_ALL_HOSTS)
1940 ip_loopback(dev,skb);
1941 else
1942 {
/* Loop back only if we are a member of the destination group. */
1943 struct ip_mc_list *imc=dev->ip_mc_list;
1944 while(imc!=NULL)
1945 {
1946 if(imc->multiaddr==iph->daddr)
1947 {
1948 ip_loopback(dev,skb);
1949 break;
1950 }
1951 imc=imc->next;
1952 }
1953 }
1954 }
1955 /* Multicasts with ttl 0 must not go beyond the host */
1956
/* NOTE(review): FREE_READ on a transmit-path buffer -- other exits here
 * use FREE_WRITE; confirm which accounting pool this should hit. */
1957 if(skb->ip_hdr->ttl==0)
1958 {
1959 kfree_skb(skb, FREE_READ);
1960 return;
1961 }
1962 }
1963 #endif
1964 if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1965 ip_loopback(dev,skb);
1966
1967 if (dev->flags & IFF_UP)
1968 {
1969 /*
1970 * If we have an owner use its priority setting,
1971 * otherwise use NORMAL
1972 */
1973
1974 if (sk != NULL)
1975 {
1976 dev_queue_xmit(skb, dev, sk->priority);
1977 }
1978 else
1979 {
1980 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1981 }
1982 }
1983 else
1984 {
1985 ip_statistics.IpOutDiscards++;
1986 if (free)
1987 kfree_skb(skb, FREE_WRITE);
1988 }
1989 }
1990
1991
1992
1993 #ifdef CONFIG_IP_MULTICAST
1994
1995 /*
1996 * Write an multicast group list table for the IGMP daemon to
1997 * read.
1998 */
1999
2000 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
/*
 * /proc read handler: dump the multicast group membership of every
 * up, multicast-capable device for the IGMP daemon.
 *
 *	buffer	page to format into
 *	start	out: where the caller's requested window begins
 *	offset	byte offset into the virtual file
 *	length	maximum bytes wanted
 *
 * Returns the number of bytes available at *start (standard pre-seq_file
 * /proc windowing: pos/begin track how much of the virtual file has
 * been generated so far).
 */
2001 {
2002 off_t pos=0, begin=0;
2003 struct ip_mc_list *im;
2004 unsigned long flags;
2005 int len=0;
2006 struct device *dev;
2007
2008 len=sprintf(buffer,"Device : Count\tGroup Users Timer\n");
/* Interrupts off so the device and membership lists cannot change
 * while we walk them. */
2009 save_flags(flags);
2010 cli();
2011
2012 for(dev = dev_base; dev; dev = dev->next)
2013 {
2014 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
2015 {
2016 len+=sprintf(buffer+len,"%-10s: %5d\n",
2017 dev->name, dev->mc_count);
2018 for(im = dev->ip_mc_list; im; im = im->next)
2019 {
2020 len+=sprintf(buffer+len,
2021 "\t\t\t%08lX %5d %d:%08lX\n",
2022 im->multiaddr, im->users,
2023 im->tm_running, im->timer.expires);
2024 pos=begin+len;
/* Still before the requested window: discard what we have so far. */
2025 if(pos<offset)
2026 {
2027 len=0;
2028 begin=pos;
2029 }
/* NOTE(review): this break only leaves the inner group loop; the
 * outer device loop keeps formatting -- confirm intended. */
2030 if(pos>offset+length)
2031 break;
2032 }
2033 }
2034 }
2035 restore_flags(flags);
2036 *start=buffer+(offset-begin);
2037 len-=(offset-begin);
2038 if(len>length)
2039 len=length;
2040 return len;
2041 }
2042
2043
2044 #endif
2045 /*
2046 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on
2047 * an IP socket.
2048 *
2049 * We implement IP_TOS (type of service), IP_TTL (time to live).
2050 *
2051 * Next release we will sort out IP_OPTIONS since for some people are kind of important.
2052 */
2053
2054 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
/*
 * Set an IP-level socket option. End of the line after any TCP/UDP
 * option handling. Implements IP_TOS, IP_TTL, the multicast options
 * (when configured) and the firewall/accounting control calls.
 * Returns 0 on success or a negative errno.
 */
2055 {
2056 int val,err;
2057 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2058 struct ip_fw tmp_fw;
2059 #endif
2060 if (optval == NULL)
2061 return(-EINVAL);
2062
/* An int is read up front for every option; byte-sized multicast
 * options re-read their value with get_fs_byte below. */
2063 err=verify_area(VERIFY_READ, optval, sizeof(int));
2064 if(err)
2065 return err;
2066
2067 val = get_fs_long((unsigned long *)optval);
2068
2069 if(level!=SOL_IP)
2070 return -EOPNOTSUPP;
2071
2072 switch(optname)
2073 {
2074 case IP_TOS:
2075 if(val<0||val>255)
2076 return -EINVAL;
2077 sk->ip_tos=val;
/* TOS also steers the default queueing priority. */
2078 if(val==IPTOS_LOWDELAY)
2079 sk->priority=SOPRI_INTERACTIVE;
2080 if(val==IPTOS_THROUGHPUT)
2081 sk->priority=SOPRI_BACKGROUND;
2082 return 0;
2083 case IP_TTL:
2084 if(val<1||val>255)
2085 return -EINVAL;
2086 sk->ip_ttl=val;
2087 return 0;
2088 #ifdef CONFIG_IP_MULTICAST
2089 case IP_MULTICAST_TTL:
2090 {
2091 unsigned char ucval;
2092
2093 ucval=get_fs_byte((unsigned char *)optval);
2094 sk->ip_mc_ttl=(int)ucval;
2095 return 0;
2096 }
2097 case IP_MULTICAST_LOOP:
2098 {
2099 unsigned char ucval;
2100
2101 ucval=get_fs_byte((unsigned char *)optval);
2102 if(ucval!=0 && ucval!=1)
2103 return -EINVAL;
2104 sk->ip_mc_loop=(int)ucval;
2105 return 0;
2106 }
2107 case IP_MULTICAST_IF:
2108 {
2109 /* Not fully tested */
2110 struct in_addr addr;
2111 struct device *dev=NULL;
2112
2113 /*
2114 * Check the arguments are allowable
2115 */
2116
2117 err=verify_area(VERIFY_READ, optval, sizeof(addr));
2118 if(err)
2119 return err;
2120
2121 memcpy_fromfs(&addr,optval,sizeof(addr));
2122
/* NOTE(review): debug printk left in -- fires on every bind. */
2123 printk("MC bind %s\n", in_ntoa(addr.s_addr));
2124
2125 /*
2126 * What address has been requested
2127 */
2128
2129 if(addr.s_addr==INADDR_ANY) /* Default */
2130 {
2131 sk->ip_mc_name[0]=0;
2132 return 0;
2133 }
2134
2135 /*
2136 * Find the device
2137 */
2138
2139 for(dev = dev_base; dev; dev = dev->next)
2140 {
2141 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2142 (dev->pa_addr==addr.s_addr))
2143 break;
2144 }
2145
2146 /*
2147 * Did we find one
2148 */
2149
2150 if(dev)
2151 {
2152 strcpy(sk->ip_mc_name,dev->name);
2153 return 0;
2154 }
2155 return -EADDRNOTAVAIL;
2156 }
2157
2158 case IP_ADD_MEMBERSHIP:
2159 {
2160
2161 /*
2162 * FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2163 */
2164 struct ip_mreq mreq;
/* static: shared scratch across all callers -- see FIXME above. */
2165 static struct options optmem;
2166 unsigned long route_src;
2167 struct rtable *rt;
2168 struct device *dev=NULL;
2169
2170 /*
2171 * Check the arguments.
2172 */
2173
2174 err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2175 if(err)
2176 return err;
2177
2178 memcpy_fromfs(&mreq,optval,sizeof(mreq));
2179
2180 /*
2181 * Get device for use later
2182 */
2183
2184 if(mreq.imr_interface.s_addr==INADDR_ANY)
2185 {
2186 /*
2187 * Not set so scan.
2188 */
2189 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2190 {
2191 dev=rt->rt_dev;
/* Drop the use count the route lookup took -- TODO confirm rt_use
 * semantics against ip_rt_route(). */
2192 rt->rt_use--;
2193 }
2194 }
2195 else
2196 {
2197 /*
2198 * Find a suitable device.
2199 */
2200 for(dev = dev_base; dev; dev = dev->next)
2201 {
2202 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2203 (dev->pa_addr==mreq.imr_interface.s_addr))
2204 break;
2205 }
2206 }
2207
2208 /*
2209 * No device, no cookies.
2210 */
2211
2212 if(!dev)
2213 return -ENODEV;
2214
2215 /*
2216 * Join group.
2217 */
2218
2219 return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2220 }
2221
2222 case IP_DROP_MEMBERSHIP:
2223 {
2224 struct ip_mreq mreq;
2225 struct rtable *rt;
2226 static struct options optmem;
2227 unsigned long route_src;
2228 struct device *dev=NULL;
2229
2230 /*
2231 * Check the arguments
2232 */
2233
2234 err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2235 if(err)
2236 return err;
2237
2238 memcpy_fromfs(&mreq,optval,sizeof(mreq));
2239
2240 /*
2241 * Get device for use later
2242 */
2243
2244 if(mreq.imr_interface.s_addr==INADDR_ANY)
2245 {
2246 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2247 {
2248 dev=rt->rt_dev;
2249 rt->rt_use--;
2250 }
2251 }
2252 else
2253 {
2254 for(dev = dev_base; dev; dev = dev->next)
2255 {
2256 if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2257 (dev->pa_addr==mreq.imr_interface.s_addr))
2258 break;
2259 }
2260 }
2261
2262 /*
2263 * Did we find a suitable device.
2264 */
2265
2266 if(!dev)
2267 return -ENODEV;
2268
2269 /*
2270 * Leave group
2271 */
2272
2273 return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2274 }
2275 #endif
2276 #ifdef CONFIG_IP_FIREWALL
2277 case IP_FW_ADD_BLK:
2278 case IP_FW_DEL_BLK:
2279 case IP_FW_ADD_FWD:
2280 case IP_FW_DEL_FWD:
2281 case IP_FW_CHK_BLK:
2282 case IP_FW_CHK_FWD:
2283 case IP_FW_FLUSH_BLK:
2284 case IP_FW_FLUSH_FWD:
2285 case IP_FW_ZERO_BLK:
2286 case IP_FW_ZERO_FWD:
2287 case IP_FW_POLICY_BLK:
2288 case IP_FW_POLICY_FWD:
/* Firewall manipulation is root only. */
2289 if(!suser())
2290 return -EPERM;
2291 if(optlen>sizeof(tmp_fw) || optlen<1)
2292 return -EINVAL;
2293 err=verify_area(VERIFY_READ,optval,optlen);
2294 if(err)
2295 return err;
2296 memcpy_fromfs(&tmp_fw,optval,optlen);
/* ip_fw_ctl returns a positive errno; flip the sign for the caller. */
2297 err=ip_fw_ctl(optname, &tmp_fw,optlen);
2298 return -err; /* -0 is 0 after all */
2299
2300 #endif
2301 #ifdef CONFIG_IP_ACCT
2302 case IP_ACCT_DEL:
2303 case IP_ACCT_ADD:
2304 case IP_ACCT_FLUSH:
2305 case IP_ACCT_ZERO:
2306 if(!suser())
2307 return -EPERM;
2308 if(optlen>sizeof(tmp_fw) || optlen<1)
2309 return -EINVAL;
2310 err=verify_area(VERIFY_READ,optval,optlen);
2311 if(err)
2312 return err;
2313 memcpy_fromfs(&tmp_fw, optval,optlen);
2314 err=ip_acct_ctl(optname, &tmp_fw,optlen);
2315 return -err; /* -0 is 0 after all */
2316 #endif
2317 /* IP_OPTIONS and friends go here eventually */
2318 default:
2319 return(-ENOPROTOOPT);
2320 }
2321 }
2322
2323 /*
2324 * Get the options. Note for future reference. The GET of IP options gets the
2325 * _received_ ones. The set sets the _sent_ ones.
2326 */
2327
2328 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2329 {
2330 int val,err;
2331 #ifdef CONFIG_IP_MULTICAST
2332 int len;
2333 #endif
2334
2335 if(level!=SOL_IP)
2336 return -EOPNOTSUPP;
2337
2338 switch(optname)
2339 {
2340 case IP_TOS:
2341 val=sk->ip_tos;
2342 break;
2343 case IP_TTL:
2344 val=sk->ip_ttl;
2345 break;
2346 #ifdef CONFIG_IP_MULTICAST
2347 case IP_MULTICAST_TTL:
2348 val=sk->ip_mc_ttl;
2349 break;
2350 case IP_MULTICAST_LOOP:
2351 val=sk->ip_mc_loop;
2352 break;
2353 case IP_MULTICAST_IF:
2354 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2355 if(err)
2356 return err;
2357 len=strlen(sk->ip_mc_name);
2358 err=verify_area(VERIFY_WRITE, optval, len);
2359 if(err)
2360 return err;
2361 put_fs_long(len,(unsigned long *) optlen);
2362 memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2363 return 0;
2364 #endif
2365 default:
2366 return(-ENOPROTOOPT);
2367 }
2368 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2369 if(err)
2370 return err;
2371 put_fs_long(sizeof(int),(unsigned long *) optlen);
2372
2373 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2374 if(err)
2375 return err;
2376 put_fs_long(val,(unsigned long *)optval);
2377
2378 return(0);
2379 }
2380
2381 /*
2382 * IP protocol layer initialiser
2383 */
2384
static struct packet_type ip_packet_type =
{
	0,		/* protocol type: htons(ETH_P_IP), but htons() cannot be
			   evaluated in a static initialiser, so ip_init() patches
			   this field in at run time before registering */
	NULL,		/* All devices: no device filter */
	ip_rcv,		/* receive handler invoked for incoming IP frames */
	NULL,		/* private data -- unused here; TODO confirm field semantics */
	NULL,		/* list link -- presumably filled in by dev_add_pack(); verify */
};
2393
2394 /*
2395 * Device notifier
2396 */
2397
2398 static int ip_rt_event(unsigned long event, void *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
2399 {
2400 if(event==NETDEV_DOWN)
2401 ip_rt_flush(ptr);
2402 return NOTIFY_DONE;
2403 }
2404
struct notifier_block ip_rt_notifier={
	ip_rt_event,	/* callback: flushes routes when a device goes down */
	NULL,		/* chain link -- presumably set by the registration call; verify */
	0		/* priority -- 0 assumed to be the default ordering; TODO confirm */
};
2410
2411 /*
2412 * IP registers the packet type and then calls the subprotocol initialisers
2413 */
2414
void ip_init(void)
{
	/*
	 *	The static initialiser of ip_packet_type could not call
	 *	htons(), so fill the protocol type in here before handing
	 *	the handler to the device layer.
	 */
	ip_packet_type.type=htons(ETH_P_IP);
	dev_add_pack(&ip_packet_type);

	/* So we flush routes when a device is downed */
	register_netdevice_notifier(&ip_rt_notifier);

	/*
	 *	Sub-protocol initialisers -- currently disabled:
	 *
	ip_raw_init();
	ip_packet_init();
	ip_tcp_init();
	ip_udp_init();*/
}