1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * The Internet Protocol (IP) module. 7 * 8 * Version: @(#)ip.c 1.0.16b 9/1/93 9 * 10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Donald Becker, <becker@super.org> 13 * Alan Cox, <gw4pts@gw4pts.ampr.org> 14 * Richard Underwood 15 * 16 * Fixes: 17 * Alan Cox : Commented a couple of minor bits of surplus code 18 * Alan Cox : Undefining IP_FORWARD doesn't include the code 19 * (just stops a compiler warning). 20 * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes 21 * are junked rather than corrupting things. 22 * Alan Cox : Frames to bad broadcast subnets are dumped 23 * We used to process them non broadcast and 24 * boy could that cause havoc. 25 * Alan Cox : ip_forward sets the free flag on the 26 * new frame it queues. Still crap because 27 * it copies the frame but at least it 28 * doesn't eat memory too. 29 * Alan Cox : Generic queue code and memory fixes. 30 * Fred Van Kempen : IP fragment support (borrowed from NET2E) 31 * Gerhard Koerting: Forward fragmented frames correctly. 32 * Gerhard Koerting: Fixes to my fix of the above 8-). 33 * Gerhard Koerting: IP interface addressing fix. 34 * Linus Torvalds : More robustness checks 35 * Alan Cox : Even more checks: Still not as robust as it ought to be 36 * Alan Cox : Save IP header pointer for later 37 * Alan Cox : ip option setting 38 * Alan Cox : Use ip_tos/ip_ttl settings 39 * Alan Cox : Fragmentation bogosity removed 40 * (Thanks to Mark.Bush@prg.ox.ac.uk) 41 * Dmitry Gorodchanin : Send of a raw packet crash fix. 42 * Alan Cox : Silly ip bug when an overlength 43 * fragment turns up. Now frees the 44 * queue. 45 * Linus Torvalds/ : Memory leakage on fragmentation 46 * Alan Cox : handling. 
47 * Gerhard Koerting: Forwarding uses IP priority hints 48 * Teemu Rantanen : Fragment problems. 49 * Alan Cox : General cleanup, comments and reformat 50 * Alan Cox : SNMP statistics 51 * Alan Cox : BSD address rule semantics. Also see 52 * UDP as there is a nasty checksum issue 53 * if you do things the wrong way. 54 * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file 55 * Alan Cox : IP options adjust sk->priority. 56 * Pedro Roque : Fix mtu/length error in ip_forward. 57 * Alan Cox : Avoid ip_chk_addr when possible. 58 * Richard Underwood : IP multicasting. 59 * Alan Cox : Cleaned up multicast handlers. 60 * Alan Cox : RAW sockets demultiplex in the BSD style. 61 * Gunther Mayer : Fix the SNMP reporting typo 62 * Alan Cox : Always in group 224.0.0.1 63 * Alan Cox : Multicast loopback error for 224.0.0.1 64 * Alan Cox : IP_MULTICAST_LOOP option. 65 * Alan Cox : Use notifiers. 66 * Bjorn Ekwall : Removed ip_csum (from slhc.c too) 67 * Bjorn Ekwall : Moved ip_fast_csum to ip.h (inline!) 68 * 69 * To Fix: 70 * IP option processing is mostly not needed. ip_forward needs to know about routing rules 71 * and time stamp but that's about all. Use the route mtu field here too 72 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient 73 * and could be made very efficient with the addition of some virtual memory hacks to permit 74 * the allocation of a buffer that can then be 'grown' by twiddling page tables. 75 * Output fragmentation wants updating along with the buffer management to use a single 76 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet 77 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause 78 * fragmentation anyway. 
79 * 80 * This program is free software; you can redistribute it and/or 81 * modify it under the terms of the GNU General Public License 82 * as published by the Free Software Foundation; either version 83 * 2 of the License, or (at your option) any later version. 84 */ 85
86 #include <asm/segment.h>
87 #include <asm/system.h>
88 #include <linux/types.h>
89 #include <linux/kernel.h>
90 #include <linux/sched.h>
91 #include <linux/mm.h>
92 #include <linux/string.h>
93 #include <linux/errno.h>
94 #include <linux/config.h>
95
96 #include <linux/socket.h>
97 #include <linux/sockios.h>
98 #include <linux/in.h>
99 #include <linux/inet.h>
100 #include <linux/netdevice.h>
101 #include <linux/etherdevice.h>
102
103 #include "snmp.h"
104 #include "ip.h"
105 #include "protocol.h"
106 #include "route.h"
107 #include "tcp.h"
108 #include <linux/skbuff.h>
109 #include "sock.h"
110 #include "arp.h"
111 #include "icmp.h"
112 #include "raw.h"
113 #include "igmp.h"
114 #include <linux/ip_fw.h>
115
#define CONFIG_IP_DEFRAG

/* Defined elsewhere in the TCP code — declared here for reference. */
extern int last_retran;
extern void sort_send(struct sock *sk);

#define min(a,b)	((a)<(b)?(a):(b))
/* True when x (network byte order) is in the 127.0.0.0/8 loopback net. */
#define LOOPBACK(x)	(((x) & htonl(0xff000000)) == htonl(0x7f000000))

/*
 *	SNMP management statistics.
 *	First field is ipForwarding (1 = acting as gateway),
 *	second is ipDefaultTTL.
 */

#ifdef CONFIG_IP_FORWARD
struct ip_mib ip_statistics={1,64,};	/* Forwarding=Yes, Default TTL=64 */
#else
struct ip_mib ip_statistics={0,64,};	/* Forwarding=No, Default TTL=64 */
#endif
/*
 *	Handle an ioctl() issued against the IP layer itself.
 *	No IP-level ioctl commands exist any more, so every request
 *	is rejected.  This entry point is scheduled to disappear.
 */

int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	/* Nothing is recognised at this level. */
	return -EINVAL;
}
149
150 /* these two routines will do routing. */ 151
/*
 *	Process a strict source route option.  Not implemented — this
 *	stub exists so the option code has a hook to call when strict
 *	routing support is eventually written.
 */
static void strict_route(struct iphdr *iph, struct options *opt)
{
}
157
/*
 *	Process a loose source route option.  Not implemented — stub
 *	counterpart to strict_route() above.
 */
static void loose_route(struct iphdr *iph, struct options *opt)
{
}
163
164
165
/*
 *	Check whether we have lost a gateway for daddr.
 *	Not implemented — currently a no-op.
 */
void ip_route_check(unsigned long daddr)
{
}
172
#if 0
/*
 *	Put the options at the end of an IP header.  Currently no
 *	options are supported, so only a single terminating zero byte
 *	is written.  Compiled out (#if 0) — dead code kept for when
 *	option emission is implemented.
 *
 *	NOTE(review): returns 4 (one padded option word) while writing
 *	only one byte — presumably the remaining 3 pad bytes were meant
 *	to be zeroed too; verify before re-enabling.
 */
static int build_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *ptr;
	/* currently we don't support any options. */
	ptr = (unsigned char *)(iph+1);
	*ptr = 0;
	return (4);
}
#endif
186
187 /* 188 * Take an skb, and fill in the MAC header. 189 */ 190
191 staticintip_send(structsk_buff *skb, unsignedlongdaddr, intlen, structdevice *dev, unsignedlongsaddr)
/* */ 192 { 193 intmac = 0;
194
195 skb->dev = dev;
196 skb->arp = 1;
197 if (dev->hard_header)
198 { 199 /* 200 * Build a hardware header. Source address is our mac, destination unknown 201 * (rebuild header will sort this out) 202 */ 203 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
204 if (mac < 0)
205 { 206 mac = -mac;
207 skb->arp = 0;
208 skb->raddr = daddr; /* next routing address */ 209 } 210 } 211 returnmac;
212 } 213
/*
 *	Counter used to generate the identification field of outgoing
 *	IP datagrams (so receivers can reassociate fragments).
 *	NOTE(review): not referenced in this chunk — presumably the
 *	transmit/queue path fills iph->id from it; verify against caller.
 */
int ip_id_count = 0;
215
/*
 *	Build the hardware and IP headers for an outgoing datagram.
 *
 *	If *dev != NULL the caller has already chosen the output device
 *	and knows what it is doing; otherwise the routing tables select
 *	the device (and *dev is filled in).
 *
 *	skb	buffer whose data area receives the headers
 *	saddr	source IP (0 = pick the output device's address)
 *	daddr	destination IP
 *	dev	in/out: output device
 *	type	IP protocol number (IPPROTO_RAW means the caller
 *		supplies its own IP header and we build only the MAC part)
 *	opt	IP options (currently replaced by the routing result)
 *	len	total frame length
 *	tos/ttl	values for the corresponding IP header fields
 *
 *	Returns total header bytes written (MAC + 20 byte IP header),
 *	or -ENETUNREACH if no route exists.
 *
 *	NOTE: tot_len, id and the checksum are NOT filled in here —
 *	iph->ihl is set to 5 and the remaining fields are left to the
 *	transmit path.
 */
int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
		struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
{
	static struct options optmem;	/* scratch option block filled by the router */
	struct iphdr *iph;
	struct rtable *rt;
	unsigned char *buff;
	unsigned long raddr;		/* next hop (router) address */
	int tmp;
	unsigned long src;

	buff = skb->data;

	/*
	 *	See if we need to look up the device.
	 */

#ifdef CONFIG_INET_MULTICAST
	/* Multicast sends may be pinned to a device by IP_MULTICAST_IF. */
	if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
		*dev=dev_get(skb->sk->ip_mc_name);
#endif
	if (*dev == NULL)
	{
		if(skb->localroute)
			rt = ip_rt_local(daddr, &optmem, &src);
		else
			rt = ip_rt_route(daddr, &optmem, &src);
		if (rt == NULL)
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}

		*dev = rt->rt_dev;
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;/*rt->rt_dev->pa_addr;*/
		raddr = rt->rt_gateway;

		opt = &optmem;
	}
	else
	{
		/*
		 *	Device chosen by caller — we still need the address of
		 *	the first hop, so route anyway.
		 */
		if(skb->localroute)
			rt = ip_rt_local(daddr, &optmem, &src);
		else
			rt = ip_rt_route(daddr, &optmem, &src);
		/*
		 *	If the frame is from us and going off machine it MUST MUST MUST
		 *	have the output device ip address and never the loopback
		 */
		if (LOOPBACK(saddr) && !LOOPBACK(daddr))
			saddr = src;/*rt->rt_dev->pa_addr;*/

		raddr = (rt == NULL) ? 0 : rt->rt_gateway;
	}

	/*
	 *	No source addr so make it our addr
	 */
	if (saddr == 0)
		saddr = src;

	/*
	 *	No gateway so aim at the real destination
	 */
	if (raddr == 0)
		raddr = daddr;

	/*
	 *	Now build the MAC header.
	 */

	tmp = ip_send(skb, raddr, len, *dev, saddr);
	buff += tmp;
	len -= tmp;

	/*
	 *	Book keeping
	 */

	skb->dev = *dev;
	skb->saddr = saddr;
	if (skb->sk)
		skb->sk->saddr = saddr;

	/*
	 *	Now build the IP header.
	 */

	/*
	 *	If we are using IPPROTO_RAW, then we don't need an IP header, since
	 *	one is being supplied to us by the user
	 */

	if(type == IPPROTO_RAW)
		return (tmp);

	iph = (struct iphdr *)buff;
	iph->version = 4;
	iph->tos = tos;
	iph->frag_off = 0;
	iph->ttl = ttl;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->protocol = type;
	iph->ihl = 5;		/* no options: 5 * 4 = 20 byte header */
	skb->ip_hdr = iph;

	/* Setup the IP options. */
#ifdef Not_Yet_Avail
	build_options(iph, opt);
#endif

	return(20 + tmp);	/* IP header plus MAC header size */
}
345
/*
 *	Parse the IP options of an incoming datagram into *opt.
 *
 *	NOTE: the unconditional return(0) just after the zeroing below
 *	deliberately DISABLES the parser — everything past it is dead
 *	code, kept for reference until option handling is rewritten
 *	(see the "To Fix" note in the file header).  As shipped, this
 *	routine merely clears *opt and reports success.
 *
 *	Return convention of the dead parser: 0 = ok, 1 = malformed
 *	option (caller should drop the frame).
 */
static int do_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *buff;
	int done = 0;
	int i, len = sizeof(struct iphdr);

	/* Zero out the options. */
	opt->record_route.route_size = 0;
	opt->loose_route.route_size = 0;
	opt->strict_route.route_size = 0;
	opt->tstamp.ptr = 0;
	opt->security = 0;
	opt->compartment = 0;
	opt->handling = 0;
	opt->stream = 0;
	opt->tcc = 0;
	return(0);	/* option processing intentionally disabled */

	/* ------- dead code below this point ------- */

	/* Advance the pointer to start at the options. */
	buff = (unsigned char *)(iph + 1);

	/* Walk the option bytes up to the header length (ihl words). */
	while (!done && len < iph->ihl*4) switch(*buff) {
		case IPOPT_END:
			done = 1;
			break;
		case IPOPT_NOOP:
			buff++;
			len++;
			break;
		case IPOPT_SEC:
			/* Security option: fixed length 11. */
			buff++;
			if (*buff != 11) return(1);
			buff++;
			opt->security = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->compartment = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->handling = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
			buff += 3;
			len += 11;
			break;
		case IPOPT_LSRR:
			/* Loose source route: length must be 3 + 4n. */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->loose_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->loose_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->loose_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);	/* too many hops — junk the frame */
				opt->loose_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SSRR:
			/* Strict source route: same layout as LSRR. */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->strict_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->strict_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->strict_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);
				opt->strict_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_RR:
			/* Record route. */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->record_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->record_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->record_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return 1;
				opt->record_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SID:
			/* Stream identifier. */
			len += 4;
			buff +=2;
			opt->stream = *(unsigned short *)buff;
			buff += 2;
			break;
		case IPOPT_TIMESTAMP:
			/* Internet timestamp. */
			buff++;
			len += *buff;
			if (*buff % 4 != 0) return(1);
			opt->tstamp.len = *buff / 4 - 1;
			buff++;
			if ((*buff - 1) % 4 != 0) return(1);
			opt->tstamp.ptr = (*buff-1)/4;
			buff++;
			opt->tstamp.x.full_char = *buff;
			buff++;
			for (i = 0; i < opt->tstamp.len; i++) {
				opt->tstamp.data[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		default:
			return(1);	/* unknown option — reject */
	}

	/* Promote a source route to the record route slot if none present. */
	if (opt->record_route.route_size == 0) {
		if (opt->strict_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->strict_route),
			       sizeof(opt->record_route));
		} else if (opt->loose_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->loose_route),
			       sizeof(opt->record_route));
		}
	}

	/* Hand unexhausted source routes to the (stub) routing helpers. */
	if (opt->strict_route.route_size != 0 &&
	    opt->strict_route.route_size != opt->strict_route.pointer) {
		strict_route(iph, opt);
		return(0);
	}

	if (opt->loose_route.route_size != 0 &&
	    opt->loose_route.route_size != opt->loose_route.pointer) {
		loose_route(iph, opt);
		return(0);
	}

	return(0);
}
493 /* 494 * This routine does all the checksum computations that don't 495 * require anything special (like copying or special headers). 496 */ 497
/*
 *	Compute the 16-bit one's-complement Internet checksum over
 *	'len' bytes at 'buff'.  i386 inline assembly: sums 32-bit
 *	words first, then a trailing 16-bit word, then a trailing
 *	byte, folding carries as it goes.  Returns the complemented
 *	16-bit sum ready to be stored in a header.
 */
unsigned short ip_compute_csum(unsigned char * buff, int len)
{
	unsigned long sum = 0;

	/* Do the first multiple of 4 bytes and convert to 16 bits. */
	if (len > 3)
	{
		__asm__("clc\n"
		"1:\t"
		"lodsl\n\t"
		"adcl %%eax, %%ebx\n\t"
		"loop 1b\n\t"
		"adcl $0, %%ebx\n\t"
		"movl %%ebx, %%eax\n\t"
		"shrl $16, %%eax\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum) , "=S" (buff)
		: "0" (sum), "c" (len >> 2) ,"1" (buff)
		: "ax", "cx", "si", "bx" );
	}
	/* Fold in a remaining 16-bit word, if any. */
	if (len & 2)
	{
		__asm__("lodsw\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	/* Fold in a remaining odd byte, if any. */
	if (len & 1)
	{
		__asm__("lodsb\n\t"
		"movb $0, %%ah\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	sum =~sum;		/* one's complement of the running sum */
	return(sum & 0xffff);
}
542 /* 543 * Generate a checksum for an outgoing IP datagram. 544 */ 545
/*
 *	Generate the header checksum for an outgoing IP datagram.
 *	The check field must be zero while the sum is computed.
 */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
552 /************************ Fragment Handlers From NET2E **********************************/ 553
554
555 /* 556 * This fragment handler is a bit of a heap. On the other hand it works quite 557 * happily and handles things quite well. 558 */ 559
/* Head of the doubly-linked list of datagrams awaiting reassembly. */
static struct ipq *ipqueue = NULL;		/* IP fragment queue */
562 /* 563 * Create a new fragment entry. 564 */ 565
566 staticstructipfrag *ip_frag_create(intoffset, intend, structsk_buff *skb, unsignedchar *ptr)
/* */ 567 { 568 structipfrag *fp;
569
570 fp = (structipfrag *) kmalloc(sizeof(structipfrag), GFP_ATOMIC);
571 if (fp == NULL)
572 { 573 printk("IP: frag_create: no memory left !\n");
574 return(NULL);
575 } 576 memset(fp, 0, sizeof(structipfrag));
577
578 /* Fill in the structure. */ 579 fp->offset = offset;
580 fp->end = end;
581 fp->len = end - offset;
582 fp->skb = skb;
583 fp->ptr = ptr;
584
585 return(fp);
586 } 587
588
589 /* 590 * Find the correct entry in the "incomplete datagrams" queue for 591 * this IP datagram, and return the queue entry address if found. 592 */ 593
594 staticstructipq *ip_find(structiphdr *iph)
/* */ 595 { 596 structipq *qp;
597 structipq *qplast;
598
599 cli();
600 qplast = NULL;
601 for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
602 { 603 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
604 iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
605 { 606 del_timer(&qp->timer); /* So it doesn't vanish on us. The timer will be reset anyway */ 607 sti();
608 return(qp);
609 } 610 } 611 sti();
612 return(NULL);
613 } 614
615
616 /* 617 * Remove an entry from the "incomplete datagrams" queue, either 618 * because we completed, reassembled and processed it, or because 619 * it timed out. 620 */ 621
/*
 *	Remove an entry from the "incomplete datagrams" queue, either
 *	because we completed, reassembled and processed it, or because
 *	it timed out.  Frees every fragment skb, the saved MAC and IP
 *	headers, and the queue descriptor itself.  The unlink and frees
 *	run with interrupts disabled.
 */
static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 *	Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		/* Entry is at the head of the list. */
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;		/* save link before freeing fp */
		IS_SKB(fp->skb);
		kfree_skb(fp->skb,FREE_READ);
		kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the MAC header. */
	kfree_s(qp->mac, qp->maclen);

	/* Release the IP header (allocated with 8 extra octets for ICMP). */
	kfree_s(qp->iph, qp->ihlen + 8);

	/* Finally, release the queue descriptor itself. */
	kfree_s(qp, sizeof(struct ipq));
	sti();
}
671
672 /* 673 * Oops- a fragment queue timed out. Kill it and send an ICMP reply. 674 */ 675
676 staticvoidip_expire(unsignedlongarg)
/* */ 677 { 678 structipq *qp;
679
680 qp = (structipq *)arg;
681
682 /* 683 * Send an ICMP "Fragment Reassembly Timeout" message. 684 */ 685
686 ip_statistics.IpReasmTimeout++;
687 ip_statistics.IpReasmFails++;
688 /* This if is always true... shrug */ 689 if(qp->fragments!=NULL)
690 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
691 ICMP_EXC_FRAGTIME, qp->dev);
692
693 /* 694 * Nuke the fragment queue. 695 */ 696 ip_free(qp);
697 } 698
699
700 /* 701 * Add an entry to the 'ipq' queue for a newly received IP datagram. 702 * We will (hopefully :-) receive all other fragments of this datagram 703 * in time, so we just create a queue for this datagram, in which we 704 * will insert the received fragments at their respective positions. 705 */ 706
707 staticstructipq *ip_create(structsk_buff *skb, structiphdr *iph, structdevice *dev)
/* */ 708 { 709 structipq *qp;
710 intmaclen;
711 intihlen;
712
713 qp = (structipq *) kmalloc(sizeof(structipq), GFP_ATOMIC);
714 if (qp == NULL)
715 { 716 printk("IP: create: no memory left !\n");
717 return(NULL);
718 skb->dev = qp->dev;
719 } 720 memset(qp, 0, sizeof(structipq));
721
722 /* 723 * Allocate memory for the MAC header. 724 * 725 * FIXME: We have a maximum MAC address size limit and define 726 * elsewhere. We should use it here and avoid the 3 kmalloc() calls 727 */ 728
729 maclen = ((unsignedlong) iph) - ((unsignedlong) skb->data);
730 qp->mac = (unsignedchar *) kmalloc(maclen, GFP_ATOMIC);
731 if (qp->mac == NULL)
732 { 733 printk("IP: create: no memory left !\n");
734 kfree_s(qp, sizeof(structipq));
735 return(NULL);
736 } 737
738 /* 739 * Allocate memory for the IP header (plus 8 octets for ICMP). 740 */ 741
742 ihlen = (iph->ihl * sizeof(unsignedlong));
743 qp->iph = (structiphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
744 if (qp->iph == NULL)
745 { 746 printk("IP: create: no memory left !\n");
747 kfree_s(qp->mac, maclen);
748 kfree_s(qp, sizeof(structipq));
749 return(NULL);
750 } 751
752 /* Fill in the structure. */ 753 memcpy(qp->mac, skb->data, maclen);
754 memcpy(qp->iph, iph, ihlen + 8);
755 qp->len = 0;
756 qp->ihlen = ihlen;
757 qp->maclen = maclen;
758 qp->fragments = NULL;
759 qp->dev = dev;
760
761 /* Start a timer for this entry. */ 762 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */ 763 qp->timer.data = (unsignedlong) qp; /* pointer to queue */ 764 qp->timer.function = ip_expire; /* expire function */ 765 add_timer(&qp->timer);
766
767 /* Add this entry to the queue. */ 768 qp->prev = NULL;
769 cli();
770 qp->next = ipqueue;
771 if (qp->next != NULL)
772 qp->next->prev = qp;
773 ipqueue = qp;
774 sti();
775 return(qp);
776 } 777
778
779 /* 780 * See if a fragment queue is complete. 781 */ 782
783 staticintip_done(structipq *qp)
/* */ 784 { 785 structipfrag *fp;
786 intoffset;
787
788 /* Only possible if we received the final fragment. */ 789 if (qp->len == 0)
790 return(0);
791
792 /* Check all fragment offsets to see if they connect. */ 793 fp = qp->fragments;
794 offset = 0;
795 while (fp != NULL)
796 { 797 if (fp->offset > offset)
798 return(0); /* fragment(s) missing */ 799 offset = fp->end;
800 fp = fp->next;
801 } 802
803 /* All fragments are present. */ 804 return(1);
805 } 806
807
808 /* 809 * Build a new IP datagram from all its fragments. 810 * 811 * FIXME: We copy here because we lack an effective way of handling lists 812 * of bits on input. Until the new skb data handling is in I'm not going 813 * to touch this with a bargepole. This also causes a 4Kish limit on 814 * packet sizes. 815 */ 816
/*
 *	Build a new IP datagram from all its fragments.
 *
 *	Allocates one contiguous skb sized MAC header + IP header +
 *	payload, copies the saved headers and every fragment's data in,
 *	fixes up the reassembled IP header, and frees the queue entry.
 *	Returns the new skb, or NULL on failure (queue freed either way).
 *
 *	FIXME: We copy here because we lack an effective way of handling
 *	lists of bits on input.  Until the new skb data handling is in
 *	I'm not going to touch this with a bargepole.  This also causes
 *	a 4Kish limit on packet sizes.
 */
static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 *	Allocate a new buffer for the datagram.
	 */

	len = qp->maclen + qp->ihlen + qp->len;

	if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb->len = (len - qp->maclen);	/* skb->len counts from the IP header */
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original MAC and IP headers into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
	ptr += qp->maclen;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;		/* ptr now points at the payload area */
	skb->h.raw += qp->maclen;	/* h.raw points at the IP header */

	count = 0;			/* payload bytes copied so far */

	/* Copy the data portions of all fragments into the new buffer. */
	fp = qp->fragments;
	while(fp != NULL)
	{
		/* Sanity check: a fragment must not write past the buffer. */
		if(count+fp->len > skb->len)
		{
			printk("Invalid fragment list: Fragment over size.\n");
			ip_free(qp);
			kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header. */
	iph = skb->h.iph;
	iph->frag_off = 0;		/* no longer a fragment */
	iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}
884
885 /* 886 * Process an incoming IP datagram fragment. 887 */ 888
889 staticstructsk_buff *ip_defrag(structiphdr *iph, structsk_buff *skb, structdevice *dev)
/* */ 890 { 891 structipfrag *prev, *next;
892 structipfrag *tfp;
893 structipq *qp;
894 structsk_buff *skb2;
895 unsignedchar *ptr;
896 intflags, offset;
897 inti, ihl, end;
898
899 ip_statistics.IpReasmReqds++;
900
901 /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */ 902 qp = ip_find(iph);
903
904 /* Is this a non-fragmented datagram? */ 905 offset = ntohs(iph->frag_off);
906 flags = offset & ~IP_OFFSET;
907 offset &= IP_OFFSET;
908 if (((flags & IP_MF) == 0) && (offset == 0))
909 { 910 if (qp != NULL)
911 ip_free(qp); /* Huh? How could this exist?? */ 912 return(skb);
913 } 914
915 offset <<= 3; /* offset is in 8-byte chunks */ 916
917 /* 918 * If the queue already existed, keep restarting its timer as long 919 * as we still are receiving fragments. Otherwise, create a fresh 920 * queue entry. 921 */ 922
923 if (qp != NULL)
924 { 925 del_timer(&qp->timer);
926 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */ 927 qp->timer.data = (unsignedlong) qp; /* pointer to queue */ 928 qp->timer.function = ip_expire; /* expire function */ 929 add_timer(&qp->timer);
930 } 931 else 932 { 933 /* 934 * If we failed to create it, then discard the frame 935 */ 936 if ((qp = ip_create(skb, iph, dev)) == NULL)
937 { 938 skb->sk = NULL;
939 kfree_skb(skb, FREE_READ);
940 ip_statistics.IpReasmFails++;
941 returnNULL;
942 } 943 } 944
945 /* 946 * Determine the position of this fragment. 947 */ 948
949 ihl = (iph->ihl * sizeof(unsignedlong));
950 end = offset + ntohs(iph->tot_len) - ihl;
951
952 /* 953 * Point into the IP datagram 'data' part. 954 */ 955
956 ptr = skb->data + dev->hard_header_len + ihl;
957
958 /* 959 * Is this the final fragment? 960 */ 961
962 if ((flags & IP_MF) == 0)
963 qp->len = end;
964
965 /* 966 * Find out which fragments are in front and at the back of us 967 * in the chain of fragments so far. We must know where to put 968 * this fragment, right? 969 */ 970
971 prev = NULL;
972 for(next = qp->fragments; next != NULL; next = next->next)
973 { 974 if (next->offset > offset)
975 break; /* bingo! */ 976 prev = next;
977 } 978
979 /* 980 * We found where to put this one. 981 * Check for overlap with preceding fragment, and, if needed, 982 * align things so that any overlaps are eliminated. 983 */ 984 if (prev != NULL && offset < prev->end)
985 { 986 i = prev->end - offset;
987 offset += i; /* ptr into datagram */ 988 ptr += i; /* ptr into fragment data */ 989 } 990
991 /* 992 * Look for overlap with succeeding segments. 993 * If we can merge fragments, do it. 994 */ 995
996 for(; next != NULL; next = tfp)
997 { 998 tfp = next->next;
999 if (next->offset >= end)
1000 break; /* no overlaps at all */1001
1002 i = end - next->offset; /* overlap is 'i' bytes */1003 next->len -= i; /* so reduce size of */1004 next->offset += i; /* next fragment */1005 next->ptr += i;
1006
1007 /*1008 * If we get a frag size of <= 0, remove it and the packet1009 * that it goes with.1010 */1011 if (next->len <= 0)
1012 {1013 if (next->prev != NULL)
1014 next->prev->next = next->next;
1015 else1016 qp->fragments = next->next;
1017
1018 if (tfp->next != NULL)
1019 next->next->prev = next->prev;
1020
1021 kfree_skb(next->skb,FREE_READ);
1022 kfree_s(next, sizeof(structipfrag));
1023 }1024 }1025
1026 /*1027 * Insert this fragment in the chain of fragments.1028 */1029
1030 tfp = NULL;
1031 tfp = ip_frag_create(offset, end, skb, ptr);
1032
1033 /*1034 * No memory to save the fragment - so throw the lot1035 */1036
1037 if (!tfp)
1038 {1039 skb->sk = NULL;
1040 kfree_skb(skb, FREE_READ);
1041 returnNULL;
1042 }1043 tfp->prev = prev;
1044 tfp->next = next;
1045 if (prev != NULL)
1046 prev->next = tfp;
1047 else1048 qp->fragments = tfp;
1049
1050 if (next != NULL)
1051 next->prev = tfp;
1052
1053 /*1054 * OK, so we inserted this new fragment into the chain.1055 * Check if we now have a full IP datagram which we can1056 * bump up to the IP layer...1057 */1058
1059 if (ip_done(qp))
1060 {1061 skb2 = ip_glue(qp); /* glue together the fragments */1062 return(skb2);
1063 }1064 return(NULL);
1065 }1066
1067
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to the MAC header plus IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending by calling
 *	ip_queue_xmit().  Note that this is recursion, and bad things will happen
 *	if this function causes a loop...
 *
 *	Yes this is inefficient, feel free to submit a quicker one.
 *
 *	**Protocol Violation**
 *	We copy all the options to each fragment. !FIXME!
 *
 *	is_frag bits: 1 = not the last fragment of a larger datagram
 *	(keep MF set on every piece), 2 = skb is itself already a
 *	fragment (start offsets from its existing frag_off).
 */
void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;
	unsigned long flags;

	/*
	 *	Point into the IP datagram header.
	 */

	raw = skb->data;
	iph = (struct iphdr *) (raw + dev->hard_header_len);

	skb->ip_hdr = iph;

	/*
	 *	Setup starting values.
	 */

	hlen = (iph->ihl * sizeof(unsigned long));
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	hlen += dev->hard_header_len;		/* Total header size */
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 *	Check for any "DF" flag. [DF means do not fragment]
	 */

	if (ntohs(iph->frag_off) & IP_DF)
	{
		ip_statistics.IpFragFails++;
		icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev);
		return;
	}

	/*
	 *	The protocol doesn't seem to say what to do in the case that the
	 *	frame + options doesn't fit the mtu. As it used to fall down dead
	 *	in this case we were fortunate it didn't happen
	 */

	if(mtu<8)
	{
		/* It's wrong but its better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 *	Fragment the datagram.
	 */

	/*
	 *	The initial offset is 0 for a complete frame. When
	 *	fragmenting fragments its wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
	else
		offset = 0;


	/*
	 *	Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
		{
			printk("IP: frag: no memory for new fragment!\n");
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 *	Set up data on packet
		 */

		skb2->arp = skb->arp;
		if(skb->free==0)
			printk("IP fragmenter: BUG free!=1 in fragmenter\n");
		skb2->free = 1;
		skb2->len = len + hlen;
		skb2->h.raw=(char *) skb2->data;
		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		save_flags(flags);
		if (sk)
		{
			cli();
			sk->wmem_alloc += skb2->mem_len;
			skb2->sk=sk;
		}
		restore_flags(flags);
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=dev->hard_header_len;	/* h.raw now points at the IP header */

		/*
		 *	Fill in the new header fields.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		/*
		 *	Added AC : If we are fragmenting a fragment thats not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}
1242
1243
#ifdef CONFIG_IP_FORWARD

/*
 *	Forward an IP datagram to its next destination.
 *
 *	skb	- received frame (caller retains ownership; ip_rcv frees it
 *		  after we return, so we always copy before transmitting)
 *	dev	- interface the frame arrived on
 *	is_frag	- fragment flags as tagged by ip_rcv (bit 0: more fragments,
 *		  bit 1: not the first fragment) - passed to ip_fragment so
 *		  fragments-of-fragments keep MF set correctly.
 */

static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct device *dev2;	/* Output device */
	struct iphdr *iph;	/* Our header */
	struct sk_buff *skb2;	/* Output packet */
	struct rtable *rt;	/* Route we use */
	unsigned char *ptr;	/* Data pointer */
	unsigned long raddr;	/* Router IP address */

	/*
	 *	See if we are allowed to forward this.
	 */

#ifdef CONFIG_IP_FIREWALL
	if (!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain))
	{
		/* Firewalled: the caller (ip_rcv) frees the skb. */
		return;
	}
#endif
	/*
	 *	According to the RFC, we must first decrease the TTL field. If
	 *	that reaches zero, we must reply an ICMP control message telling
	 *	that the packet's lifetime expired.
	 *
	 *	Exception:
	 *	We may not generate an ICMP for an ICMP. icmp_send does the
	 *	enforcement of this so we can forget it here. It is however
	 *	sometimes VERY important.
	 *
	 *	FIX: test BEFORE decrementing. iph->ttl is unsigned, so the old
	 *	sequence "iph->ttl--; if (iph->ttl <= 0)" wrapped an incoming
	 *	TTL of 0 around to 255 and forwarded the frame instead of
	 *	discarding it.
	 */

	iph = skb->h.iph;
	if (iph->ttl <= 1)
	{
		/* Tell the sender its packet died... */
		icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, dev);
		return;
	}
	iph->ttl--;

	/*
	 *	Re-compute the IP header checksum.
	 *	This is inefficient. We know what has happened to the header
	 *	and could thus adjust the checksum as Phil Karn does in KA9Q
	 */

	ip_send_check(iph);

	/*
	 *	OK, the packet is still valid. Fetch its destination address,
	 *	and give it to the IP sender for further processing.
	 */

	rt = ip_rt_route(iph->daddr, NULL, NULL);
	if (rt == NULL)
	{
		/*
		 *	Tell the sender its packet cannot be delivered. Again
		 *	ICMP is screened later.
		 */
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, dev);
		return;
	}

	/*
	 *	If the router told us that there is no GW, use the dest.
	 *	IP address itself - we seem to be connected directly.
	 */

	raddr = rt->rt_gateway;

	if (raddr != 0)
	{
		/*
		 *	There is a gateway so find the correct route for it.
		 *	Gateways cannot in turn be gatewayed.
		 */
		rt = ip_rt_route(raddr, NULL, NULL);
		if (rt == NULL)
		{
			/*
			 *	Tell the sender its packet cannot be delivered...
			 */
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, dev);
			return;
		}
		if (rt->rt_gateway != 0)
			raddr = rt->rt_gateway;
	}
	else
		raddr = iph->daddr;

	/*
	 *	Having picked a route we can now send the frame out.
	 */

	dev2 = rt->rt_dev;

	/*
	 *	In IP you never forward a frame on the interface that it arrived
	 *	upon. We should generate an ICMP HOST REDIRECT giving the route
	 *	we calculated.
	 *	For now just dropping the packet is an acceptable compromise.
	 */

	if (dev == dev2)
		return;

	/*
	 *	We now allocate a new buffer, and copy the datagram into it.
	 *	If the indicated interface is up and running, kick it.
	 */

	if (dev2->flags & IFF_UP)
	{
		/*
		 *	Current design decrees we copy the packet. For identical header
		 *	lengths we could avoid it. The new skb code will let us push
		 *	data so the problem goes away then.
		 */

		skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);

		/*
		 *	This is rare and since IP is tolerant of network failures
		 *	quite harmless.
		 */

		if (skb2 == NULL)
		{
			printk("\nIP: No memory available for IP forward\n");
			return;
		}
		ptr = skb2->data;
		skb2->free = 1;
		skb2->len = skb->len + dev2->hard_header_len;
		skb2->h.raw = ptr;

		/*
		 *	Copy the packet data into the new buffer.
		 */
		memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);

		/* Now build the MAC header. */
		(void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);

		ip_statistics.IpForwDatagrams++;

		/*
		 *	See if it needs fragmenting. Note in ip_rcv we tagged
		 *	the fragment type. This must be right so that
		 *	the fragmenter does the right thing.
		 */

		if (skb2->len > dev2->mtu + dev2->hard_header_len)
		{
			ip_fragment(NULL, skb2, dev2, is_frag);
			kfree_skb(skb2, FREE_WRITE);
		}
		else
		{
#ifdef CONFIG_IP_ACCT
			/*
			 *	Count mapping we shortcut
			 */

			ip_acct_cnt(iph, ip_acct_chain, 1);
#endif

			/*
			 *	Map service types to priority. We lie about
			 *	throughput being low priority, but it's a good
			 *	choice to help improve general usage.
			 */
			if (iph->tos & IPTOS_LOWDELAY)
				dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
			else if (iph->tos & IPTOS_THROUGHPUT)
				dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
			else
				dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
		}
	}
}

#endif
1439 /*1440 * This function receives all incoming IP datagrams.1441 */1442
1443 intip_rcv(structsk_buff *skb, structdevice *dev, structpacket_type *pt)
/* */1444 {1445 structiphdr *iph = skb->h.iph;
1446 structsock *raw_sk=NULL;
1447 unsignedcharhash;
1448 unsignedcharflag = 0;
1449 unsignedcharopts_p = 0; /* Set iff the packet has options. */1450 structinet_protocol *ipprot;
1451 staticstructoptionsopt; /* since we don't use these yet, and they1452 take up stack space. */1453 intbrd=IS_MYADDR;
1454 intis_frag=0;
1455
1456 ip_statistics.IpInReceives++;
1457
1458 /*1459 * Tag the ip header of this packet so we can find it1460 */1461
1462 skb->ip_hdr = iph;
1463
1464 /*1465 * Is the datagram acceptable?1466 *1467 * 1. Length at least the size of an ip header1468 * 2. Version of 41469 * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]1470 * (4. We ought to check for IP multicast addresses and undefined types.. does this matter ?)1471 */1472
1473 if (skb->len<sizeof(structiphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsignedchar *)iph, iph->ihl) !=0)
1474 {1475 ip_statistics.IpInHdrErrors++;
1476 kfree_skb(skb, FREE_WRITE);
1477 return(0);
1478 }1479
1480 /*1481 * See if the firewall wants to dispose of the packet. 1482 */1483
1484 #ifdefCONFIG_IP_FIREWALL1485
1486 if(!LOOPBACK(iph->daddr) && !ip_fw_chk(iph,ip_fw_blk_chain))
1487 {1488 kfree_skb(skb, FREE_WRITE);
1489 return 0;
1490 }1491
1492 #endif1493
1494 /*1495 * Our transport medium may have padded the buffer out. Now we know it1496 * is IP we can trim to the true length of the frame.1497 */1498
1499 skb->len=ntohs(iph->tot_len);
1500
1501 /*1502 * Next analyse the packet for options. Studies show under one packet in1503 * a thousand have options....1504 */1505
1506 if (iph->ihl != 5)
1507 {/* Fast path for the typical optionless IP packet. */1508 memset((char *) &opt, 0, sizeof(opt));
1509 if (do_options(iph, &opt) != 0)
1510 return 0;
1511 opts_p = 1;
1512 }1513
1514 /*1515 * Remember if the frame is fragmented.1516 */1517
1518 if(iph->frag_off)
1519 {1520 if (iph->frag_off & 0x0020)
1521 is_frag|=1;
1522 /*1523 * Last fragment ?1524 */1525
1526 if (ntohs(iph->frag_off) & 0x1fff)
1527 is_frag|=2;
1528 }1529
1530 /*1531 * Do any IP forwarding required. chk_addr() is expensive -- avoid it someday.1532 *1533 * This is inefficient. While finding out if it is for us we could also compute1534 * the routing table entry. This is where the great unified cache theory comes1535 * in as and when someone implements it1536 *1537 * For most hosts over 99% of packets match the first conditional1538 * and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at1539 * function entry.1540 */1541
1542 if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1543 {1544 /*1545 * Don't forward multicast or broadcast frames.1546 */1547
1548 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1549 {1550 kfree_skb(skb,FREE_WRITE);
1551 return 0;
1552 }1553
1554 /*1555 * The packet is for another target. Forward the frame1556 */1557
1558 #ifdefCONFIG_IP_FORWARD1559 ip_forward(skb, dev, is_frag);
1560 #else1561 /* printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",1562 iph->saddr,iph->daddr);*/1563 ip_statistics.IpInAddrErrors++;
1564 #endif1565 /*1566 * The forwarder is inefficient and copies the packet. We1567 * free the original now.1568 */1569
1570 kfree_skb(skb, FREE_WRITE);
1571 return(0);
1572 }1573
1574 #ifdefCONFIG_IP_MULTICAST1575
1576 if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1577 {1578 /*1579 * Check it is for one of our groups1580 */1581 structip_mc_list *ip_mc=dev->ip_mc_list;
1582 do1583 {1584 if(ip_mc==NULL)
1585 {1586 kfree_skb(skb, FREE_WRITE);
1587 return 0;
1588 }1589 if(ip_mc->multiaddr==iph->daddr)
1590 break;
1591 ip_mc=ip_mc->next;
1592 }1593 while(1);
1594 }1595 #endif1596 /*1597 * Account for the packet1598 */1599
1600 #ifdefCONFIG_IP_ACCT1601 ip_acct_cnt(iph,ip_acct_chain,1);
1602 #endif1603
1604 /*1605 * Reassemble IP fragments.1606 */1607
1608 if(is_frag)
1609 {1610 /* Defragment. Obtain the complete packet if there is one */1611 skb=ip_defrag(iph,skb,dev);
1612 if(skb==NULL)
1613 return 0;
1614 skb->dev = dev;
1615 iph=skb->h.iph;
1616 }1617
1618
1619
1620 /*1621 * Point into the IP datagram, just past the header.1622 */1623
1624 skb->ip_hdr = iph;
1625 skb->h.raw += iph->ihl*4;
1626
1627 /*1628 * Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.1629 */1630
1631 hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1632
1633 /* If there maybe a raw socket we must check - if not we don't care less */1634 if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1635 {1636 structsock *sknext=NULL;
1637 structsk_buff *skb1;
1638 raw_sk=get_sock_raw(raw_sk, hash, iph->saddr, iph->daddr);
1639 if(raw_sk) /* Any raw sockets */1640 {1641 do1642 {1643 /* Find the next */1644 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1645 if(sknext)
1646 skb1=skb_clone(skb, GFP_ATOMIC);
1647 else1648 break; /* One pending raw socket left */1649 if(skb1)
1650 raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1651 raw_sk=sknext;
1652 }1653 while(raw_sk!=NULL);
1654 /* Here either raw_sk is the last raw socket, or NULL if none */1655 /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */1656 }1657 }1658
1659 /*1660 * skb->h.raw now points at the protocol beyond the IP header.1661 */1662
1663 hash = iph->protocol & (MAX_INET_PROTOS -1);
1664 for (ipprot = (structinet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(structinet_protocol *)ipprot->next)
1665 {1666 structsk_buff *skb2;
1667
1668 if (ipprot->protocol != iph->protocol)
1669 continue;
1670 /*1671 * See if we need to make a copy of it. This will1672 * only be set if more than one protocol wants it.1673 * and then not for the last one. If there is a pending1674 * raw delivery wait for that1675 */1676 if (ipprot->copy || raw_sk)
1677 {1678 skb2 = skb_clone(skb, GFP_ATOMIC);
1679 if(skb2==NULL)
1680 continue;
1681 }1682 else1683 {1684 skb2 = skb;
1685 }1686 flag = 1;
1687
1688 /*1689 * Pass on the datagram to each protocol that wants it,1690 * based on the datagram protocol. We should really1691 * check the protocol handler's return values here...1692 */1693 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1694 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1695 iph->saddr, 0, ipprot);
1696
1697 }1698
1699 /*1700 * All protocols checked.1701 * If this packet was a broadcast, we may *not* reply to it, since that1702 * causes (proven, grin) ARP storms and a leakage of memory (i.e. all1703 * ICMP reply messages get queued up for transmission...)1704 */1705
1706 if(raw_sk!=NULL) /* Shift to last raw user */1707 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1708 elseif (!flag) /* Free and report errors */1709 {1710 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1711 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, dev);
1712 kfree_skb(skb, FREE_WRITE);
1713 }1714
1715 return(0);
1716 }1717
1718 /*1719 * Loop a packet back to the sender.1720 */1721
1722 staticvoidip_loopback(structdevice *old_dev, structsk_buff *skb)
/* */1723 {1724 externstructdeviceloopback_dev;
1725 structdevice *dev=&loopback_dev;
1726 intlen=skb->len-old_dev->hard_header_len;
1727 structsk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1728
1729 if(newskb==NULL)
1730 return;
1731
1732 newskb->link3=NULL;
1733 newskb->sk=NULL;
1734 newskb->dev=dev;
1735 newskb->saddr=skb->saddr;
1736 newskb->daddr=skb->daddr;
1737 newskb->raddr=skb->raddr;
1738 newskb->free=1;
1739 newskb->lock=0;
1740 newskb->users=0;
1741 newskb->pkt_type=skb->pkt_type;
1742 newskb->len=len+dev->hard_header_len;
1743
1744
1745 newskb->ip_hdr=(structiphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1746 memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1747
1748 /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */1749
1750 /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/1751 ip_queue_xmit(NULL, dev, newskb, 1);
1752 }1753
1754
1755 /*1756 * Queues a packet to be sent, and starts the transmitter1757 * if necessary. if free = 1 then we free the block after1758 * transmit, otherwise we don't. If free==2 we not only1759 * free the block but also don't assign a new ip seq number.1760 * This routine also needs to put in the total length,1761 * and compute the checksum1762 */1763
1764 voidip_queue_xmit(structsock *sk, structdevice *dev,
/* */1765 structsk_buff *skb, intfree)
1766 {1767 structiphdr *iph;
1768 unsignedchar *ptr;
1769
1770 /* Sanity check */1771 if (dev == NULL)
1772 {1773 printk("IP: ip_queue_xmit dev = NULL\n");
1774 return;
1775 }1776
1777 IS_SKB(skb);
1778
1779 /*1780 * Do some book-keeping in the packet for later1781 */1782
1783
1784 skb->dev = dev;
1785 skb->when = jiffies;
1786
1787 /*1788 * Find the IP header and set the length. This is bad1789 * but once we get the skb data handling code in the1790 * hardware will push its header sensibly and we will1791 * set skb->ip_hdr to avoid this mess and the fixed1792 * header length problem1793 */1794
1795 ptr = skb->data;
1796 ptr += dev->hard_header_len;
1797 iph = (structiphdr *)ptr;
1798 skb->ip_hdr = iph;
1799 iph->tot_len = ntohs(skb->len-dev->hard_header_len);
1800
1801 /*1802 * No reassigning numbers to fragments...1803 */1804
1805 if(free!=2)
1806 iph->id = htons(ip_id_count++);
1807 else1808 free=1;
1809
1810 /* All buffers without an owner socket get freed */1811 if (sk == NULL)
1812 free = 1;
1813
1814 skb->free = free;
1815
1816 /*1817 * Do we need to fragment. Again this is inefficient.1818 * We need to somehow lock the original buffer and use1819 * bits of it.1820 */1821
1822 if(skb->len > dev->mtu + dev->hard_header_len)
1823 {1824 ip_fragment(sk,skb,dev,0);
1825 IS_SKB(skb);
1826 kfree_skb(skb,FREE_WRITE);
1827 return;
1828 }1829
1830 /*1831 * Add an IP checksum1832 */1833
1834 ip_send_check(iph);
1835
1836 /*1837 * Print the frame when debugging1838 */1839
1840 /*1841 * More debugging. You cannot queue a packet already on a list1842 * Spot this and moan loudly.1843 */1844 if (skb->next != NULL)
1845 {1846 printk("ip_queue_xmit: next != NULL\n");
1847 skb_unlink(skb);
1848 }1849
1850 /*1851 * If a sender wishes the packet to remain unfreed1852 * we add it to his send queue. This arguably belongs1853 * in the TCP level since nobody else uses it. BUT1854 * remember IPng might change all the rules.1855 */1856
1857 if (!free)
1858 {1859 unsignedlongflags;
1860 /* The socket now has more outstanding blocks */1861
1862 sk->packets_out++;
1863
1864 /* Protect the list for a moment */1865 save_flags(flags);
1866 cli();
1867
1868 if (skb->link3 != NULL)
1869 {1870 printk("ip.c: link3 != NULL\n");
1871 skb->link3 = NULL;
1872 }1873 if (sk->send_head == NULL)
1874 {1875 sk->send_tail = skb;
1876 sk->send_head = skb;
1877 }1878 else1879 {1880 sk->send_tail->link3 = skb;
1881 sk->send_tail = skb;
1882 }1883 /* skb->link3 is NULL */1884
1885 /* Interrupt restore */1886 restore_flags(flags);
1887 }1888 else1889 /* Remember who owns the buffer */1890 skb->sk = sk;
1891
1892 /*1893 * If the indicated interface is up and running, send the packet.1894 */1895
1896 ip_statistics.IpOutRequests++;
1897 #ifdefCONFIG_IP_ACCT1898 ip_acct_cnt(iph,ip_acct_chain,1);
1899 #endif1900
1901 #ifdefCONFIG_IP_MULTICAST1902
1903 /*1904 * Multicasts are looped back for other local users1905 */1906
1907 if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1908 {1909 if(sk==NULL || sk->ip_mc_loop)
1910 {1911 if(iph->daddr==IGMP_ALL_HOSTS)
1912 ip_loopback(dev,skb);
1913 else1914 {1915 structip_mc_list *imc=dev->ip_mc_list;
1916 while(imc!=NULL)
1917 {1918 if(imc->multiaddr==iph->daddr)
1919 {1920 ip_loopback(dev,skb);
1921 break;
1922 }1923 imc=imc->next;
1924 }1925 }1926 }1927 /* Multicasts with ttl 0 must not go beyond the host */1928
1929 if(skb->ip_hdr->ttl==0)
1930 {1931 kfree_skb(skb, FREE_READ);
1932 return;
1933 }1934 }1935 #endif1936 if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1937 ip_loopback(dev,skb);
1938
1939 if (dev->flags & IFF_UP)
1940 {1941 /*1942 * If we have an owner use its priority setting,1943 * otherwise use NORMAL1944 */1945
1946 if (sk != NULL)
1947 {1948 dev_queue_xmit(skb, dev, sk->priority);
1949 }1950 else1951 {1952 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1953 }1954 }1955 else1956 {1957 ip_statistics.IpOutDiscards++;
1958 if (free)
1959 kfree_skb(skb, FREE_WRITE);
1960 }1961 }1962
1963
1964
1965 #ifdefCONFIG_IP_MULTICAST1966
1967 /*1968 * Write an multicast group list table for the IGMP daemon to1969 * read.1970 */1971
1972 intip_mc_procinfo(char *buffer, char **start, off_toffset, intlength)
/* */1973 {1974 off_tpos=0, begin=0;
1975 structip_mc_list *im;
1976 unsignedlongflags;
1977 intlen=0;
1978 structdevice *dev;
1979
1980 len=sprintf(buffer,"Device : Count\tGroup Users Timer\n");
1981 save_flags(flags);
1982 cli();
1983
1984 for(dev = dev_base; dev; dev = dev->next)
1985 {1986 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
1987 {1988 len+=sprintf(buffer+len,"%-10s: %5d\n",
1989 dev->name, dev->mc_count);
1990 for(im = dev->ip_mc_list; im; im = im->next)
1991 {1992 len+=sprintf(buffer+len,
1993 "\t\t\t%08lX %5d %d:%08lX\n",
1994 im->multiaddr, im->users,
1995 im->tm_running, im->timer.expires);
1996 pos=begin+len;
1997 if(pos<offset)
1998 {1999 len=0;
2000 begin=pos;
2001 }2002 if(pos>offset+length)
2003 break;
2004 }2005 }2006 }2007 restore_flags(flags);
2008 *start=buffer+(offset-begin);
2009 len-=(offset-begin);
2010 if(len>length)
2011 len=length;
2012 returnlen;
2013 }2014
2015
2016 #endif2017 /*2018 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on2019 * an IP socket.2020 *2021 * We implement IP_TOS (type of service), IP_TTL (time to live).2022 *2023 * Next release we will sort out IP_OPTIONS since for some people are kind of important.2024 */2025
2026 intip_setsockopt(structsock *sk, intlevel, intoptname, char *optval, intoptlen)
/* */2027 {2028 intval,err;
2029 #ifdefined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2030 structip_fwtmp_fw;
2031 #endif2032 if (optval == NULL)
2033 return(-EINVAL);
2034
2035 err=verify_area(VERIFY_READ, optval, sizeof(int));
2036 if(err)
2037 returnerr;
2038
2039 val = get_fs_long((unsignedlong *)optval);
2040
2041 if(level!=SOL_IP)
2042 return -EOPNOTSUPP;
2043
2044 #ifdefCONFIG_IP_MULTICAST2045 if(optname==IP_MULTICAST_TTL)
2046 {2047 unsignedcharucval;
2048 ucval=get_fs_byte((unsignedchar *)optval);
2049 printk("MC TTL %d\n", ucval);
2050 if(ucval<1||ucval>255)
2051 return -EINVAL;
2052 sk->ip_mc_ttl=(int)ucval;
2053 return 0;
2054 }2055 #endif2056
2057 switch(optname)
2058 {2059 caseIP_TOS:
2060 if(val<0||val>255)
2061 return -EINVAL;
2062 sk->ip_tos=val;
2063 if(val==IPTOS_LOWDELAY)
2064 sk->priority=SOPRI_INTERACTIVE;
2065 if(val==IPTOS_THROUGHPUT)
2066 sk->priority=SOPRI_BACKGROUND;
2067 return 0;
2068 caseIP_TTL:
2069 if(val<1||val>255)
2070 return -EINVAL;
2071 sk->ip_ttl=val;
2072 return 0;
2073 #ifdefCONFIG_IP_MULTICAST2074 #ifdef GCC_WORKS
2075 caseIP_MULTICAST_TTL:
2076 {2077 unsignedcharucval;
2078
2079 ucval=get_fs_byte((unsignedchar *)optval);
2080 printk("MC TTL %d\n", ucval);
2081 if(ucval<1||ucval>255)
2082 return -EINVAL;
2083 sk->ip_mc_ttl=(int)ucval;
2084 return 0;
2085 }2086 #endif2087 caseIP_MULTICAST_LOOP:
2088 {2089 unsignedcharucval;
2090
2091 ucval=get_fs_byte((unsignedchar *)optval);
2092 if(ucval!=0 && ucval!=1)
2093 return -EINVAL;
2094 sk->ip_mc_loop=(int)ucval;
2095 return 0;
2096 }2097 caseIP_MULTICAST_IF:
2098 {2099 /* Not fully tested */2100 structin_addraddr;
2101 structdevice *dev=NULL;
2102
2103 /*2104 * Check the arguments are allowable2105 */2106
2107 err=verify_area(VERIFY_READ, optval, sizeof(addr));
2108 if(err)
2109 returnerr;
2110
2111 memcpy_fromfs(&addr,optval,sizeof(addr));
2112
2113 printk("MC bind %s\n", in_ntoa(addr.s_addr));
2114
2115 /*2116 * What address has been requested2117 */2118
2119 if(addr.s_addr==INADDR_ANY) /* Default */2120 {2121 sk->ip_mc_name[0]=0;
2122 return 0;
2123 }2124
2125 /*2126 * Find the device2127 */2128
2129 for(dev = dev_base; dev; dev = dev->next)
2130 {2131 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2132 (dev->pa_addr==addr.s_addr))
2133 break;
2134 }2135
2136 /*2137 * Did we find one2138 */2139
2140 if(dev)
2141 {2142 strcpy(sk->ip_mc_name,dev->name);
2143 return 0;
2144 }2145 return -EADDRNOTAVAIL;
2146 }2147
2148 caseIP_ADD_MEMBERSHIP:
2149 {2150
2151 /*2152 * FIXME: Add/Del membership should have a semaphore protecting them from re-entry2153 */2154 structip_mreqmreq;
2155 staticstructoptionsoptmem;
2156 unsignedlongroute_src;
2157 structrtable *rt;
2158 structdevice *dev=NULL;
2159
2160 /*2161 * Check the arguments.2162 */2163
2164 err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2165 if(err)
2166 returnerr;
2167
2168 memcpy_fromfs(&mreq,optval,sizeof(mreq));
2169
2170 /* 2171 * Get device for use later2172 */2173
2174 if(mreq.imr_interface.s_addr==INADDR_ANY)
2175 {2176 /*2177 * Not set so scan.2178 */2179 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2180 {2181 dev=rt->rt_dev;
2182 rt->rt_use--;
2183 }2184 }2185 else2186 {2187 /*2188 * Find a suitable device.2189 */2190 for(dev = dev_base; dev; dev = dev->next)
2191 {2192 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2193 (dev->pa_addr==mreq.imr_interface.s_addr))
2194 break;
2195 }2196 }2197
2198 /*2199 * No device, no cookies.2200 */2201
2202 if(!dev)
2203 return -ENODEV;
2204
2205 /*2206 * Join group.2207 */2208
2209 returnip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2210 }2211
2212 caseIP_DROP_MEMBERSHIP:
2213 {2214 structip_mreqmreq;
2215 structrtable *rt;
2216 staticstructoptionsoptmem;
2217 unsignedlongroute_src;
2218 structdevice *dev=NULL;
2219
2220 /*2221 * Check the arguments2222 */2223
2224 err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2225 if(err)
2226 returnerr;
2227
2228 memcpy_fromfs(&mreq,optval,sizeof(mreq));
2229
2230 /*2231 * Get device for use later 2232 */2233
2234 if(mreq.imr_interface.s_addr==INADDR_ANY)
2235 {2236 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2237 {2238 dev=rt->rt_dev;
2239 rt->rt_use--;
2240 }2241 }2242 else2243 {2244 for(dev = dev_base; dev; dev = dev->next)
2245 {2246 if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2247 (dev->pa_addr==mreq.imr_interface.s_addr))
2248 break;
2249 }2250 }2251
2252 /*2253 * Did we find a suitable device.2254 */2255
2256 if(!dev)
2257 return -ENODEV;
2258
2259 /*2260 * Leave group2261 */2262
2263 returnip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2264 }2265 #endif2266 #ifdefCONFIG_IP_FIREWALL2267 caseIP_FW_ADD_BLK:
2268 caseIP_FW_DEL_BLK:
2269 caseIP_FW_ADD_FWD:
2270 caseIP_FW_DEL_FWD:
2271 caseIP_FW_CHK_BLK:
2272 caseIP_FW_CHK_FWD:
2273 caseIP_FW_FLUSH:
2274 caseIP_FW_POLICY:
2275 if(!suser())
2276 return -EPERM;
2277 if(optlen>sizeof(tmp_fw) || optlen<1)
2278 return -EINVAL;
2279 err=verify_area(VERIFY_READ,optval,optlen);
2280 if(err)
2281 returnerr;
2282 memcpy_fromfs(&tmp_fw,optval,optlen);
2283 err=ip_fw_ctl(optname, &tmp_fw,optlen);
2284 return -err; /* -0 is 0 after all */2285
2286 #endif2287 #ifdefCONFIG_IP_ACCT2288 caseIP_ACCT_DEL:
2289 caseIP_ACCT_ADD:
2290 caseIP_ACCT_FLUSH:
2291 caseIP_ACCT_ZERO:
2292 if(!suser())
2293 return -EPERM;
2294 if(optlen>sizeof(tmp_fw) || optlen<1)
2295 return -EINVAL;
2296 err=verify_area(VERIFY_READ,optval,optlen);
2297 if(err)
2298 returnerr;
2299 memcpy_fromfs(&tmp_fw, optval,optlen);
2300 err=ip_acct_ctl(optname, &tmp_fw,optlen);
2301 return -err; /* -0 is 0 after all */2302 #endif2303 /* IP_OPTIONS and friends go here eventually */2304 default:
2305 return(-ENOPROTOOPT);
2306 }2307 }2308
2309 /*2310 * Get the options. Note for future reference. The GET of IP options gets the2311 * _received_ ones. The set sets the _sent_ ones.2312 */2313
2314 intip_getsockopt(structsock *sk, intlevel, intoptname, char *optval, int *optlen)
/* */2315 {2316 intval,err;
2317 #ifdefCONFIG_IP_MULTICAST2318 intlen;
2319 #endif2320
2321 if(level!=SOL_IP)
2322 return -EOPNOTSUPP;
2323
2324 switch(optname)
2325 {2326 caseIP_TOS:
2327 val=sk->ip_tos;
2328 break;
2329 caseIP_TTL:
2330 val=sk->ip_ttl;
2331 break;
2332 #ifdefCONFIG_IP_MULTICAST2333 caseIP_MULTICAST_TTL:
2334 val=sk->ip_mc_ttl;
2335 break;
2336 caseIP_MULTICAST_LOOP:
2337 val=sk->ip_mc_loop;
2338 break;
2339 caseIP_MULTICAST_IF:
2340 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2341 if(err)
2342 returnerr;
2343 len=strlen(sk->ip_mc_name);
2344 err=verify_area(VERIFY_WRITE, optval, len);
2345 if(err)
2346 returnerr;
2347 put_fs_long(len,(unsignedlong *) optlen);
2348 memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2349 return 0;
2350 #endif2351 default:
2352 return(-ENOPROTOOPT);
2353 }2354 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2355 if(err)
2356 returnerr;
2357 put_fs_long(sizeof(int),(unsignedlong *) optlen);
2358
2359 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2360 if(err)
2361 returnerr;
2362 put_fs_long(val,(unsignedlong *)optval);
2363
2364 return(0);
2365 }2366
2367 /*2368 * IP protocol layer initialiser2369 */2370
2371 staticstructpacket_typeip_packet_type =
2372 {2373 0, /* MUTTER ntohs(ETH_P_IP),*/2374 NULL, /* All devices */2375 ip_rcv,
2376 NULL,
2377 NULL,
2378 };
2379
2380 /*2381 * Device notifier2382 */2383
2384 staticintip_rt_event(unsignedlongevent, void *ptr)
/* */2385 {2386 if(event==NETDEV_DOWN)
2387 ip_rt_flush(ptr);
2388 returnNOTIFY_DONE;
2389 }2390
2391 structnotifier_blockip_rt_notifier={2392 ip_rt_event,
2393 NULL,
2394 0
2395 };
2396
2397 /*2398 * IP registers the packet type and then calls the subprotocol initialisers2399 */2400
2401 voidip_init(void)
/* */2402 {2403 ip_packet_type.type=htons(ETH_P_IP);
2404 dev_add_pack(&ip_packet_type);
2405
2406 /* So we flush routes when a device is downed */2407 register_netdevice_notifier(&ip_rt_notifier);
2408 /* ip_raw_init();2409 ip_packet_init();2410 ip_tcp_init();2411 ip_udp_init();*/2412 }