1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * The Internet Protocol (IP) module. 7 * 8 * Version: @(#)ip.c 1.0.16b 9/1/93 9 * 10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Donald Becker, <becker@super.org> 13 * Alan Cox, <gw4pts@gw4pts.ampr.org> 14 * Richard Underwood 15 * 16 * Fixes: 17 * Alan Cox : Commented a couple of minor bits of surplus code 18 * Alan Cox : Undefining IP_FORWARD doesn't include the code 19 * (just stops a compiler warning). 20 * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes 21 * are junked rather than corrupting things. 22 * Alan Cox : Frames to bad broadcast subnets are dumped 23 * We used to process them non broadcast and 24 * boy could that cause havoc. 25 * Alan Cox : ip_forward sets the free flag on the 26 * new frame it queues. Still crap because 27 * it copies the frame but at least it 28 * doesn't eat memory too. 29 * Alan Cox : Generic queue code and memory fixes. 30 * Fred Van Kempen : IP fragment support (borrowed from NET2E) 31 * Gerhard Koerting: Forward fragmented frames correctly. 32 * Gerhard Koerting: Fixes to my fix of the above 8-). 33 * Gerhard Koerting: IP interface addressing fix. 34 * Linus Torvalds : More robustness checks 35 * Alan Cox : Even more checks: Still not as robust as it ought to be 36 * Alan Cox : Save IP header pointer for later 37 * Alan Cox : ip option setting 38 * Alan Cox : Use ip_tos/ip_ttl settings 39 * Alan Cox : Fragmentation bogosity removed 40 * (Thanks to Mark.Bush@prg.ox.ac.uk) 41 * Dmitry Gorodchanin : Send of a raw packet crash fix. 42 * Alan Cox : Silly ip bug when an overlength 43 * fragment turns up. Now frees the 44 * queue. 45 * Linus Torvalds/ : Memory leakage on fragmentation 46 * Alan Cox : handling. 
47 * Gerhard Koerting: Forwarding uses IP priority hints 48 * Teemu Rantanen : Fragment problems. 49 * Alan Cox : General cleanup, comments and reformat 50 * Alan Cox : SNMP statistics 51 * Alan Cox : BSD address rule semantics. Also see 52 * UDP as there is a nasty checksum issue 53 * if you do things the wrong way. 54 * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file 55 * Alan Cox : IP options adjust sk->priority. 56 * Pedro Roque : Fix mtu/length error in ip_forward. 57 * Alan Cox : Avoid ip_chk_addr when possible. 58 * Richard Underwood : IP multicasting. 59 * Alan Cox : Cleaned up multicast handlers. 60 * Alan Cox : RAW sockets demultiplex in the BSD style. 61 * Gunther Mayer : Fix the SNMP reporting typo 62 * Alan Cox : Always in group 224.0.0.1 63 * 64 * To Fix: 65 * IP option processing is mostly not needed. ip_forward needs to know about routing rules 66 * and time stamp but that's about all. Use the route mtu field here too 67 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient 68 * and could be made very efficient with the addition of some virtual memory hacks to permit 69 * the allocation of a buffer that can then be 'grown' by twiddling page tables. 70 * Output fragmentation wants updating along with the buffer management to use a single 71 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet 72 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause 73 * fragmentation anyway. 74 * 75 * This program is free software; you can redistribute it and/or 76 * modify it under the terms of the GNU General Public License 77 * as published by the Free Software Foundation; either version 78 * 2 of the License, or (at your option) any later version. 79 */ 80
81 #include <asm/segment.h>
82 #include <asm/system.h>
83 #include <linux/types.h>
84 #include <linux/kernel.h>
85 #include <linux/sched.h>
86 #include <linux/string.h>
87 #include <linux/errno.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/in.h>
91 #include <linux/inet.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include "snmp.h"
95 #include "ip.h"
96 #include "protocol.h"
97 #include "route.h"
98 #include "tcp.h"
99 #include <linux/skbuff.h>
100 #include "sock.h"
101 #include "arp.h"
102 #include "icmp.h"
103 #include "raw.h"
104 #include "igmp.h"
105 #include <linux/ip_fw.h>
106
107 #define CONFIG_IP_DEFRAG
108
109 externintlast_retran;
110 externvoid sort_send(structsock *sk);
111
112 #definemin(a,b) ((a)<(b)?(a):(b))
113 #defineLOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
114
115 /* 116 * SNMP management statistics 117 */ 118
119 #ifdefCONFIG_IP_FORWARD 120 structip_mibip_statistics={1,64,}; /* Forwarding=Yes, Default TTL=64 */ 121 #else 122 structip_mibip_statistics={0,64,}; /* Forwarding=No, Default TTL=64 */ 123 #endif 124
125 #ifdefCONFIG_IP_MULTICAST 126
127 structip_mc_list *ip_mc_head=NULL;
128
129 #endif 130
131 /* 132 * Handle the issuing of an ioctl() request 133 * for the ip device. This is scheduled to 134 * disappear 135 */ 136
137 intip_ioctl(structsock *sk, intcmd, unsignedlongarg)
/* */ 138 { 139 switch(cmd)
140 { 141 default:
142 return(-EINVAL);
143 } 144 } 145
146
/* these two routines will do routing. */

/*
 *	Strict source-route hook.  Not implemented yet: do_options() parses
 *	the SSRR option into *opt and calls here, but no routing decision
 *	is actually taken.
 */
static void
strict_route(struct iphdr *iph, struct options *opt)
{
}
154
/*
 *	Loose source-route hook.  Not implemented yet: do_options() parses
 *	the LSRR option into *opt and calls here, but no routing decision
 *	is actually taken.
 */
static void
loose_route(struct iphdr *iph, struct options *opt)
{
}
160
161
162
/* This routine will check to see if we have lost a gateway. */
/*
 *	Stub: gateway-loss detection for daddr is not implemented.
 */
void
ip_route_check(unsigned long daddr)
{
}
169
#if 0
/* this routine puts the options at the end of an ip header. */
/*
 *	Compiled out (see the Not_Yet_Avail guard in ip_build_header).
 *	Would append the IP option area after the fixed header; since no
 *	options are supported it just writes an end-of-options byte and
 *	reports 4 option bytes (one padded 32-bit word).
 */
static int
build_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *ptr;
	/* currently we don't support any options. */
	ptr = (unsigned char *)(iph+1);
	*ptr = 0;
	return (4);
}
#endif
183
184 /* 185 * Take an skb, and fill in the MAC header. 186 */ 187
188 staticintip_send(structsk_buff *skb, unsignedlongdaddr, intlen, structdevice *dev, unsignedlongsaddr)
/* */ 189 { 190 intmac = 0;
191
192 skb->dev = dev;
193 skb->arp = 1;
194 if (dev->hard_header)
195 { 196 /* 197 * Build a hardware header. Source address is our mac, destination unknown 198 * (rebuild header will sort this out) 199 */ 200 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
201 if (mac < 0)
202 { 203 mac = -mac;
204 skb->arp = 0;
205 skb->raddr = daddr; /* next routing address */ 206 } 207 } 208 returnmac;
209 } 210
211 intip_id_count = 0;
212
213 /* 214 * This routine builds the appropriate hardware/IP headers for 215 * the routine. It assumes that if *dev != NULL then the 216 * protocol knows what it's doing, otherwise it uses the 217 * routing/ARP tables to select a device struct. 218 */ 219 intip_build_header(structsk_buff *skb, unsignedlongsaddr, unsignedlongdaddr,
/* */ 220 structdevice **dev, inttype, structoptions *opt, intlen, inttos, intttl)
221 { 222 staticstructoptionsoptmem;
223 structiphdr *iph;
224 structrtable *rt;
225 unsignedchar *buff;
226 unsignedlongraddr;
227 inttmp;
228 unsignedlongsrc;
229
230 buff = skb->data;
231
232 /* 233 * See if we need to look up the device. 234 */ 235
236 #ifdef CONFIG_INET_MULTICAST
237 if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
238 *dev=dev_get(skb->sk->ip_mc_name);
239 #endif 240 if (*dev == NULL)
241 { 242 if(skb->localroute)
243 rt = ip_rt_local(daddr, &optmem, &src);
244 else 245 rt = ip_rt_route(daddr, &optmem, &src);
246 if (rt == NULL)
247 { 248 ip_statistics.IpOutNoRoutes++;
249 return(-ENETUNREACH);
250 } 251
252 *dev = rt->rt_dev;
253 /* 254 * If the frame is from us and going off machine it MUST MUST MUST 255 * have the output device ip address and never the loopback 256 */ 257 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
258 saddr = src;/*rt->rt_dev->pa_addr;*/ 259 raddr = rt->rt_gateway;
260
261 opt = &optmem;
262 } 263 else 264 { 265 /* 266 * We still need the address of the first hop. 267 */ 268 if(skb->localroute)
269 rt = ip_rt_local(daddr, &optmem, &src);
270 else 271 rt = ip_rt_route(daddr, &optmem, &src);
272 /* 273 * If the frame is from us and going off machine it MUST MUST MUST 274 * have the output device ip address and never the loopback 275 */ 276 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
277 saddr = src;/*rt->rt_dev->pa_addr;*/ 278
279 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
280 } 281
282 /* 283 * No source addr so make it our addr 284 */ 285 if (saddr == 0)
286 saddr = src;
287
288 /* 289 * No gateway so aim at the real destination 290 */ 291 if (raddr == 0)
292 raddr = daddr;
293
294 /* 295 * Now build the MAC header. 296 */ 297
298 tmp = ip_send(skb, raddr, len, *dev, saddr);
299 buff += tmp;
300 len -= tmp;
301
302 /* 303 * Book keeping 304 */ 305
306 skb->dev = *dev;
307 skb->saddr = saddr;
308 if (skb->sk)
309 skb->sk->saddr = saddr;
310
311 /* 312 * Now build the IP header. 313 */ 314
315 /* 316 * If we are using IPPROTO_RAW, then we don't need an IP header, since 317 * one is being supplied to us by the user 318 */ 319
320 if(type == IPPROTO_RAW)
321 return (tmp);
322
323 iph = (structiphdr *)buff;
324 iph->version = 4;
325 iph->tos = tos;
326 iph->frag_off = 0;
327 iph->ttl = ttl;
328 iph->daddr = daddr;
329 iph->saddr = saddr;
330 iph->protocol = type;
331 iph->ihl = 5;
332 skb->ip_hdr = iph;
333
334 /* Setup the IP options. */ 335 #ifdef Not_Yet_Avail
336 build_options(iph, opt);
337 #endif 338 #ifdefCONFIG_IP_FIREWALL 339 if(!ip_fw_chk(iph,ip_fw_blk_chain))
340 return -EPERM;
341 #endif 342
343 return(20 + tmp); /* IP header plus MAC header size */ 344 } 345
346
/*
 *	Parse the IP options of *iph into *opt.
 *
 *	IMPORTANT: option processing is currently DISABLED - the function
 *	zeroes *opt and returns 0 immediately; everything after the first
 *	return(0) below is parked, unreachable code kept for when option
 *	handling is re-enabled (see the "To Fix" note in the file header).
 *
 *	Returns 0 on success, 1 on a malformed option (unreachable today).
 */
static int
do_options(struct iphdr *iph, struct options *opt)
{
	unsigned char *buff;
	int done = 0;
	int i, len = sizeof(struct iphdr);

	/* Zero out the options. */
	opt->record_route.route_size = 0;
	opt->loose_route.route_size = 0;
	opt->strict_route.route_size = 0;
	opt->tstamp.ptr = 0;
	opt->security = 0;
	opt->compartment = 0;
	opt->handling = 0;
	opt->stream = 0;
	opt->tcc = 0;
	return(0);	/* option parsing disabled - all code below is dead */

	/* Advance the pointer to start at the options. */
	buff = (unsigned char *)(iph + 1);

	/* Now start the processing. */
	while (!done && len < iph->ihl*4) switch(*buff) {
		case IPOPT_END:
			done = 1;
			break;
		case IPOPT_NOOP:	/* single-byte padding */
			buff++;
			len++;
			break;
		case IPOPT_SEC:		/* RFC 791 security option: fixed 11 bytes */
			buff++;
			if (*buff != 11) return(1);
			buff++;
			opt->security = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->compartment = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->handling = ntohs(*(unsigned short *)buff);
			buff += 2;
			opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
			buff += 3;
			len += 11;
			break;
		case IPOPT_LSRR:	/* loose source route: len = 3 + 4*n */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->loose_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->loose_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->loose_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);	/* too many hops: junk the frame */
				opt->loose_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SSRR:	/* strict source route: same layout as LSRR */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->strict_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->strict_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->strict_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return(1);
				opt->strict_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_RR:		/* record route: same layout again */
			buff++;
			if ((*buff - 3)% 4 != 0) return(1);
			len += *buff;
			opt->record_route.route_size = (*buff -3)/4;
			buff++;
			if (*buff % 4 != 0) return(1);
			opt->record_route.pointer = *buff/4 - 1;
			buff++;
			buff++;
			for (i = 0; i < opt->record_route.route_size; i++) {
				if(i>=MAX_ROUTE)
					return 1;
				opt->record_route.route[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		case IPOPT_SID:		/* stream identifier: 4 bytes */
			len += 4;
			buff +=2;
			opt->stream = *(unsigned short *)buff;
			buff += 2;
			break;
		case IPOPT_TIMESTAMP:
			buff++;
			len += *buff;
			if (*buff % 4 != 0) return(1);
			opt->tstamp.len = *buff / 4 - 1;
			buff++;
			if ((*buff - 1) % 4 != 0) return(1);
			opt->tstamp.ptr = (*buff-1)/4;
			buff++;
			opt->tstamp.x.full_char = *buff;
			buff++;
			for (i = 0; i < opt->tstamp.len; i++) {
				opt->tstamp.data[i] = *(unsigned long *)buff;
				buff += 4;
			}
			break;
		default:
			return(1);	/* unknown option: reject the frame */
	}

	/* If no record route was given, fall back to a source route. */
	if (opt->record_route.route_size == 0) {
		if (opt->strict_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->strict_route),
			       sizeof(opt->record_route));
		} else if (opt->loose_route.route_size != 0) {
			memcpy(&(opt->record_route), &(opt->loose_route),
			       sizeof(opt->record_route));
		}
	}

	/* Hand unfinished source routes to the (stub) routing hooks. */
	if (opt->strict_route.route_size != 0 &&
	    opt->strict_route.route_size != opt->strict_route.pointer) {
		strict_route(iph, opt);
		return(0);
	}

	if (opt->loose_route.route_size != 0 &&
	    opt->loose_route.route_size != opt->loose_route.pointer) {
		loose_route(iph, opt);
		return(0);
	}

	return(0);
}
/*
 * This is a version of ip_compute_csum() optimized for IP headers, which
 * always checksum on 4 octet boundaries.
 *
 * i386-only: sums `wlen` 32-bit words starting at buff with add-with-carry,
 * folds the carries into 16 bits and returns the one's-complement result
 * in host byte order.  `wlen` is a count of 32-bit words (iph->ihl), not
 * bytes.  buff/wlen are consumed by the asm (lodsl/decl) - the C copies
 * are dead afterwards; `bogus` is just a scratch register.
 */

static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
{
	unsigned long sum = 0;

	if (wlen)
	{
		unsigned long bogus;
		__asm__("clc\n"
		"1:\t"
		"lodsl\n\t"
		"adcl %3, %0\n\t"	/* accumulate with carry */
		"decl %2\n\t"
		"jne 1b\n\t"
		"adcl $0, %0\n\t"	/* pick up the final carry */
		"movl %0, %3\n\t"
		"shrl $16, %3\n\t"	/* fold high half into low half */
		"addw %w3, %w0\n\t"
		"adcw $0, %w0"
		: "=r" (sum), "=S" (buff), "=r" (wlen), "=a" (bogus)
		: "0" (sum), "1" (buff), "2" (wlen));
	}
	return (~sum) & 0xffff;
}
/*
 * This routine does all the checksum computations that don't
 * require anything special (like copying or special headers).
 *
 * i386-only: computes the Internet one's-complement checksum over `len`
 * bytes at buff.  The bulk is summed 32 bits at a time, then a trailing
 * 16-bit word and a trailing byte are added separately, so any length
 * is handled.  Returns the complemented 16-bit sum.
 */

unsigned short ip_compute_csum(unsigned char * buff, int len)
{
	unsigned long sum = 0;

	/* Do the first multiple of 4 bytes and convert to 16 bits. */
	if (len > 3)
	{
		__asm__("clc\n"
		"1:\t"
		"lodsl\n\t"
		"adcl %%eax, %%ebx\n\t"	/* accumulate 32-bit words with carry */
		"loop 1b\n\t"
		"adcl $0, %%ebx\n\t"
		"movl %%ebx, %%eax\n\t"
		"shrl $16, %%eax\n\t"	/* fold into 16 bits */
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum) , "=S" (buff)
		: "0" (sum), "c" (len >> 2) ,"1" (buff)
		: "ax", "cx", "si", "bx" );
	}
	if (len & 2)	/* a 16-bit straggler */
	{
		__asm__("lodsw\n\t"
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	if (len & 1)	/* a final odd byte */
	{
		__asm__("lodsb\n\t"
		"movb $0, %%ah\n\t"	/* zero-extend the byte */
		"addw %%ax, %%bx\n\t"
		"adcw $0, %%bx"
		: "=b" (sum), "=S" (buff)
		: "0" (sum), "1" (buff)
		: "bx", "ax", "si");
	}
	sum =~sum;
	return(sum & 0xffff);
}
/*
 * Check the header of an incoming IP datagram. This version is still used in slhc.c.
 *
 * Returns 0 when the header checksum is valid (the checksum of a header
 * including its own correct check field sums to zero), non-zero otherwise.
 * iph->ihl is the header length in 32-bit words, as ip_fast_csum expects.
 */

int ip_csum(struct iphdr *iph)
{
	return ip_fast_csum((unsigned char *)iph, iph->ihl);
}
/*
 * Generate a checksum for an outgoing IP datagram.
 *
 * The check field must be zero while the sum is computed, so it is
 * cleared first and then overwritten with the result.
 */

void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
591 /************************ Fragment Handlers From NET2E **********************************/ 592
593
594 /* 595 * This fragment handler is a bit of a heap. On the other hand it works quite 596 * happily and handles things quite well. 597 */ 598
599 staticstructipq *ipqueue = NULL; /* IP fragment queue */ 600
601 /* 602 * Create a new fragment entry. 603 */ 604
605 staticstructipfrag *ip_frag_create(intoffset, intend, structsk_buff *skb, unsignedchar *ptr)
/* */ 606 { 607 structipfrag *fp;
608
609 fp = (structipfrag *) kmalloc(sizeof(structipfrag), GFP_ATOMIC);
610 if (fp == NULL)
611 { 612 printk("IP: frag_create: no memory left !\n");
613 return(NULL);
614 } 615 memset(fp, 0, sizeof(structipfrag));
616
617 /* Fill in the structure. */ 618 fp->offset = offset;
619 fp->end = end;
620 fp->len = end - offset;
621 fp->skb = skb;
622 fp->ptr = ptr;
623
624 return(fp);
625 } 626
627
628 /* 629 * Find the correct entry in the "incomplete datagrams" queue for 630 * this IP datagram, and return the queue entry address if found. 631 */ 632
633 staticstructipq *ip_find(structiphdr *iph)
/* */ 634 { 635 structipq *qp;
636 structipq *qplast;
637
638 cli();
639 qplast = NULL;
640 for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
641 { 642 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
643 iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
644 { 645 del_timer(&qp->timer); /* So it doesn't vanish on us. The timer will be reset anyway */ 646 sti();
647 return(qp);
648 } 649 } 650 sti();
651 return(NULL);
652 } 653
654
/*
 * Remove an entry from the "incomplete datagrams" queue, either
 * because we completed, reassembled and processed it, or because
 * it timed out.
 *
 * Frees every fragment skb, the saved MAC and IP headers, and the
 * queue descriptor itself.  The timer must be stopped first or it
 * could fire on freed memory; the list unlink runs with interrupts
 * off for the same reason ip_find does.
 */

static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 * Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		/* qp is at the head of the queue */
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;	/* save: fp is freed below */
		IS_SKB(fp->skb);
		kfree_skb(fp->skb,FREE_READ);
		kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the MAC header. */
	kfree_s(qp->mac, qp->maclen);

	/* Release the IP header. */
	kfree_s(qp->iph, qp->ihlen + 8);

	/* Finally, release the queue descriptor itself. */
	kfree_s(qp, sizeof(struct ipq));
	sti();
}
710
/*
 * Oops- a fragment queue timed out. Kill it and send an ICMP reply.
 *
 * Timer callback: arg is the struct ipq* stored in timer.data by
 * ip_create/ip_defrag.  Sends ICMP_TIME_EXCEEDED/FRAGTIME using the
 * first queued fragment as the offending packet, then destroys the
 * whole queue entry.
 */

static void ip_expire(unsigned long arg)
{
	struct ipq *qp;

	qp = (struct ipq *)arg;

	/*
	 * Send an ICMP "Fragment Reassembly Timeout" message.
	 */

	ip_statistics.IpReasmTimeout++;
	ip_statistics.IpReasmFails++;
	/* This if is always true... shrug */
	if(qp->fragments!=NULL)
		icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
				ICMP_EXC_FRAGTIME, qp->dev);

	/*
	 * Nuke the fragment queue.
	 */
	ip_free(qp);
}
738
739 /* 740 * Add an entry to the 'ipq' queue for a newly received IP datagram. 741 * We will (hopefully :-) receive all other fragments of this datagram 742 * in time, so we just create a queue for this datagram, in which we 743 * will insert the received fragments at their respective positions. 744 */ 745
746 staticstructipq *ip_create(structsk_buff *skb, structiphdr *iph, structdevice *dev)
/* */ 747 { 748 structipq *qp;
749 intmaclen;
750 intihlen;
751
752 qp = (structipq *) kmalloc(sizeof(structipq), GFP_ATOMIC);
753 if (qp == NULL)
754 { 755 printk("IP: create: no memory left !\n");
756 return(NULL);
757 skb->dev = qp->dev;
758 } 759 memset(qp, 0, sizeof(structipq));
760
761 /* 762 * Allocate memory for the MAC header. 763 * 764 * FIXME: We have a maximum MAC address size limit and define 765 * elsewhere. We should use it here and avoid the 3 kmalloc() calls 766 */ 767
768 maclen = ((unsignedlong) iph) - ((unsignedlong) skb->data);
769 qp->mac = (unsignedchar *) kmalloc(maclen, GFP_ATOMIC);
770 if (qp->mac == NULL)
771 { 772 printk("IP: create: no memory left !\n");
773 kfree_s(qp, sizeof(structipq));
774 return(NULL);
775 } 776
777 /* 778 * Allocate memory for the IP header (plus 8 octets for ICMP). 779 */ 780
781 ihlen = (iph->ihl * sizeof(unsignedlong));
782 qp->iph = (structiphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
783 if (qp->iph == NULL)
784 { 785 printk("IP: create: no memory left !\n");
786 kfree_s(qp->mac, maclen);
787 kfree_s(qp, sizeof(structipq));
788 return(NULL);
789 } 790
791 /* Fill in the structure. */ 792 memcpy(qp->mac, skb->data, maclen);
793 memcpy(qp->iph, iph, ihlen + 8);
794 qp->len = 0;
795 qp->ihlen = ihlen;
796 qp->maclen = maclen;
797 qp->fragments = NULL;
798 qp->dev = dev;
799
800 /* Start a timer for this entry. */ 801 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */ 802 qp->timer.data = (unsignedlong) qp; /* pointer to queue */ 803 qp->timer.function = ip_expire; /* expire function */ 804 add_timer(&qp->timer);
805
806 /* Add this entry to the queue. */ 807 qp->prev = NULL;
808 cli();
809 qp->next = ipqueue;
810 if (qp->next != NULL)
811 qp->next->prev = qp;
812 ipqueue = qp;
813 sti();
814 return(qp);
815 } 816
817
818 /* 819 * See if a fragment queue is complete. 820 */ 821
822 staticintip_done(structipq *qp)
/* */ 823 { 824 structipfrag *fp;
825 intoffset;
826
827 /* Only possible if we received the final fragment. */ 828 if (qp->len == 0)
829 return(0);
830
831 /* Check all fragment offsets to see if they connect. */ 832 fp = qp->fragments;
833 offset = 0;
834 while (fp != NULL)
835 { 836 if (fp->offset > offset)
837 return(0); /* fragment(s) missing */ 838 offset = fp->end;
839 fp = fp->next;
840 } 841
842 /* All fragments are present. */ 843 return(1);
844 } 845
846
/*
 * Build a new IP datagram from all its fragments.
 *
 * FIXME: We copy here because we lack an effective way of handling lists
 * of bits on input. Until the new skb data handling is in I'm not going
 * to touch this with a bargepole. This also causes a 4Kish limit on
 * packet sizes.
 *
 * Allocates one skb large enough for MAC header + IP header + data,
 * copies the saved headers and every fragment into it at its offset,
 * frees the queue entry, and patches frag_off/tot_len in the rebuilt
 * header.  Returns the finished skb, or NULL on any failure (the queue
 * is freed either way).
 */

static struct sk_buff *ip_glue(struct ipq *qp)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct ipfrag *fp;
	unsigned char *ptr;
	int count, len;

	/*
	 * Allocate a new buffer for the datagram.
	 */

	len = qp->maclen + qp->ihlen + qp->len;

	if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
	{
		ip_statistics.IpReasmFails++;
		printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
		ip_free(qp);
		return(NULL);
	}

	/* Fill in the basic details. */
	skb->len = (len - qp->maclen);	/* skb->len excludes the MAC header */
	skb->h.raw = skb->data;
	skb->free = 1;

	/* Copy the original MAC and IP headers into the new buffer. */
	ptr = (unsigned char *) skb->h.raw;
	memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
	ptr += qp->maclen;
	memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
	ptr += qp->ihlen;
	skb->h.raw += qp->maclen;	/* h.raw now points at the IP header */

	count = 0;

	/* Copy the data portions of all fragments into the new buffer. */
	fp = qp->fragments;
	while(fp != NULL)
	{
		/* A corrupt chain could overrun the buffer - drop instead. */
		if(count+fp->len > skb->len)
		{
			printk("Invalid fragment list: Fragment over size.\n");
			ip_free(qp);
			kfree_skb(skb,FREE_WRITE);
			ip_statistics.IpReasmFails++;
			return NULL;
		}
		memcpy((ptr + fp->offset), fp->ptr, fp->len);
		count += fp->len;
		fp = fp->next;
	}

	/* We glued together all fragments, so remove the queue entry. */
	ip_free(qp);

	/* Done with all fragments. Fixup the new IP header. */
	iph = skb->h.iph;
	iph->frag_off = 0;	/* reassembled: no longer a fragment */
	iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
	skb->ip_hdr = iph;

	ip_statistics.IpReasmOKs++;
	return(skb);
}
923
924 /* 925 * Process an incoming IP datagram fragment. 926 */ 927
928 staticstructsk_buff *ip_defrag(structiphdr *iph, structsk_buff *skb, structdevice *dev)
/* */ 929 { 930 structipfrag *prev, *next;
931 structipfrag *tfp;
932 structipq *qp;
933 structsk_buff *skb2;
934 unsignedchar *ptr;
935 intflags, offset;
936 inti, ihl, end;
937
938 ip_statistics.IpReasmReqds++;
939
940 /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */ 941 qp = ip_find(iph);
942
943 /* Is this a non-fragmented datagram? */ 944 offset = ntohs(iph->frag_off);
945 flags = offset & ~IP_OFFSET;
946 offset &= IP_OFFSET;
947 if (((flags & IP_MF) == 0) && (offset == 0))
948 { 949 if (qp != NULL)
950 ip_free(qp); /* Huh? How could this exist?? */ 951 return(skb);
952 } 953
954 offset <<= 3; /* offset is in 8-byte chunks */ 955
956 /* 957 * If the queue already existed, keep restarting its timer as long 958 * as we still are receiving fragments. Otherwise, create a fresh 959 * queue entry. 960 */ 961
962 if (qp != NULL)
963 { 964 del_timer(&qp->timer);
965 qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */ 966 qp->timer.data = (unsignedlong) qp; /* pointer to queue */ 967 qp->timer.function = ip_expire; /* expire function */ 968 add_timer(&qp->timer);
969 } 970 else 971 { 972 /* 973 * If we failed to create it, then discard the frame 974 */ 975 if ((qp = ip_create(skb, iph, dev)) == NULL)
976 { 977 skb->sk = NULL;
978 kfree_skb(skb, FREE_READ);
979 ip_statistics.IpReasmFails++;
980 returnNULL;
981 } 982 } 983
984 /* 985 * Determine the position of this fragment. 986 */ 987
988 ihl = (iph->ihl * sizeof(unsignedlong));
989 end = offset + ntohs(iph->tot_len) - ihl;
990
991 /* 992 * Point into the IP datagram 'data' part. 993 */ 994
995 ptr = skb->data + dev->hard_header_len + ihl;
996
997 /* 998 * Is this the final fragment? 999 */1000
1001 if ((flags & IP_MF) == 0)
1002 qp->len = end;
1003
1004 /*1005 * Find out which fragments are in front and at the back of us1006 * in the chain of fragments so far. We must know where to put1007 * this fragment, right?1008 */1009
1010 prev = NULL;
1011 for(next = qp->fragments; next != NULL; next = next->next)
1012 {1013 if (next->offset > offset)
1014 break; /* bingo! */1015 prev = next;
1016 }1017
1018 /*1019 * We found where to put this one.1020 * Check for overlap with preceding fragment, and, if needed,1021 * align things so that any overlaps are eliminated.1022 */1023 if (prev != NULL && offset < prev->end)
1024 {1025 i = prev->end - offset;
1026 offset += i; /* ptr into datagram */1027 ptr += i; /* ptr into fragment data */1028 }1029
1030 /*1031 * Look for overlap with succeeding segments.1032 * If we can merge fragments, do it.1033 */1034
1035 for(; next != NULL; next = tfp)
1036 {1037 tfp = next->next;
1038 if (next->offset >= end)
1039 break; /* no overlaps at all */1040
1041 i = end - next->offset; /* overlap is 'i' bytes */1042 next->len -= i; /* so reduce size of */1043 next->offset += i; /* next fragment */1044 next->ptr += i;
1045
1046 /*1047 * If we get a frag size of <= 0, remove it and the packet1048 * that it goes with.1049 */1050 if (next->len <= 0)
1051 {1052 if (next->prev != NULL)
1053 next->prev->next = next->next;
1054 else1055 qp->fragments = next->next;
1056
1057 if (tfp->next != NULL)
1058 next->next->prev = next->prev;
1059
1060 kfree_skb(next->skb,FREE_READ);
1061 kfree_s(next, sizeof(structipfrag));
1062 }1063 }1064
1065 /*1066 * Insert this fragment in the chain of fragments.1067 */1068
1069 tfp = NULL;
1070 tfp = ip_frag_create(offset, end, skb, ptr);
1071
1072 /*1073 * No memory to save the fragment - so throw the lot1074 */1075
1076 if (!tfp)
1077 {1078 skb->sk = NULL;
1079 kfree_skb(skb, FREE_READ);
1080 returnNULL;
1081 }1082 tfp->prev = prev;
1083 tfp->next = next;
1084 if (prev != NULL)
1085 prev->next = tfp;
1086 else1087 qp->fragments = tfp;
1088
1089 if (next != NULL)
1090 next->prev = tfp;
1091
1092 /*1093 * OK, so we inserted this new fragment into the chain.1094 * Check if we now have a full IP datagram which we can1095 * bump up to the IP layer...1096 */1097
1098 if (ip_done(qp))
1099 {1100 skb2 = ip_glue(qp); /* glue together the fragments */1101 return(skb2);
1102 }1103 return(NULL);
1104 }1105
1106
/*
 * This IP datagram is too large to be sent in one piece.  Break it up into
 * smaller pieces (each of size equal to the MAC header plus IP header plus
 * a block of the data of the original IP data part) that will yet fit in a
 * single device frame, and queue such a frame for sending by calling the
 * ip_queue_xmit().  Note that this is recursion, and bad things will happen
 * if this function causes a loop...
 *
 * Yes this is inefficient, feel free to submit a quicker one.
 *
 * **Protocol Violation**
 * We copy all the options to each fragment. !FIXME!
 *
 * is_frag bit 1: the input is itself a non-final fragment (keep MF set on
 * every piece); bit 2: the input is a fragment, so start numbering offsets
 * from its own frag_off rather than 0.
 */
void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct sk_buff *skb2;
	int left, mtu, hlen, len;
	int offset;
	unsigned long flags;

	/*
	 * Point into the IP datagram header.
	 */

	raw = skb->data;
	iph = (struct iphdr *) (raw + dev->hard_header_len);

	skb->ip_hdr = iph;

	/*
	 * Setup starting values.
	 */

	hlen = (iph->ihl * sizeof(unsigned long));
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	hlen += dev->hard_header_len;		/* Total header size */
	mtu = (dev->mtu - hlen);		/* Size of data space */
	ptr = (raw + hlen);			/* Where to start from */

	/*
	 * Check for any "DF" flag. [DF means do not fragment]
	 */

	if (ntohs(iph->frag_off) & IP_DF)
	{
		ip_statistics.IpFragFails++;
		icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev);
		return;
	}

	/*
	 * The protocol doesn't seem to say what to do in the case that the
	 * frame + options doesn't fit the mtu. As it used to fall down dead
	 * in this case we were fortunate it didn't happen
	 */

	if(mtu<8)
	{
		/* It's wrong but its better than nothing */
		icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev);
		ip_statistics.IpFragFails++;
		return;
	}

	/*
	 * Fragment the datagram.
	 */

	/*
	 * The initial offset is 0 for a complete frame. When
	 * fragmenting fragments its wherever this one starts.
	 */

	if (is_frag & 2)
		offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
	else
		offset = 0;


	/*
	 * Keep copying data until we run out.
	 */

	while(left > 0)
	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
		{
			len/=8;
			len*=8;
		}
		/*
		 * Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
		{
			printk("IP: frag: no memory for new fragment!\n");
			ip_statistics.IpFragFails++;
			return;
		}

		/*
		 * Set up data on packet
		 */

		skb2->arp = skb->arp;
		if(skb->free==0)
			printk("IP fragmenter: BUG free!=1 in fragmenter\n");
		skb2->free = 1;
		skb2->len = len + hlen;
		skb2->h.raw=(char *) skb2->data;
		/*
		 * Charge the memory for the fragment to any owner
		 * it might possess
		 */

		save_flags(flags);
		if (sk)
		{
			cli();
			sk->wmem_alloc += skb2->mem_len;
			skb2->sk=sk;
		}
		restore_flags(flags);
		skb2->raddr = skb->raddr;	/* For rebuild_header - must be here */

		/*
		 * Copy the packet header into the new buffer.
		 */

		memcpy(skb2->h.raw, raw, hlen);

		/*
		 * Copy a block of the IP datagram.
		 */
		memcpy(skb2->h.raw + hlen, ptr, len);
		left -= len;

		skb2->h.raw+=dev->hard_header_len;

		/*
		 * Fill in the new header fields.
		 */
		iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
		iph->frag_off = htons((offset >> 3));
		/*
		 * Added AC : If we are fragmenting a fragment thats not the
		 * last fragment then keep MF on each bit
		 */
		if (left > 0 || (is_frag & 1))
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 * Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		ip_queue_xmit(sk, dev, skb2, 2);
	}
	ip_statistics.IpFragOKs++;
}
1281
1282
1283 #ifdefCONFIG_IP_FORWARD1284
1285 /*1286 * Forward an IP datagram to its next destination.1287 */1288
1289 staticvoidip_forward(structsk_buff *skb, structdevice *dev, intis_frag)
/* */1290 {1291 structdevice *dev2; /* Output device */1292 structiphdr *iph; /* Our header */1293 structsk_buff *skb2; /* Output packet */1294 structrtable *rt; /* Route we use */1295 unsignedchar *ptr; /* Data pointer */1296 unsignedlongraddr; /* Router IP address */1297
1298 /* 1299 * See if we are allowed to forward this.1300 */1301
1302 #ifdefCONFIG_IP_FIREWALL1303 if(!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain))
1304 {1305 return;
1306 }1307 #endif1308 /*1309 * According to the RFC, we must first decrease the TTL field. If1310 * that reaches zero, we must reply an ICMP control message telling1311 * that the packet's lifetime expired.1312 *1313 * Exception:1314 * We may not generate an ICMP for an ICMP. icmp_send does the1315 * enforcement of this so we can forget it here. It is however1316 * sometimes VERY important.1317 */1318
1319 iph = skb->h.iph;
1320 iph->ttl--;
1321 if (iph->ttl <= 0)
1322 {1323 /* Tell the sender its packet died... */1324 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, dev);
1325 return;
1326 }1327
1328 /*1329 * Re-compute the IP header checksum.1330 * This is inefficient. We know what has happened to the header1331 * and could thus adjust the checksum as Phil Karn does in KA9Q1332 */1333
1334 ip_send_check(iph);
1335
1336 /*1337 * OK, the packet is still valid. Fetch its destination address,1338 * and give it to the IP sender for further processing.1339 */1340
1341 rt = ip_rt_route(iph->daddr, NULL, NULL);
1342 if (rt == NULL)
1343 {1344 /*1345 * Tell the sender its packet cannot be delivered. Again1346 * ICMP is screened later.1347 */1348 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, dev);
1349 return;
1350 }1351
1352
1353 /*1354 * Gosh. Not only is the packet valid; we even know how to1355 * forward it onto its final destination. Can we say this1356 * is being plain lucky?1357 * If the router told us that there is no GW, use the dest.1358 * IP address itself- we seem to be connected directly...1359 */1360
1361 raddr = rt->rt_gateway;
1362
1363 if (raddr != 0)
1364 {1365 /*1366 * There is a gateway so find the correct route for it.1367 * Gateways cannot in turn be gatewayed.1368 */1369 rt = ip_rt_route(raddr, NULL, NULL);
1370 if (rt == NULL)
1371 {1372 /*1373 * Tell the sender its packet cannot be delivered...1374 */1375 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, dev);
1376 return;
1377 }1378 if (rt->rt_gateway != 0)
1379 raddr = rt->rt_gateway;
1380 }1381 else1382 raddr = iph->daddr;
1383
1384 /*1385 * Having picked a route we can now send the frame out.1386 */1387
1388 dev2 = rt->rt_dev;
1389
1390 /*1391 * In IP you never forward a frame on the interface that it arrived1392 * upon. We should generate an ICMP HOST REDIRECT giving the route1393 * we calculated.1394 * For now just dropping the packet is an acceptable compromise.1395 */1396
1397 if (dev == dev2)
1398 return;
1399
1400 /*1401 * We now allocate a new buffer, and copy the datagram into it.1402 * If the indicated interface is up and running, kick it.1403 */1404
1405 if (dev2->flags & IFF_UP)
1406 {1407
1408 /*1409 * Current design decrees we copy the packet. For identical header1410 * lengths we could avoid it. The new skb code will let us push1411 * data so the problem goes away then.1412 */1413
1414 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1415 /*1416 * This is rare and since IP is tolerant of network failures1417 * quite harmless.1418 */1419 if (skb2 == NULL)
1420 {1421 printk("\nIP: No memory available for IP forward\n");
1422 return;
1423 }1424 ptr = skb2->data;
1425 skb2->free = 1;
1426 skb2->len = skb->len + dev2->hard_header_len;
1427 skb2->h.raw = ptr;
1428
1429 /*1430 * Copy the packet data into the new buffer.1431 */1432 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1433
1434 /* Now build the MAC header. */1435 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1436
1437 ip_statistics.IpForwDatagrams++;
1438
1439 /*1440 * See if it needs fragmenting. Note in ip_rcv we tagged1441 * the fragment type. This must be right so that1442 * the fragmenter does the right thing.1443 */1444
1445 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1446 {1447 ip_fragment(NULL,skb2,dev2, is_frag);
1448 kfree_skb(skb2,FREE_WRITE);
1449 }1450 else1451 {1452 #ifdefCONFIG_IP_ACCT1453 /*1454 * Count mapping we shortcut1455 */1456
1457 ip_acct_cnt(iph,ip_acct_chain,1);
1458 #endif1459
1460 /*1461 * Map service types to priority. We lie about1462 * throughput being low priority, but its a good1463 * choice to help improve general usage.1464 */1465 if(iph->tos & IPTOS_LOWDELAY)
1466 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1467 elseif(iph->tos & IPTOS_THROUGHPUT)
1468 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1469 else1470 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1471 }1472 }1473 }1474
1475
1476 #endif1477
1478 /*1479 * This function receives all incoming IP datagrams.1480 */1481
/*
 * This function receives all incoming IP datagrams.
 * Always returns 0 (the packet_type handler convention); the skb is
 * either consumed here (freed or handed to a protocol/raw receiver)
 * or passed on.
 */
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->h.iph;
	struct sock *raw_sk = NULL;
	unsigned char hash;
	unsigned char flag = 0;		/* Set once any protocol accepts the packet */
	unsigned char opts_p = 0;	/* Set iff the packet has options. */
	struct inet_protocol *ipprot;
	static struct options opt;	/* since we don't use these yet, and they
					   take up stack space. */
	int brd = IS_MYADDR;		/* Address class; defaults to "for us" for
					   the fast path below */
	int is_frag = 0;		/* bit 0: more frags follow; bit 1: is a fragment */

	ip_statistics.IpInReceives++;

	/*
	 * Tag the ip header of this packet so we can find it
	 */

	skb->ip_hdr = iph;

	/*
	 * Is the datagram acceptable?
	 *
	 * 1. Length at least the size of an ip header
	 * 2. Version of 4
	 * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 * (4. We ought to check for IP multicast addresses and undefined types.. does this matter ?)
	 */

	if (skb->len < sizeof(struct iphdr) || iph->ihl < 5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) != 0)
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return (0);
	}

	/*
	 * See if the firewall wants to dispose of the packet.
	 */

#ifdef CONFIG_IP_FIREWALL

	if (!LOOPBACK(iph->daddr) && !ip_fw_chk(iph, ip_fw_blk_chain))
	{
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

#endif

	/*
	 * Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 */

	skb->len = ntohs(iph->tot_len);

	/*
	 * Next analyse the packet for options. Studies show under one packet in
	 * a thousand have options....
	 */

	if (iph->ihl != 5)
	{	/* Fast path for the typical optionless IP packet. */
		memset((char *) &opt, 0, sizeof(opt));
		/* do_options frees the skb itself on error, hence the bare return */
		if (do_options(iph, &opt) != 0)
			return 0;
		opts_p = 1;
	}

	/*
	 * Remember if the frame is fragmented.
	 */

	if (iph->frag_off)
	{
		/* NOTE(review): 0x0020 is htons(IP_MF>>8)-style byte-order
		   trickery — tests the MF bit without an ntohs; presumably
		   correct only for the byte layout this was written for. */
		if (iph->frag_off & 0x0020)
			is_frag |= 1;

		/*
		 * Last fragment ?
		 */

		if (ntohs(iph->frag_off) & 0x1fff)
			is_frag |= 2;
	}

	/*
	 * Do any IP forwarding required. chk_addr() is expensive -- avoid it someday.
	 *
	 * This is inefficient. While finding out if it is for us we could also compute
	 * the routing table entry. This is where the great unified cache theory comes
	 * in as and when someone implements it
	 *
	 * For most hosts over 99% of packets match the first conditional
	 * and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
	 * function entry.
	 */

	if (iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
	{
		/*
		 * Don't forward multicast or broadcast frames.
		 */

		if (skb->pkt_type != PACKET_HOST || brd == IS_BROADCAST)
		{
			kfree_skb(skb, FREE_WRITE);
			return 0;
		}

		/*
		 * The packet is for another target. Forward the frame
		 */

#ifdef CONFIG_IP_FORWARD
		ip_forward(skb, dev, is_frag);
#else
/*		printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
			iph->saddr,iph->daddr);*/
		ip_statistics.IpInAddrErrors++;
#endif
		/*
		 * The forwarder is inefficient and copies the packet. We
		 * free the original now.
		 */

		kfree_skb(skb, FREE_WRITE);
		return (0);
	}

#ifdef CONFIG_IP_MULTICAST

	if (brd == IS_MULTICAST && iph->daddr != IGMP_ALL_HOSTS && !(dev->flags & IFF_LOOPBACK))
	{
		/*
		 * Check it is for one of our groups; drop silently if not.
		 */
		struct ip_mc_list *ip_mc = dev->ip_mc_list;
		do
		{
			if (ip_mc == NULL)
			{
				kfree_skb(skb, FREE_WRITE);
				return 0;
			}
			if (ip_mc->multiaddr == iph->daddr)
				break;
			ip_mc = ip_mc->next;
		}
		while (1);
	}
#endif
	/*
	 * Account for the packet
	 */

#ifdef CONFIG_IP_ACCT
	ip_acct_cnt(iph, ip_acct_chain, 1);
#endif

	/*
	 * Reassemble IP fragments.
	 */

	if (is_frag)
	{
		/* Defragment. Obtain the complete packet if there is one.
		   NOTE: skb and iph are both reseated here — the old skb may
		   have been absorbed into the reassembly queue. */
		skb = ip_defrag(iph, skb, dev);
		if (skb == NULL)
			return 0;
		skb->dev = dev;
		iph = skb->h.iph;
	}

	/*
	 * Point into the IP datagram, just past the header.
	 */

	skb->ip_hdr = iph;
	skb->h.raw += iph->ihl * 4;

	/*
	 * Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
	 */

	hash = iph->protocol & (SOCK_ARRAY_SIZE - 1);

	/* If there maybe a raw socket we must check - if not we don't care less */
	if ((raw_sk = raw_prot.sock_array[hash]) != NULL)
	{
		struct sock *sknext = NULL;
		struct sk_buff *skb1;
		raw_sk = get_sock_raw(raw_sk, hash, iph->saddr, iph->daddr);
		if (raw_sk)	/* Any raw sockets */
		{
			do
			{
				/* Find the next */
				sknext = get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
				if (sknext)
					skb1 = skb_clone(skb, GFP_ATOMIC);
				else
					break;	/* One pending raw socket left */
				if (skb1)
					raw_rcv(raw_sk, skb1, dev, iph->saddr, iph->daddr);
				raw_sk = sknext;
			}
			while (raw_sk != NULL);

			/* Here either raw_sk is the last raw socket, or NULL if none */
			/* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
		}
	}

	/*
	 * skb->h.raw now points at the protocol beyond the IP header.
	 */

	hash = iph->protocol & (MAX_INET_PROTOS - 1);
	for (ipprot = (struct inet_protocol *)inet_protos[hash]; ipprot != NULL; ipprot = (struct inet_protocol *)ipprot->next)
	{
		struct sk_buff *skb2;

		if (ipprot->protocol != iph->protocol)
			continue;
		/*
		 * See if we need to make a copy of it. This will
		 * only be set if more than one protocol wants it.
		 * and then not for the last one. If there is a pending
		 * raw delivery wait for that
		 */
		if (ipprot->copy || raw_sk)
		{
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2 == NULL)
				continue;
		}
		else
		{
			skb2 = skb;
		}
		flag = 1;

		/*
		 * Pass on the datagram to each protocol that wants it,
		 * based on the datagram protocol. We should really
		 * check the protocol handler's return values here...
		 */
		ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
				(ntohs(iph->tot_len) - (iph->ihl * 4)),
				iph->saddr, 0, ipprot);

	}

	/*
	 * All protocols checked.
	 * If this packet was a broadcast, we may *not* reply to it, since that
	 * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
	 * ICMP reply messages get queued up for transmission...)
	 */

	if (raw_sk != NULL)	/* Shift to last raw user */
		raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
	else if (!flag)		/* Free and report errors */
	{
		if (brd != IS_BROADCAST && brd != IS_MULTICAST)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, dev);
		kfree_skb(skb, FREE_WRITE);
	}

	return (0);
}
1757 /*1758 * Loop a packet back to the sender.1759 */1760
/*
 * Loop a packet back to the sender, via the loopback device.
 * Allocates a fresh copy of the frame; on allocation failure the
 * loopback delivery is silently skipped (best effort, like the wire).
 */
static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
{
	extern struct device loopback_dev;
	struct device *dev = &loopback_dev;
	/* IP payload length: strip the original device's MAC header */
	int len = skb->len - old_dev->hard_header_len;
	struct sk_buff *newskb = alloc_skb(len + dev->hard_header_len, GFP_ATOMIC);

	if (newskb == NULL)
		return;

	newskb->link3 = NULL;
	newskb->sk = NULL;
	newskb->dev = dev;
	newskb->saddr = skb->saddr;
	newskb->daddr = skb->daddr;
	newskb->raddr = skb->raddr;
	newskb->free = 1;
	newskb->lock = 0;
	newskb->users = 0;
	newskb->pkt_type = skb->pkt_type;
	newskb->len = len + dev->hard_header_len;

	/* ip_send builds the loopback MAC header and returns its length,
	   so ip_hdr lands just past it; this must happen BEFORE the copy */
	newskb->ip_hdr = (struct iphdr *)(newskb->data + ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
	memcpy(newskb->ip_hdr, skb->ip_hdr, len);

	/* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */

	/*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
	ip_queue_xmit(NULL, dev, newskb, 1);
}
1793
1794 /*1795 * Queues a packet to be sent, and starts the transmitter1796 * if necessary. if free = 1 then we free the block after1797 * transmit, otherwise we don't. If free==2 we not only1798 * free the block but also don't assign a new ip seq number.1799 * This routine also needs to put in the total length,1800 * and compute the checksum1801 */1802
/*
 * Queues a packet to be sent, and starts the transmitter
 * if necessary. if free = 1 then we free the block after
 * transmit, otherwise we don't. If free==2 we not only
 * free the block but also don't assign a new ip seq number.
 * This routine also needs to put in the total length,
 * and compute the checksum
 */
void ip_queue_xmit(struct sock *sk, struct device *dev,
	      struct sk_buff *skb, int free)
{
	struct iphdr *iph;
	unsigned char *ptr;

	/* Sanity check */
	if (dev == NULL)
	{
		printk("IP: ip_queue_xmit dev = NULL\n");
		return;
	}

	IS_SKB(skb);

	/*
	 * Do some book-keeping in the packet for later
	 */

	skb->dev = dev;
	skb->when = jiffies;

	/*
	 * Find the IP header and set the length. This is bad
	 * but once we get the skb data handling code in the
	 * hardware will push its header sensibly and we will
	 * set skb->ip_hdr to avoid this mess and the fixed
	 * header length problem
	 */

	ptr = skb->data;
	ptr += dev->hard_header_len;
	iph = (struct iphdr *)ptr;
	skb->ip_hdr = iph;
	/* NOTE(review): ntohs used where htons is meant — same operation
	   on the byte level, but htons would state the intent */
	iph->tot_len = ntohs(skb->len - dev->hard_header_len);

	/*
	 * No reassigning numbers to fragments...
	 */

	if (free != 2)
		iph->id = htons(ip_id_count++);
	else
		free = 1;	/* fragments still get freed after transmit */

	/* All buffers without an owner socket get freed */
	if (sk == NULL)
		free = 1;

	skb->free = free;

	/*
	 * Do we need to fragment. Again this is inefficient.
	 * We need to somehow lock the original buffer and use
	 * bits of it.
	 */

	if (skb->len > dev->mtu + dev->hard_header_len)
	{
		ip_fragment(sk, skb, dev, 0);
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/*
	 * Add an IP checksum
	 */

	ip_send_check(iph);

	/*
	 * More debugging. You cannot queue a packet already on a list
	 * Spot this and moan loudly.
	 */
	if (skb->next != NULL)
	{
		printk("ip_queue_xmit: next != NULL\n");
		skb_unlink(skb);
	}

	/*
	 * If a sender wishes the packet to remain unfreed
	 * we add it to his send queue. This arguably belongs
	 * in the TCP level since nobody else uses it. BUT
	 * remember IPng might change all the rules.
	 */

	if (!free)
	{
		unsigned long flags;
		/* The socket now has more outstanding blocks */

		sk->packets_out++;

		/* Protect the list for a moment (interrupts off) */
		save_flags(flags);
		cli();

		if (skb->link3 != NULL)
		{
			printk("ip.c: link3 != NULL\n");
			skb->link3 = NULL;
		}
		/* Append to the retransmit queue (send_head..send_tail) */
		if (sk->send_head == NULL)
		{
			sk->send_tail = skb;
			sk->send_head = skb;
		}
		else
		{
			sk->send_tail->link3 = skb;
			sk->send_tail = skb;
		}
		/* skb->link3 is NULL */

		/* Interrupt restore */
		restore_flags(flags);
		/* Set the IP write timeout to the round trip time for the packet.
		   If an acknowledge has not arrived by then we may wish to act */
		reset_timer(sk, TIME_WRITE, sk->rto);
	}
	else
		/* Remember who owns the buffer */
		skb->sk = sk;

	/*
	 * If the indicated interface is up and running, send the packet.
	 */

	ip_statistics.IpOutRequests++;
#ifdef CONFIG_IP_ACCT
	ip_acct_cnt(iph, ip_acct_chain, 1);
#endif

#ifdef CONFIG_IP_MULTICAST

	/*
	 * Multicasts are looped back for other local users
	 */

	if (MULTICAST(iph->daddr) && !(dev->flags & IFF_LOOPBACK))
	{
		/* Loop back unless the socket explicitly disabled it */
		if (sk == NULL || sk->ip_mc_loop)
		{
			if (skb->daddr == IGMP_ALL_HOSTS)
				ip_loopback(dev, skb);
			else
			{
				/* Only loop back groups this interface joined */
				struct ip_mc_list *imc = dev->ip_mc_list;
				while (imc != NULL)
				{
					if (imc->multiaddr == iph->daddr)
					{
						ip_loopback(dev, skb);
						break;
					}
					imc = imc->next;
				}
			}
		}
		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->ip_hdr->ttl == 0)
		{
			kfree_skb(skb, FREE_READ);
			return;
		}
	}
#endif
	/* Broadcasts to our own broadcast address are looped back too */
	if ((dev->flags & IFF_BROADCAST) && iph->daddr == dev->pa_brdaddr && !(dev->flags & IFF_LOOPBACK))
		ip_loopback(dev, skb);

	if (dev->flags & IFF_UP)
	{
		/*
		 * If we have an owner use its priority setting,
		 * otherwise use NORMAL
		 */

		if (sk != NULL)
		{
			dev_queue_xmit(skb, dev, sk->priority);
		}
		else
		{
			dev_queue_xmit(skb, dev, SOPRI_NORMAL);
		}
	}
	else
	{
		ip_statistics.IpOutDiscards++;
		if (free)
			kfree_skb(skb, FREE_WRITE);
	}
}
2005
2006
2007 #ifdefCONFIG_IP_MULTICAST2008
2009 /*2010 * Write an multicast group list table for the IGMP daemon to2011 * read.2012 */2013
2014 intip_mc_procinfo(char *buffer, char **start, off_toffset, intlength)
/* */2015 {2016 off_tpos=0, begin=0;
2017 structip_mc_list *im;
2018 unsignedlongflags;
2019 intlen=0;
2020
2021
2022 len=sprintf(buffer,"Device : Multicast\n");
2023 save_flags(flags);
2024 cli();
2025
2026 im=ip_mc_head;
2027
2028 while(im!=NULL)
2029 {2030 len+=sprintf(buffer+len,"%-10s: %08lX\n", im->interface->name, im->multiaddr);
2031 pos=begin+len;
2032 if(pos<offset)
2033 {2034 len=0;
2035 begin=pos;
2036 }2037 if(pos>offset+length)
2038 break;
2039 im=im->next;
2040 }2041 restore_flags(flags);
2042 *start=buffer+(offset-begin);
2043 len-=(offset-begin);
2044 if(len>length)
2045 len=length;
2046 returnlen;
2047 }2048
2049
2050 #endif2051 /*2052 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on2053 * an IP socket.2054 *2055 * We implement IP_TOS (type of service), IP_TTL (time to live).2056 *2057 * Next release we will sort out IP_OPTIONS since for some people are kind of important.2058 */2059
/*
 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on
 * an IP socket.
 *
 * We implement IP_TOS (type of service), IP_TTL (time to live), the
 * multicast options, and the firewall/accounting control calls.
 *
 * Returns 0 on success or a negative errno.
 */
int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
{
	int val, err;
#if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
	struct ip_fw tmp_fw;
#endif
	if (optval == NULL)
		return (-EINVAL);

	/* All options read at least an int from user space */
	err = verify_area(VERIFY_READ, optval, sizeof(int));
	if (err)
		return err;

	val = get_fs_long((unsigned long *)optval);

	if (level != SOL_IP)
		return -EOPNOTSUPP;

#ifdef CONFIG_IP_MULTICAST
	/* Handled before the switch — see the GCC_WORKS case below,
	   which is the same code kept out of the switch for a compiler
	   workaround */
	if (optname == IP_MULTICAST_TTL)
	{
		unsigned char ucval;
		ucval = get_fs_byte((unsigned char *)optval);
		printk("MC TTL %d\n", ucval);
		if (ucval < 1 || ucval > 255)
			return -EINVAL;
		sk->ip_mc_ttl = (int)ucval;
		return 0;
	}
#endif

	switch (optname)
	{
		case IP_TOS:
			if (val < 0 || val > 255)
				return -EINVAL;
			sk->ip_tos = val;
			/* TOS also adjusts the socket's queueing priority */
			if (val == IPTOS_LOWDELAY)
				sk->priority = SOPRI_INTERACTIVE;
			if (val == IPTOS_THROUGHPUT)
				sk->priority = SOPRI_BACKGROUND;
			return 0;
		case IP_TTL:
			if (val < 1 || val > 255)
				return -EINVAL;
			sk->ip_ttl = val;
			return 0;
#ifdef CONFIG_IP_MULTICAST
#ifdef GCC_WORKS
		case IP_MULTICAST_TTL:
		{
			unsigned char ucval;

			ucval = get_fs_byte((unsigned char *)optval);
			printk("MC TTL %d\n", ucval);
			if (ucval < 1 || ucval > 255)
				return -EINVAL;
			sk->ip_mc_ttl = (int)ucval;
			return 0;
		}
#endif
		case IP_MULTICAST_LOOP:
		{
			unsigned char ucval;

			ucval = get_fs_byte((unsigned char *)optval);
			if (ucval != 0 && ucval != 1)
				return -EINVAL;
			sk->ip_mc_loop = (int)ucval;
			return 0;
		}
		case IP_MULTICAST_IF:
		{
			/* Not fully tested */
			struct in_addr addr;
			struct device *dev = NULL;

			/*
			 * Check the arguments are allowable
			 */

			err = verify_area(VERIFY_READ, optval, sizeof(addr));
			if (err)
				return err;

			memcpy_fromfs(&addr, optval, sizeof(addr));

			printk("MC bind %s\n", in_ntoa(addr.s_addr));

			/*
			 * What address has been requested
			 */

			if (addr.s_addr == INADDR_ANY)	/* Default */
			{
				sk->ip_mc_name[0] = 0;
				return 0;
			}

			/*
			 * Find the device with that local address
			 */

			for (dev = dev_base; dev; dev = dev->next)
			{
				if ((dev->flags & IFF_UP) && (dev->flags & IFF_MULTICAST) &&
					(dev->pa_addr == addr.s_addr))
					break;
			}

			/*
			 * Did we find one
			 */

			if (dev)
			{
				strcpy(sk->ip_mc_name, dev->name);
				return 0;
			}
			return -EADDRNOTAVAIL;
		}

		case IP_ADD_MEMBERSHIP:
		{
			/*
			 * FIXME: Add/Del membership should have a semaphore protecting them from re-entry
			 */
			struct ip_mreq mreq;
			static struct options optmem;
			unsigned long route_src;
			struct rtable *rt;
			struct device *dev = NULL;

			/*
			 * Check the arguments.
			 */

			err = verify_area(VERIFY_READ, optval, sizeof(mreq));
			if (err)
				return err;

			memcpy_fromfs(&mreq, optval, sizeof(mreq));

			/*
			 * Get device for use later
			 */

			if (mreq.imr_interface.s_addr == INADDR_ANY)
			{
				/*
				 * Not set so scan: route towards the group
				 * and use whatever interface that picks.
				 */
				if ((rt = ip_rt_route(mreq.imr_multiaddr.s_addr, &optmem, &route_src)) != NULL)
				{
					dev = rt->rt_dev;
					rt->rt_use--;	/* drop the reference ip_rt_route took */
				}
			}
			else
			{
				/*
				 * Find a suitable device by local address.
				 */
				for (dev = dev_base; dev; dev = dev->next)
				{
					if ((dev->flags & IFF_UP) && (dev->flags & IFF_MULTICAST) &&
						(dev->pa_addr == mreq.imr_interface.s_addr))
						break;
				}
			}

			/*
			 * No device, no cookies.
			 */

			if (!dev)
				return -ENODEV;

			/*
			 * Join group.
			 */

			return ip_mc_join_group(sk, dev, mreq.imr_multiaddr.s_addr);
		}

		case IP_DROP_MEMBERSHIP:
		{
			struct ip_mreq mreq;
			struct rtable *rt;
			static struct options optmem;
			unsigned long route_src;
			struct device *dev = NULL;

			/*
			 * Check the arguments
			 */

			err = verify_area(VERIFY_READ, optval, sizeof(mreq));
			if (err)
				return err;

			memcpy_fromfs(&mreq, optval, sizeof(mreq));

			/*
			 * Get device for use later (same selection logic
			 * as IP_ADD_MEMBERSHIP above)
			 */

			if (mreq.imr_interface.s_addr == INADDR_ANY)
			{
				if ((rt = ip_rt_route(mreq.imr_multiaddr.s_addr, &optmem, &route_src)) != NULL)
				{
					dev = rt->rt_dev;
					rt->rt_use--;
				}
			}
			else
			{
				for (dev = dev_base; dev; dev = dev->next)
				{
					if ((dev->flags & IFF_UP) && (dev->flags & IFF_MULTICAST) &&
						(dev->pa_addr == mreq.imr_interface.s_addr))
						break;
				}
			}

			/*
			 * Did we find a suitable device.
			 */

			if (!dev)
				return -ENODEV;

			/*
			 * Leave group
			 */

			return ip_mc_leave_group(sk, dev, mreq.imr_multiaddr.s_addr);
		}
#endif
#ifdef CONFIG_IP_FIREWALL
		case IP_FW_ADD_BLK:
		case IP_FW_DEL_BLK:
		case IP_FW_ADD_FWD:
		case IP_FW_DEL_FWD:
		case IP_FW_CHK_BLK:
		case IP_FW_CHK_FWD:
		case IP_FW_FLUSH:
		case IP_FW_POLICY:
			/* Firewall manipulation is superuser only */
			if (!suser())
				return -EPERM;
			if (optlen > sizeof(tmp_fw) || optlen < 1)
				return -EINVAL;
			err = verify_area(VERIFY_READ, optval, optlen);
			if (err)
				return err;
			memcpy_fromfs(&tmp_fw, optval, optlen);
			err = ip_fw_ctl(optname, &tmp_fw, optlen);
			return -err;	/* -0 is 0 after all */

#endif
#ifdef CONFIG_IP_ACCT
		case IP_ACCT_DEL:
		case IP_ACCT_ADD:
		case IP_ACCT_FLUSH:
		case IP_ACCT_ZERO:
			/* Accounting manipulation is superuser only */
			if (!suser())
				return -EPERM;
			if (optlen > sizeof(tmp_fw) || optlen < 1)
				return -EINVAL;
			err = verify_area(VERIFY_READ, optval, optlen);
			if (err)
				return err;
			memcpy_fromfs(&tmp_fw, optval, optlen);
			err = ip_acct_ctl(optname, &tmp_fw, optlen);
			return -err;	/* -0 is 0 after all */
#endif
		/* IP_OPTIONS and friends go here eventually */
		default:
			return (-ENOPROTOOPT);
	}
}
2343 /*2344 * Get the options. Note for future reference. The GET of IP options gets the2345 * _received_ ones. The set sets the _sent_ ones.2346 */2347
2348 intip_getsockopt(structsock *sk, intlevel, intoptname, char *optval, int *optlen)
/* */2349 {2350 intval,err;
2351 #ifdefCONFIG_IP_MULTICAST2352 intlen;
2353 #endif2354
2355 if(level!=SOL_IP)
2356 return -EOPNOTSUPP;
2357
2358 switch(optname)
2359 {2360 caseIP_TOS:
2361 val=sk->ip_tos;
2362 break;
2363 caseIP_TTL:
2364 val=sk->ip_ttl;
2365 break;
2366 #ifdefCONFIG_IP_MULTICAST2367 caseIP_MULTICAST_TTL:
2368 val=sk->ip_mc_ttl;
2369 break;
2370 caseIP_MULTICAST_LOOP:
2371 val=sk->ip_mc_loop;
2372 break;
2373 caseIP_MULTICAST_IF:
2374 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2375 if(err)
2376 returnerr;
2377 len=strlen(sk->ip_mc_name);
2378 err=verify_area(VERIFY_WRITE, optval, len);
2379 if(err)
2380 returnerr;
2381 put_fs_long(len,(unsignedlong *) optlen);
2382 memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2383 return 0;
2384 #endif2385 default:
2386 return(-ENOPROTOOPT);
2387 }2388 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2389 if(err)
2390 returnerr;
2391 put_fs_long(sizeof(int),(unsignedlong *) optlen);
2392
2393 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2394 if(err)
2395 returnerr;
2396 put_fs_long(val,(unsignedlong *)optval);
2397
2398 return(0);
2399 }2400
2401 /*2402 * IP protocol layer initialiser2403 */2404
/*
 * The IP packet type, registered with the device layer by ip_init().
 * The type field is 0 here and filled in with htons(ETH_P_IP) at run
 * time, since htons() is not a compile-time constant expression here.
 */
static struct packet_type ip_packet_type =
{
	0,		/* MUTTER ntohs(ETH_P_IP),*/
	0,		/* copy */
	ip_rcv,		/* receive handler for all IP frames */
	NULL,
	NULL,		/* next: filled in by dev_add_pack */
};
2413
2414
2415 /*2416 * IP registers the packet type and then calls the subprotocol initialisers2417 */2418
2419 voidip_init(void)
/* */2420 {2421 ip_packet_type.type=htons(ETH_P_IP);
2422 dev_add_pack(&ip_packet_type);
2423 /* ip_raw_init();2424 ip_packet_init();2425 ip_tcp_init();2426 ip_udp_init();*/2427 }