root/net/inet/ip.c


DEFINITIONS

This source file includes the following definitions.
  1. ip_ioctl
  2. strict_route
  3. loose_route
  4. ip_route_check
  5. build_options
  6. ip_send
  7. ip_build_header
  8. do_options
  9. ip_compute_csum
  10. ip_send_check
  11. ip_frag_create
  12. ip_find
  13. ip_free
  14. ip_expire
  15. ip_create
  16. ip_done
  17. ip_glue
  18. ip_defrag
  19. ip_fragment
  20. ip_forward
  21. ip_rcv
  22. ip_loopback
  23. ip_queue_xmit
  24. ip_mc_procinfo
  25. ip_setsockopt
  26. ip_getsockopt
  27. ip_rt_event
  28. ip_init

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *              Stefan Becker, <stefanb@yello.ping.de>
  16  *              
  17  *
  18  * Fixes:
  19  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  20  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  21  *                                      (just stops a compiler warning).
  22  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  23  *                                      are junked rather than corrupting things.
  24  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  25  *                                      We used to process them non broadcast and
  26  *                                      boy could that cause havoc.
  27  *              Alan Cox        :       ip_forward sets the free flag on the
  28  *                                      new frame it queues. Still crap because
  29  *                                      it copies the frame but at least it
  30  *                                      doesn't eat memory too.
  31  *              Alan Cox        :       Generic queue code and memory fixes.
  32  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  33  *              Gerhard Koerting:       Forward fragmented frames correctly.
  34  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  35  *              Gerhard Koerting:       IP interface addressing fix.
  36  *              Linus Torvalds  :       More robustness checks
  37  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  38  *              Alan Cox        :       Save IP header pointer for later
  39  *              Alan Cox        :       ip option setting
  40  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  41  *              Alan Cox        :       Fragmentation bogosity removed
  42  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  43  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  44  *              Alan Cox        :       Silly ip bug when an overlength
  45  *                                      fragment turns up. Now frees the
  46  *                                      queue.
  47  *              Linus Torvalds/ :       Memory leakage on fragmentation
  48  *              Alan Cox        :       handling.
  49  *              Gerhard Koerting:       Forwarding uses IP priority hints
  50  *              Teemu Rantanen  :       Fragment problems.
  51  *              Alan Cox        :       General cleanup, comments and reformat
  52  *              Alan Cox        :       SNMP statistics
  53  *              Alan Cox        :       BSD address rule semantics. Also see
  54  *                                      UDP as there is a nasty checksum issue
  55  *                                      if you do things the wrong way.
  56  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  57  *              Alan Cox        :       IP options adjust sk->priority.
  58  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  59  *              Alan Cox        :       Avoid ip_chk_addr when possible.
  60  *      Richard Underwood       :       IP multicasting.
  61  *              Alan Cox        :       Cleaned up multicast handlers.
  62  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  63  *              Gunther Mayer   :       Fix the SNMP reporting typo
  64  *              Alan Cox        :       Always in group 224.0.0.1
  65  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  66  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  67  *              Alan Cox        :       Use notifiers.
  68  *              Bjorn Ekwall    :       Removed ip_csum (from slhc.c too)
  69  *              Bjorn Ekwall    :       Moved ip_fast_csum to ip.h (inline!)
  70  *              Stefan Becker   :       Send out ICMP HOST REDIRECT
  71  *              Alan Cox        :       Only send ICMP_REDIRECT if src/dest are the same net.
  72  *  
  73  *
  74  * To Fix:
  75  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  76  *              and time stamp but that's about all. Use the route mtu field here too
  77  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  78  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  79  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  80  *              Output fragmentation wants updating along with the buffer management to use a single 
  81  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  82  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  83  *              fragmentation anyway.
  84  *
  85  *              This program is free software; you can redistribute it and/or
  86  *              modify it under the terms of the GNU General Public License
  87  *              as published by the Free Software Foundation; either version
  88  *              2 of the License, or (at your option) any later version.
  89  */
  90 
  91 #include <asm/segment.h>
  92 #include <asm/system.h>
  93 #include <linux/types.h>
  94 #include <linux/kernel.h>
  95 #include <linux/sched.h>
  96 #include <linux/mm.h>
  97 #include <linux/string.h>
  98 #include <linux/errno.h>
  99 #include <linux/config.h>
 100 
 101 #include <linux/socket.h>
 102 #include <linux/sockios.h>
 103 #include <linux/in.h>
 104 #include <linux/inet.h>
 105 #include <linux/netdevice.h>
 106 #include <linux/etherdevice.h>
 107 
 108 #include "snmp.h"
 109 #include "ip.h"
 110 #include "protocol.h"
 111 #include "route.h"
 112 #include "tcp.h"
 113 #include "udp.h"
 114 #include <linux/skbuff.h>
 115 #include "sock.h"
 116 #include "arp.h"
 117 #include "icmp.h"
 118 #include "raw.h"
 119 #include <linux/igmp.h>
 120 #include <linux/ip_fw.h>
 121 
 122 #define CONFIG_IP_DEFRAG
 123 
 124 extern int last_retran;
 125 extern void sort_send(struct sock *sk);
 126 
 127 #define min(a,b)        ((a)<(b)?(a):(b))
 128 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
 129 
 130 /*
 131  *      SNMP management statistics
 132  */
 133 
 134 #ifdef CONFIG_IP_FORWARD
 135 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 136 #else
 137 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 138 #endif
 139 
 140 /*
 141  *      Handle the issuing of an ioctl() request
 142  *      for the ip device. This is scheduled to
 143  *      disappear
 144  */
 145 
 146 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 147 {
 148         switch(cmd)
 149         {
 150                 default:
 151                         return(-EINVAL);
 152         }
 153 }
 154 
 155 
 156 /* these two routines will do routing. */
 157 
 158 static void
 159 strict_route(struct iphdr *iph, struct options *opt)
 160 {
 161 }
 162 
 163 
 164 static void
 165 loose_route(struct iphdr *iph, struct options *opt)
 166 {
 167 }
 168 
 169 
 170 
 171 
 172 /* This routine will check to see if we have lost a gateway. */
 173 void
 174 ip_route_check(unsigned long daddr)
 175 {
 176 }
 177 
 178 
 179 #if 0
 180 /* this routine puts the options at the end of an ip header. */
 181 static int
 182 build_options(struct iphdr *iph, struct options *opt)
 183 {
 184   unsigned char *ptr;
 185   /* currently we don't support any options. */
 186   ptr = (unsigned char *)(iph+1);
 187   *ptr = 0;
 188   return (4);
 189 }
 190 #endif
 191 
 192 
 193 /*
 194  *      Take an skb, and fill in the MAC header.
 195  */
 196 
 197 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 198 {
 199         int mac = 0;
 200 
 201         skb->dev = dev;
 202         skb->arp = 1;
 203         if (dev->hard_header)
 204         {
 205                 /*
 206                  *      Build a hardware header. Source address is our mac, destination unknown
 207                  *      (rebuild header will sort this out)
 208                  */
 209                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
 210                 if (mac < 0)
 211                 {
 212                         mac = -mac;
 213                         skb->arp = 0;
 214                         skb->raddr = daddr;     /* next routing address */
 215                 }
 216         }
 217         return mac;
 218 }
 219 
 220 int ip_id_count = 0;
 221 
 222 /*
 223  * This routine builds the appropriate hardware/IP headers for
  224  * an outgoing frame.  It assumes that if *dev != NULL then the
 225  * protocol knows what it's doing, otherwise it uses the
 226  * routing/ARP tables to select a device struct.
 227  */
 228 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 229                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 230 {
 231         static struct options optmem;
 232         struct iphdr *iph;
 233         struct rtable *rt;
 234         unsigned char *buff;
 235         unsigned long raddr;
 236         int tmp;
 237         unsigned long src;
 238 
 239         buff = skb->data;
 240 
 241         /*
 242          *      See if we need to look up the device.
 243          */
 244 
 245 #ifdef CONFIG_INET_MULTICAST    
 246         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 247                 *dev=dev_get(skb->sk->ip_mc_name);
 248 #endif
 249         if (*dev == NULL)
 250         {
 251                 if(skb->localroute)
 252                         rt = ip_rt_local(daddr, &optmem, &src);
 253                 else
 254                         rt = ip_rt_route(daddr, &optmem, &src);
 255                 if (rt == NULL)
 256                 {
 257                         ip_statistics.IpOutNoRoutes++;
 258                         return(-ENETUNREACH);
 259                 }
 260 
 261                 *dev = rt->rt_dev;
 262                 /*
 263                  *      If the frame is from us and going off machine it MUST MUST MUST
 264                  *      have the output device ip address and never the loopback
 265                  */
 266                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 267                         saddr = src;/*rt->rt_dev->pa_addr;*/
 268                 raddr = rt->rt_gateway;
 269 
 270                 opt = &optmem;
 271         }
 272         else
 273         {
 274                 /*
 275                  *      We still need the address of the first hop.
 276                  */
 277                 if(skb->localroute)
 278                         rt = ip_rt_local(daddr, &optmem, &src);
 279                 else
 280                         rt = ip_rt_route(daddr, &optmem, &src);
 281                 /*
 282                  *      If the frame is from us and going off machine it MUST MUST MUST
 283                  *      have the output device ip address and never the loopback
 284                  */
 285                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 286                         saddr = src;/*rt->rt_dev->pa_addr;*/
 287 
 288                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 289         }
 290 
 291         /*
 292          *      No source addr so make it our addr
 293          */
 294         if (saddr == 0)
 295                 saddr = src;
 296 
 297         /*
 298          *      No gateway so aim at the real destination
 299          */
 300         if (raddr == 0)
 301                 raddr = daddr;
 302 
 303         /*
 304          *      Now build the MAC header.
 305          */
 306 
 307         tmp = ip_send(skb, raddr, len, *dev, saddr);
 308         buff += tmp;
 309         len -= tmp;
 310 
 311         /*
 312          *      Book keeping
 313          */
 314 
 315         skb->dev = *dev;
 316         skb->saddr = saddr;
 317         if (skb->sk)
 318                 skb->sk->saddr = saddr;
 319 
 320         /*
 321          *      Now build the IP header.
 322          */
 323 
 324         /*
 325          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 326          *      one is being supplied to us by the user
 327          */
 328 
 329         if(type == IPPROTO_RAW)
 330                 return (tmp);
 331 
 332         iph = (struct iphdr *)buff;
 333         iph->version  = 4;
 334         iph->tos      = tos;
 335         iph->frag_off = 0;
 336         iph->ttl      = ttl;
 337         iph->daddr    = daddr;
 338         iph->saddr    = saddr;
 339         iph->protocol = type;
 340         iph->ihl      = 5;
 341         skb->ip_hdr   = iph;
 342 
 343         /* Setup the IP options. */
 344 #ifdef Not_Yet_Avail
 345         build_options(iph, opt);
 346 #endif
 347 
 348         return(20 + tmp);       /* IP header plus MAC header size */
 349 }
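/*
 *      Editor's sketch -- not part of the original file. It shows how a
 *      transport-level sender of this era typically drives the routines
 *      above: allocate an skb big enough for the link and IP headers plus
 *      the payload, let ip_build_header() fill in the headers and choose
 *      the device, copy the payload in behind them and hand the buffer to
 *      ip_queue_xmit(). The 64 byte link-header reserve, the tos/ttl
 *      constants, the meaning of the final '1' flag and the error handling
 *      are illustrative assumptions, not taken from this file.
 */
#if 0
static int example_xmit(struct sock *sk, unsigned long saddr,
        unsigned long daddr, int proto, unsigned char *payload, int len)
{
        struct device *dev = NULL;
        struct sk_buff *skb;
        int hdrs;

        skb = alloc_skb(64 + 20 + len, GFP_ATOMIC);     /* link + IP + data */
        if (skb == NULL)
                return(-ENOMEM);
        skb->sk = sk;           /* (wmem accounting omitted in this sketch) */
        skb->free = 1;

        /* Build the MAC and IP headers and pick *dev; returns their length. */
        hdrs = ip_build_header(skb, saddr, daddr, &dev, proto, NULL,
                               len, 0 /* tos */, 64 /* ttl */);
        if (hdrs < 0)
        {
                kfree_skb(skb, FREE_WRITE);
                return(hdrs);
        }

        memcpy(skb->data + hdrs, payload, len);
        skb->len = hdrs + len;
        ip_queue_xmit(sk, dev, skb, 1);         /* assumed: free after send */
        return(len);
}
#endif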
 350 
 351 
 352 static int
 353 do_options(struct iphdr *iph, struct options *opt)
 354 {
 355   unsigned char *buff;
 356   int done = 0;
 357   int i, len = sizeof(struct iphdr);
 358 
 359   /* Zero out the options. */
 360   opt->record_route.route_size = 0;
 361   opt->loose_route.route_size  = 0;
 362   opt->strict_route.route_size = 0;
 363   opt->tstamp.ptr              = 0;
 364   opt->security                = 0;
 365   opt->compartment             = 0;
 366   opt->handling                = 0;
 367   opt->stream                  = 0;
 368   opt->tcc                     = 0;
  369   return(0);    /* NOTE: option parsing below is disabled by this early return; the code is kept for reference. */
 370 
 371   /* Advance the pointer to start at the options. */
 372   buff = (unsigned char *)(iph + 1);
 373 
 374   /* Now start the processing. */
 375   while (!done && len < iph->ihl*4) switch(*buff) {
 376         case IPOPT_END:
 377                 done = 1;
 378                 break;
 379         case IPOPT_NOOP:
 380                 buff++;
 381                 len++;
 382                 break;
 383         case IPOPT_SEC:
 384                 buff++;
 385                 if (*buff != 11) return(1);
 386                 buff++;
 387                 opt->security = ntohs(*(unsigned short *)buff);
 388                 buff += 2;
 389                 opt->compartment = ntohs(*(unsigned short *)buff);
 390                 buff += 2;
 391                 opt->handling = ntohs(*(unsigned short *)buff);
 392                 buff += 2;
 393                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 394                 buff += 3;
 395                 len += 11;
 396                 break;
 397         case IPOPT_LSRR:
 398                 buff++;
 399                 if ((*buff - 3)% 4 != 0) return(1);
 400                 len += *buff;
 401                 opt->loose_route.route_size = (*buff -3)/4;
 402                 buff++;
 403                 if (*buff % 4 != 0) return(1);
 404                 opt->loose_route.pointer = *buff/4 - 1;
 405                 buff++;
 406                 buff++;
 407                 for (i = 0; i < opt->loose_route.route_size; i++) {
 408                         if(i>=MAX_ROUTE)
 409                                 return(1);
 410                         opt->loose_route.route[i] = *(unsigned long *)buff;
 411                         buff += 4;
 412                 }
 413                 break;
 414         case IPOPT_SSRR:
 415                 buff++;
 416                 if ((*buff - 3)% 4 != 0) return(1);
 417                 len += *buff;
 418                 opt->strict_route.route_size = (*buff -3)/4;
 419                 buff++;
 420                 if (*buff % 4 != 0) return(1);
 421                 opt->strict_route.pointer = *buff/4 - 1;
 422                 buff++;
 423                 buff++;
 424                 for (i = 0; i < opt->strict_route.route_size; i++) {
 425                         if(i>=MAX_ROUTE)
 426                                 return(1);
 427                         opt->strict_route.route[i] = *(unsigned long *)buff;
 428                         buff += 4;
 429                 }
 430                 break;
 431         case IPOPT_RR:
 432                 buff++;
 433                 if ((*buff - 3)% 4 != 0) return(1);
 434                 len += *buff;
 435                 opt->record_route.route_size = (*buff -3)/4;
 436                 buff++;
 437                 if (*buff % 4 != 0) return(1);
 438                 opt->record_route.pointer = *buff/4 - 1;
 439                 buff++;
 440                 buff++;
 441                 for (i = 0; i < opt->record_route.route_size; i++) {
 442                         if(i>=MAX_ROUTE)
 443                                 return 1;
 444                         opt->record_route.route[i] = *(unsigned long *)buff;
 445                         buff += 4;
 446                 }
 447                 break;
 448         case IPOPT_SID:
 449                 len += 4;
 450                 buff +=2;
 451                 opt->stream = *(unsigned short *)buff;
 452                 buff += 2;
 453                 break;
 454         case IPOPT_TIMESTAMP:
 455                 buff++;
 456                 len += *buff;
 457                 if (*buff % 4 != 0) return(1);
 458                 opt->tstamp.len = *buff / 4 - 1;
 459                 buff++;
 460                 if ((*buff - 1) % 4 != 0) return(1);
 461                 opt->tstamp.ptr = (*buff-1)/4;
 462                 buff++;
 463                 opt->tstamp.x.full_char = *buff;
 464                 buff++;
 465                 for (i = 0; i < opt->tstamp.len; i++) {
 466                         opt->tstamp.data[i] = *(unsigned long *)buff;
 467                         buff += 4;
 468                 }
 469                 break;
 470         default:
 471                 return(1);
 472   }
 473 
 474   if (opt->record_route.route_size == 0) {
 475         if (opt->strict_route.route_size != 0) {
 476                 memcpy(&(opt->record_route), &(opt->strict_route),
 477                                              sizeof(opt->record_route));
 478         } else if (opt->loose_route.route_size != 0) {
 479                 memcpy(&(opt->record_route), &(opt->loose_route),
 480                                              sizeof(opt->record_route));
 481         }
 482   }
 483 
 484   if (opt->strict_route.route_size != 0 &&
 485       opt->strict_route.route_size != opt->strict_route.pointer) {
 486         strict_route(iph, opt);
 487         return(0);
 488   }
 489 
 490   if (opt->loose_route.route_size != 0 &&
 491       opt->loose_route.route_size != opt->loose_route.pointer) {
 492         loose_route(iph, opt);
 493         return(0);
 494   }
 495 
 496   return(0);
 497 }
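/*
 *      Editor's note -- not part of the original file. A worked example of
 *      the length/pointer arithmetic used above: a Record Route option with
 *      a length byte of 11 carries (11 - 3) / 4 = 2 address slots, and a
 *      pointer byte of 4 means the next free slot is 4/4 - 1 = 0, i.e. no
 *      addresses have been recorded yet. LSRR and SSRR use the same layout.
 */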
 498 
 499 /*
 500  * This routine does all the checksum computations that don't
 501  * require anything special (like copying or special headers).
 502  */
 503 
 504 unsigned short ip_compute_csum(unsigned char * buff, int len)
 505 {
 506         unsigned long sum = 0;
 507 
 508         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 509         if (len > 3)
 510         {
 511                 __asm__("clc\n"
 512                 "1:\t"
 513                 "lodsl\n\t"
 514                 "adcl %%eax, %%ebx\n\t"
 515                 "loop 1b\n\t"
 516                 "adcl $0, %%ebx\n\t"
 517                 "movl %%ebx, %%eax\n\t"
 518                 "shrl $16, %%eax\n\t"
 519                 "addw %%ax, %%bx\n\t"
 520                 "adcw $0, %%bx"
 521                 : "=b" (sum) , "=S" (buff)
 522                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 523                 : "ax", "cx", "si", "bx" );
 524         }
 525         if (len & 2)
 526         {
 527                 __asm__("lodsw\n\t"
 528                 "addw %%ax, %%bx\n\t"
 529                 "adcw $0, %%bx"
 530                 : "=b" (sum), "=S" (buff)
 531                 : "0" (sum), "1" (buff)
 532                 : "bx", "ax", "si");
 533         }
 534         if (len & 1)
 535         {
 536                 __asm__("lodsb\n\t"
 537                 "movb $0, %%ah\n\t"
 538                 "addw %%ax, %%bx\n\t"
 539                 "adcw $0, %%bx"
 540                 : "=b" (sum), "=S" (buff)
 541                 : "0" (sum), "1" (buff)
 542                 : "bx", "ax", "si");
 543         }
 544         sum =~sum;
 545         return(sum & 0xffff);
 546 }
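/*
 *      Editor's sketch -- not part of the original file. A portable C
 *      equivalent of the ones-complement sum the assembler above computes
 *      (the assembler merely accumulates 32 bits at a time for speed).
 *      Illustrative only.
 */
#if 0
static unsigned short example_csum(unsigned char *buff, int len)
{
        unsigned short *w = (unsigned short *) buff;
        unsigned long sum = 0;

        while (len > 1)                 /* add successive 16-bit words */
        {
                sum += *w++;
                len -= 2;
        }
        if (len == 1)                   /* add any trailing odd byte */
                sum += *(unsigned char *) w;
        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return((unsigned short) ~sum);
}
#endif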
 547 
 548 /*
 549  *      Generate a checksum for an outgoing IP datagram.
 550  */
 551 
 552 void ip_send_check(struct iphdr *iph)
 553 {
 554         iph->check = 0;
 555         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 556 }
 557 
 558 /************************ Fragment Handlers From NET2E **********************************/
 559 
 560 
 561 /*
 562  *      This fragment handler is a bit of a heap. On the other hand it works quite
 563  *      happily and handles things quite well.
 564  */
 565 
 566 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
 567 
 568 /*
 569  *      Create a new fragment entry.
 570  */
 571 
 572 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 573 {
 574         struct ipfrag *fp;
 575 
 576         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 577         if (fp == NULL)
 578         {
 579                 printk("IP: frag_create: no memory left !\n");
 580                 return(NULL);
 581         }
 582         memset(fp, 0, sizeof(struct ipfrag));
 583 
 584         /* Fill in the structure. */
 585         fp->offset = offset;
 586         fp->end = end;
 587         fp->len = end - offset;
 588         fp->skb = skb;
 589         fp->ptr = ptr;
 590 
 591         return(fp);
 592 }
 593 
 594 
 595 /*
 596  *      Find the correct entry in the "incomplete datagrams" queue for
 597  *      this IP datagram, and return the queue entry address if found.
 598  */
 599 
 600 static struct ipq *ip_find(struct iphdr *iph)
 601 {
 602         struct ipq *qp;
 603         struct ipq *qplast;
 604 
 605         cli();
 606         qplast = NULL;
 607         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 608         {
 609                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 610                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 611                 {
 612                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 613                         sti();
 614                         return(qp);
 615                 }
 616         }
 617         sti();
 618         return(NULL);
 619 }
 620 
 621 
 622 /*
 623  *      Remove an entry from the "incomplete datagrams" queue, either
 624  *      because we completed, reassembled and processed it, or because
 625  *      it timed out.
 626  */
 627 
 628 static void ip_free(struct ipq *qp)
 629 {
 630         struct ipfrag *fp;
 631         struct ipfrag *xp;
 632 
 633         /*
 634          * Stop the timer for this entry.
 635          */
 636 
 637         del_timer(&qp->timer);
 638 
 639         /* Remove this entry from the "incomplete datagrams" queue. */
 640         cli();
 641         if (qp->prev == NULL)
 642         {
 643                 ipqueue = qp->next;
 644                 if (ipqueue != NULL)
 645                         ipqueue->prev = NULL;
 646         }
 647         else
 648         {
 649                 qp->prev->next = qp->next;
 650                 if (qp->next != NULL)
 651                         qp->next->prev = qp->prev;
 652         }
 653 
 654         /* Release all fragment data. */
 655 
 656         fp = qp->fragments;
 657         while (fp != NULL)
 658         {
 659                 xp = fp->next;
 660                 IS_SKB(fp->skb);
 661                 kfree_skb(fp->skb,FREE_READ);
 662                 kfree_s(fp, sizeof(struct ipfrag));
 663                 fp = xp;
 664         }
 665 
 666         /* Release the MAC header. */
 667         kfree_s(qp->mac, qp->maclen);
 668 
 669         /* Release the IP header. */
 670         kfree_s(qp->iph, qp->ihlen + 8);
 671 
 672         /* Finally, release the queue descriptor itself. */
 673         kfree_s(qp, sizeof(struct ipq));
 674         sti();
 675 }
 676 
 677 
 678 /*
 679  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 680  */
 681 
 682 static void ip_expire(unsigned long arg)
 683 {
 684         struct ipq *qp;
 685 
 686         qp = (struct ipq *)arg;
 687 
 688         /*
 689          *      Send an ICMP "Fragment Reassembly Timeout" message.
 690          */
 691 
 692         ip_statistics.IpReasmTimeout++;
 693         ip_statistics.IpReasmFails++;   
 694         /* This if is always true... shrug */
 695         if(qp->fragments!=NULL)
 696                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 697                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 698 
 699         /*
 700          *      Nuke the fragment queue.
 701          */
 702         ip_free(qp);
 703 }
 704 
 705 
 706 /*
 707  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 708  *      We will (hopefully :-) receive all other fragments of this datagram
 709  *      in time, so we just create a queue for this datagram, in which we
 710  *      will insert the received fragments at their respective positions.
 711  */
 712 
 713 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 714 {
 715         struct ipq *qp;
 716         int maclen;
 717         int ihlen;
 718 
 719         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 720         if (qp == NULL)
 721         {
 722                 printk("IP: create: no memory left !\n");
 723                 return(NULL);
 725         }
 726         memset(qp, 0, sizeof(struct ipq));
 727 
 728         /*
 729          *      Allocate memory for the MAC header.
 730          *
  731          *      FIXME: We have a maximum MAC header size limit defined
  732          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 733          */
 734 
 735         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 736         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 737         if (qp->mac == NULL)
 738         {
 739                 printk("IP: create: no memory left !\n");
 740                 kfree_s(qp, sizeof(struct ipq));
 741                 return(NULL);
 742         }
 743 
 744         /*
 745          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 746          */
 747 
 748         ihlen = (iph->ihl * sizeof(unsigned long));
 749         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 750         if (qp->iph == NULL)
 751         {
 752                 printk("IP: create: no memory left !\n");
 753                 kfree_s(qp->mac, maclen);
 754                 kfree_s(qp, sizeof(struct ipq));
 755                 return(NULL);
 756         }
 757 
 758         /* Fill in the structure. */
 759         memcpy(qp->mac, skb->data, maclen);
 760         memcpy(qp->iph, iph, ihlen + 8);
 761         qp->len = 0;
 762         qp->ihlen = ihlen;
 763         qp->maclen = maclen;
 764         qp->fragments = NULL;
 765         qp->dev = dev;
 766 
 767         /* Start a timer for this entry. */
 768         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 769         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 770         qp->timer.function = ip_expire;                 /* expire function      */
 771         add_timer(&qp->timer);
 772 
 773         /* Add this entry to the queue. */
 774         qp->prev = NULL;
 775         cli();
 776         qp->next = ipqueue;
 777         if (qp->next != NULL)
 778                 qp->next->prev = qp;
 779         ipqueue = qp;
 780         sti();
 781         return(qp);
 782 }
 783 
 784 
 785 /*
 786  *      See if a fragment queue is complete.
 787  */
 788 
 789 static int ip_done(struct ipq *qp)
 790 {
 791         struct ipfrag *fp;
 792         int offset;
 793 
 794         /* Only possible if we received the final fragment. */
 795         if (qp->len == 0)
 796                 return(0);
 797 
 798         /* Check all fragment offsets to see if they connect. */
 799         fp = qp->fragments;
 800         offset = 0;
 801         while (fp != NULL)
 802         {
 803                 if (fp->offset > offset)
 804                         return(0);      /* fragment(s) missing */
 805                 offset = fp->end;
 806                 fp = fp->next;
 807         }
 808 
 809         /* All fragments are present. */
 810         return(1);
 811 }
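/*
 *      Editor's note -- not part of the original file. Example of the walk
 *      above: with fragments covering [0,1480) and [1480,2000) and qp->len
 *      set to 2000 by the final fragment, no fragment starts beyond the
 *      running 'offset', so the datagram is complete. Had the second
 *      fragment started at 1488 instead, the 8 byte hole would make
 *      fp->offset > offset and ip_done() would return 0.
 */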
 812 
 813 
 814 /*
 815  *      Build a new IP datagram from all its fragments.
 816  *
 817  *      FIXME: We copy here because we lack an effective way of handling lists
 818  *      of bits on input. Until the new skb data handling is in I'm not going
 819  *      to touch this with a bargepole. This also causes a 4Kish limit on
 820  *      packet sizes.
 821  */
 822 
 823 static struct sk_buff *ip_glue(struct ipq *qp)
 824 {
 825         struct sk_buff *skb;
 826         struct iphdr *iph;
 827         struct ipfrag *fp;
 828         unsigned char *ptr;
 829         int count, len;
 830 
 831         /*
 832          *      Allocate a new buffer for the datagram.
 833          */
 834 
 835         len = qp->maclen + qp->ihlen + qp->len;
 836 
 837         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 838         {
 839                 ip_statistics.IpReasmFails++;
 840                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 841                 ip_free(qp);
 842                 return(NULL);
 843         }
 844 
 845         /* Fill in the basic details. */
 846         skb->len = (len - qp->maclen);
 847         skb->h.raw = skb->data;
 848         skb->free = 1;
 849 
 850         /* Copy the original MAC and IP headers into the new buffer. */
 851         ptr = (unsigned char *) skb->h.raw;
 852         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 853         ptr += qp->maclen;
 854         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 855         ptr += qp->ihlen;
 856         skb->h.raw += qp->maclen;
 857 
 858         count = 0;
 859 
 860         /* Copy the data portions of all fragments into the new buffer. */
 861         fp = qp->fragments;
 862         while(fp != NULL)
 863         {
 864                 if(count+fp->len > skb->len)
 865                 {
 866                         printk("Invalid fragment list: Fragment over size.\n");
 867                         ip_free(qp);
 868                         kfree_skb(skb,FREE_WRITE);
 869                         ip_statistics.IpReasmFails++;
 870                         return NULL;
 871                 }
 872                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 873                 count += fp->len;
 874                 fp = fp->next;
 875         }
 876 
 877         /* We glued together all fragments, so remove the queue entry. */
 878         ip_free(qp);
 879 
 880         /* Done with all fragments. Fixup the new IP header. */
 881         iph = skb->h.iph;
 882         iph->frag_off = 0;
 883         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 884         skb->ip_hdr = iph;
 885 
 886         ip_statistics.IpReasmOKs++;
 887         return(skb);
 888 }
 889 
 890 
 891 /*
 892  *      Process an incoming IP datagram fragment.
 893  */
 894 
 895 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 896 {
 897         struct ipfrag *prev, *next;
 898         struct ipfrag *tfp;
 899         struct ipq *qp;
 900         struct sk_buff *skb2;
 901         unsigned char *ptr;
 902         int flags, offset;
 903         int i, ihl, end;
 904 
 905         ip_statistics.IpReasmReqds++;
 906 
 907         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 908         qp = ip_find(iph);
 909 
 910         /* Is this a non-fragmented datagram? */
 911         offset = ntohs(iph->frag_off);
 912         flags = offset & ~IP_OFFSET;
 913         offset &= IP_OFFSET;
 914         if (((flags & IP_MF) == 0) && (offset == 0))
 915         {
 916                 if (qp != NULL)
 917                         ip_free(qp);    /* Huh? How could this exist?? */
 918                 return(skb);
 919         }
 920 
 921         offset <<= 3;           /* offset is in 8-byte chunks */
 922 
 923         /*
 924          * If the queue already existed, keep restarting its timer as long
 925          * as we still are receiving fragments.  Otherwise, create a fresh
 926          * queue entry.
 927          */
 928 
 929         if (qp != NULL)
 930         {
 931                 del_timer(&qp->timer);
 932                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 933                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 934                 qp->timer.function = ip_expire;         /* expire function */
 935                 add_timer(&qp->timer);
 936         }
 937         else
 938         {
 939                 /*
 940                  *      If we failed to create it, then discard the frame
 941                  */
 942                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 943                 {
 944                         skb->sk = NULL;
 945                         kfree_skb(skb, FREE_READ);
 946                         ip_statistics.IpReasmFails++;
 947                         return NULL;
 948                 }
 949         }
 950 
 951         /*
 952          *      Determine the position of this fragment.
 953          */
 954 
 955         ihl = (iph->ihl * sizeof(unsigned long));
 956         end = offset + ntohs(iph->tot_len) - ihl;
 957 
 958         /*
 959          *      Point into the IP datagram 'data' part.
 960          */
 961 
 962         ptr = skb->data + dev->hard_header_len + ihl;
 963 
 964         /*
 965          *      Is this the final fragment?
 966          */
 967 
 968         if ((flags & IP_MF) == 0)
 969                 qp->len = end;
 970 
 971         /*
 972          *      Find out which fragments are in front and at the back of us
 973          *      in the chain of fragments so far.  We must know where to put
 974          *      this fragment, right?
 975          */
 976 
 977         prev = NULL;
 978         for(next = qp->fragments; next != NULL; next = next->next)
 979         {
 980                 if (next->offset > offset)
 981                         break;  /* bingo! */
 982                 prev = next;
 983         }
 984 
 985         /*
 986          *      We found where to put this one.
 987          *      Check for overlap with preceding fragment, and, if needed,
 988          *      align things so that any overlaps are eliminated.
 989          */
 990         if (prev != NULL && offset < prev->end)
 991         {
 992                 i = prev->end - offset;
 993                 offset += i;    /* ptr into datagram */
 994                 ptr += i;       /* ptr into fragment data */
 995         }
 996 
 997         /*
 998          * Look for overlap with succeeding segments.
 999          * If we can merge fragments, do it.
1000          */
1001 
1002         for(; next != NULL; next = tfp)
1003         {
1004                 tfp = next->next;
1005                 if (next->offset >= end)
1006                         break;          /* no overlaps at all */
1007 
1008                 i = end - next->offset;                 /* overlap is 'i' bytes */
1009                 next->len -= i;                         /* so reduce size of    */
1010                 next->offset += i;                      /* next fragment        */
1011                 next->ptr += i;
1012 
1013                 /*
1014                  *      If we get a frag size of <= 0, remove it and the packet
1015                  *      that it goes with.
1016                  */
1017                 if (next->len <= 0)
1018                 {
1019                         if (next->prev != NULL)
1020                                 next->prev->next = next->next;
1021                         else
1022                                 qp->fragments = next->next;
1023 
 1024                         if (next->next != NULL)
 1025                                 next->next->prev = next->prev;
1026 
1027                         kfree_skb(next->skb,FREE_READ);
1028                         kfree_s(next, sizeof(struct ipfrag));
1029                 }
1030         }
1031 
1032         /*
1033          *      Insert this fragment in the chain of fragments.
1034          */
1035 
1036         tfp = NULL;
1037         tfp = ip_frag_create(offset, end, skb, ptr);
1038 
1039         /*
1040          *      No memory to save the fragment - so throw the lot
1041          */
1042 
1043         if (!tfp)
1044         {
1045                 skb->sk = NULL;
1046                 kfree_skb(skb, FREE_READ);
1047                 return NULL;
1048         }
1049         tfp->prev = prev;
1050         tfp->next = next;
1051         if (prev != NULL)
1052                 prev->next = tfp;
1053         else
1054                 qp->fragments = tfp;
1055 
1056         if (next != NULL)
1057                 next->prev = tfp;
1058 
1059         /*
1060          *      OK, so we inserted this new fragment into the chain.
1061          *      Check if we now have a full IP datagram which we can
1062          *      bump up to the IP layer...
1063          */
1064 
1065         if (ip_done(qp))
1066         {
1067                 skb2 = ip_glue(qp);             /* glue together the fragments */
1068                 return(skb2);
1069         }
1070         return(NULL);
1071 }
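/*
 *      Editor's sketch -- not part of the original file. The overlap
 *      handling above, reduced to plain offset arithmetic: a new fragment
 *      is first advanced past the end of the fragment in front of it, and
 *      any queued fragment it overlaps is then shrunk from the front.
 *      Standalone and illustrative only.
 */
#if 0
#include <stdio.h>

int main(void)
{
        int prev_end = 24;                      /* queued fragment: [0,24)   */
        int next_off = 40, next_end = 56;       /* queued fragment: [40,56)  */
        int offset = 16, end = 48;              /* new arrival:     [16,48)  */
        int i;

        if (offset < prev_end)          /* overlap with the one in front */
        {
                i = prev_end - offset;
                offset += i;            /* keep only the bytes prev lacks */
        }
        if (next_off < end)             /* overlap with the one behind */
        {
                i = end - next_off;
                next_off += i;          /* shrink the queued fragment */
        }
        printf("new: [%d,%d)  queued: [%d,%d)\n", offset, end, next_off, next_end);
        return 0;                       /* prints: new: [24,48)  queued: [48,56) */
}
#endif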
1072 
1073 
1074 /*
1075  *      This IP datagram is too large to be sent in one piece.  Break it up into
1076  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1077  *      a block of the data of the original IP data part) that will yet fit in a
1078  *      single device frame, and queue such a frame for sending by calling the
1079  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1080  *      if this function causes a loop...
1081  *
1082  *      Yes this is inefficient, feel free to submit a quicker one.
1083  *
1084  *      **Protocol Violation**
1085  *      We copy all the options to each fragment. !FIXME!
1086  */
1087 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1088 {
1089         struct iphdr *iph;
1090         unsigned char *raw;
1091         unsigned char *ptr;
1092         struct sk_buff *skb2;
1093         int left, mtu, hlen, len;
1094         int offset;
1095         unsigned long flags;
1096 
1097         /*
1098          *      Point into the IP datagram header.
1099          */
1100 
1101         raw = skb->data;
1102         iph = (struct iphdr *) (raw + dev->hard_header_len);
1103 
1104         skb->ip_hdr = iph;
1105 
1106         /*
1107          *      Setup starting values.
1108          */
1109 
1110         hlen = (iph->ihl * sizeof(unsigned long));
1111         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1112         hlen += dev->hard_header_len;           /* Total header size */
1113         mtu = (dev->mtu - hlen);                /* Size of data space */
1114         ptr = (raw + hlen);                     /* Where to start from */
1115 
1116         /*
1117          *      Check for any "DF" flag. [DF means do not fragment]
1118          */
1119 
1120         if (ntohs(iph->frag_off) & IP_DF)
1121         {
1122                 /*
1123                  *      Reply giving the MTU of the failed hop.
1124                  */
1125                 ip_statistics.IpFragFails++;
1126                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
1127                 return;
1128         }
1129 
1130         /*
 1131          *      The protocol doesn't seem to say what to do when the frame plus
 1132          *      options doesn't fit the mtu. As this code used to fall down dead
 1133          *      in that case, we were fortunate it didn't happen often.
1134          */
1135 
1136         if(mtu<8)
1137         {
1138                 /* It's wrong but it's better than nothing */
1139                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
1140                 ip_statistics.IpFragFails++;
1141                 return;
1142         }
1143 
1144         /*
1145          *      Fragment the datagram.
1146          */
1147 
1148         /*
1149          *      The initial offset is 0 for a complete frame. When
1150          *      fragmenting fragments it's wherever this one starts.
1151          */
1152 
1153         if (is_frag & 2)
1154                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1155         else
1156                 offset = 0;
1157 
1158 
1159         /*
1160          *      Keep copying data until we run out.
1161          */
1162 
1163         while(left > 0)
1164         {
1165                 len = left;
1166                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1167                 if (len > mtu)
1168                         len = mtu;
 1169                 /* IF: we are not sending up to and including the packet end
1170                    then align the next start on an eight byte boundary */
1171                 if (len < left)
1172                 {
1173                         len/=8;
1174                         len*=8;
1175                 }
1176                 /*
1177                  *      Allocate buffer.
1178                  */
1179 
1180                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1181                 {
1182                         printk("IP: frag: no memory for new fragment!\n");
1183                         ip_statistics.IpFragFails++;
1184                         return;
1185                 }
1186 
1187                 /*
1188                  *      Set up data on packet
1189                  */
1190 
1191                 skb2->arp = skb->arp;
1192                 if(skb->free==0)
1193                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1194                 skb2->free = 1;
1195                 skb2->len = len + hlen;
1196                 skb2->h.raw=(char *) skb2->data;
1197                 /*
1198                  *      Charge the memory for the fragment to any owner
1199                  *      it might possess
1200                  */
1201 
1202                 save_flags(flags);
1203                 if (sk)
1204                 {
1205                         cli();
1206                         sk->wmem_alloc += skb2->mem_len;
1207                         skb2->sk=sk;
1208                 }
1209                 restore_flags(flags);
1210                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1211 
1212                 /*
1213                  *      Copy the packet header into the new buffer.
1214                  */
1215 
1216                 memcpy(skb2->h.raw, raw, hlen);
1217 
1218                 /*
1219                  *      Copy a block of the IP datagram.
1220                  */
1221                 memcpy(skb2->h.raw + hlen, ptr, len);
1222                 left -= len;
1223 
1224                 skb2->h.raw+=dev->hard_header_len;
1225 
1226                 /*
1227                  *      Fill in the new header fields.
1228                  */
1229                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1230                 iph->frag_off = htons((offset >> 3));
1231                 /*
 1232                  *      Added AC : If we are fragmenting a fragment that's not the
 1233                  *                 last fragment then keep the MF bit set on each piece
1234                  */
1235                 if (left > 0 || (is_frag & 1))
1236                         iph->frag_off |= htons(IP_MF);
1237                 ptr += len;
1238                 offset += len;
1239 
1240                 /*
1241                  *      Put this fragment into the sending queue.
1242                  */
1243 
1244                 ip_statistics.IpFragCreates++;
1245 
1246                 ip_queue_xmit(sk, dev, skb2, 2);
1247         }
1248         ip_statistics.IpFragOKs++;
1249 }
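/*
 *      Editor's sketch -- not part of the original file. The loop above in
 *      miniature: carve a payload into MTU-sized pieces, keep every
 *      non-final piece a multiple of 8 bytes, record the offset in 8 byte
 *      units and set MF on everything but the last piece. Standalone and
 *      illustrative only.
 */
#if 0
#include <stdio.h>

int main(void)
{
        int left = 4000;                /* IP payload bytes to send      */
        int mtu  = 1500 - 20;           /* data space left per fragment  */
        int offset = 0;

        while (left > 0)
        {
                int len = left;
                if (len > mtu)
                        len = mtu;
                if (len < left)         /* not the last piece: align to 8 */
                        len = (len / 8) * 8;
                printf("offset=%4d bytes (%3d x8)  len=%4d  MF=%d\n",
                       offset, offset >> 3, len, len < left);
                offset += len;
                left   -= len;
        }
        return 0;       /* prints offsets 0, 1480, 2960 with MF 1, 1, 0 */
}
#endif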
1250 
1251 
1252 
1253 #ifdef CONFIG_IP_FORWARD
1254 
1255 /*
1256  *      Forward an IP datagram to its next destination.
1257  */
1258 
1259 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1260 {
1261         struct device *dev2;    /* Output device */
1262         struct iphdr *iph;      /* Our header */
1263         struct sk_buff *skb2;   /* Output packet */
1264         struct rtable *rt;      /* Route we use */
1265         unsigned char *ptr;     /* Data pointer */
1266         unsigned long raddr;    /* Router IP address */
1267         
1268         /* 
1269          *      See if we are allowed to forward this.
1270          */
1271 
1272 #ifdef CONFIG_IP_FIREWALL
1273         int err;
1274         
1275         if((err=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0))!=1)
1276         {
1277                 if(err==-1)
1278                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1279                 return;
1280         }
1281 #endif
1282         /*
1283          *      According to the RFC, we must first decrease the TTL field. If
1284          *      that reaches zero, we must reply an ICMP control message telling
1285          *      that the packet's lifetime expired.
1286          *
1287          *      Exception:
1288          *      We may not generate an ICMP for an ICMP. icmp_send does the
1289          *      enforcement of this so we can forget it here. It is however
1290          *      sometimes VERY important.
1291          */
1292 
1293         iph = skb->h.iph;
1294         iph->ttl--;
1295         if (iph->ttl <= 0)
1296         {
1297                 /* Tell the sender its packet died... */
1298                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1299                 return;
1300         }
1301 
1302         /*
1303          *      Re-compute the IP header checksum.
1304          *      This is inefficient. We know what has happened to the header
 1306          *      and could thus adjust the checksum as Phil Karn does in KA9Q (see the sketch after this function)
1306          */
1307 
1308         ip_send_check(iph);
1309 
1310         /*
1311          * OK, the packet is still valid.  Fetch its destination address,
1312          * and give it to the IP sender for further processing.
1313          */
1314 
1315         rt = ip_rt_route(iph->daddr, NULL, NULL);
1316         if (rt == NULL)
1317         {
1318                 /*
1319                  *      Tell the sender its packet cannot be delivered. Again
1320                  *      ICMP is screened later.
1321                  */
1322                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1323                 return;
1324         }
1325 
1326 
1327         /*
1328          * Gosh.  Not only is the packet valid; we even know how to
1329          * forward it onto its final destination.  Can we say this
1330          * is being plain lucky?
1331          * If the router told us that there is no GW, use the dest.
1332          * IP address itself- we seem to be connected directly...
1333          */
1334 
1335         raddr = rt->rt_gateway;
1336 
1337         if (raddr != 0)
1338         {
1339                 /*
1340                  *      There is a gateway so find the correct route for it.
1341                  *      Gateways cannot in turn be gatewayed.
1342                  */
1343                 rt = ip_rt_route(raddr, NULL, NULL);
1344                 if (rt == NULL)
1345                 {
1346                         /*
1347                          *      Tell the sender its packet cannot be delivered...
1348                          */
1349                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1350                         return;
1351                 }
1352                 if (rt->rt_gateway != 0)
1353                         raddr = rt->rt_gateway;
1354         }
1355         else
1356                 raddr = iph->daddr;
1357 
1358         /*
1359          *      Having picked a route we can now send the frame out.
1360          */
1361 
1362         dev2 = rt->rt_dev;
1363 
1364         /*
 1365          *      In IP you should never have to forward a frame out of the interface it
 1366          *      arrived on; if we are about to, the sender has a better route, so we
 1367          *      generate an ICMP HOST REDIRECT giving the route we calculated.
1368          */
1369 #ifdef CONFIG_IP_NO_ICMP_REDIRECT
1370         if (dev == dev2)
1371                 return;
1372 #else
1373         if (dev == dev2 && (iph->saddr&dev->pa_mask) == (iph->daddr & dev->pa_mask))
1374                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1375 #endif          
1376 
1377         /*
1378          * We now allocate a new buffer, and copy the datagram into it.
1379          * If the indicated interface is up and running, kick it.
1380          */
1381 
1382         if (dev2->flags & IFF_UP)
1383         {
1384 
1385                 /*
1386                  *      Current design decrees we copy the packet. For identical header
1387                  *      lengths we could avoid it. The new skb code will let us push
1388                  *      data so the problem goes away then.
1389                  */
1390 
1391                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1392                 /*
1393                  *      This is rare and since IP is tolerant of network failures
1394                  *      quite harmless.
1395                  */
1396                 if (skb2 == NULL)
1397                 {
1398                         printk("\nIP: No memory available for IP forward\n");
1399                         return;
1400                 }
1401                 ptr = skb2->data;
1402                 skb2->free = 1;
1403                 skb2->len = skb->len + dev2->hard_header_len;
1404                 skb2->h.raw = ptr;
1405 
1406                 /*
1407                  *      Copy the packet data into the new buffer.
1408                  */
1409                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1410 
1411                 /* Now build the MAC header. */
1412                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1413 
1414                 ip_statistics.IpForwDatagrams++;
1415 
1416                 /*
1417                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1418                  *      the fragment type. This must be right so that
1419                  *      the fragmenter does the right thing.
1420                  */
1421 
1422                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1423                 {
1424                         ip_fragment(NULL,skb2,dev2, is_frag);
1425                         kfree_skb(skb2,FREE_WRITE);
1426                 }
1427                 else
1428                 {
1429 #ifdef CONFIG_IP_ACCT           
1430                         /*
1431                          *      Count mapping we shortcut
1432                          */
1433                          
1434                         ip_acct_cnt(iph,dev,ip_acct_chain);
1435 #endif                  
1436                         
1437                         /*
1438                          *      Map service types to priority. We lie about
1439                          *      throughput being low priority, but it's a good
1440                          *      choice to help improve general usage.
1441                          */
1442                         if(iph->tos & IPTOS_LOWDELAY)
1443                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1444                         else if(iph->tos & IPTOS_THROUGHPUT)
1445                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1446                         else
1447                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1448                 }
1449         }
1450 }
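/*
 *      Editor's sketch -- not part of the original file. ip_forward() above
 *      recomputes the whole header checksum after decrementing the TTL; its
 *      comment notes that the checksum could instead be adjusted the way
 *      Phil Karn does in KA9Q. When the only change is the TTL decrement,
 *      the 16-bit word holding TTL and protocol drops by 0x0100 and the
 *      checksum can be patched incrementally (RFC 1141/1624 style). The
 *      header values below are arbitrary; words are handled as plain host
 *      integers to keep the sketch byte-order neutral. Standalone and
 *      illustrative only.
 */
#if 0
#include <stdio.h>

/* Full ones-complement checksum over an array of 16-bit words. */
static unsigned int cksum(const unsigned int *w, int n)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i < n; i++)
                sum += w[i];
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned int) (~sum & 0xffff);
}

int main(void)
{
        /* Ten 16-bit header words; h[4] is TTL<<8|protocol, h[5] the checksum. */
        unsigned int h[10] = { 0x4500, 0x0054, 0x1c46, 0x4000,
                               0x4006, 0x0000, 0xc0a8, 0x0001,
                               0xc0a8, 0x00c7 };
        unsigned int patched, recomputed;
        unsigned long sum;

        h[5] = cksum(h, 10);            /* checksum before forwarding       */

        h[4] -= 0x0100;                 /* the TTL decrement                */

        sum = h[5] + 0x0100;            /* incremental patch                */
        patched = (unsigned int) ((sum + (sum >> 16)) & 0xffff);

        h[5] = 0;                       /* full recomputation to compare    */
        recomputed = cksum(h, 10);

        printf("patched=0x%04x  recomputed=0x%04x\n", patched, recomputed);
        return 0;                       /* both print 0x9d45                */
}
#endif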
1451 
1452 
1453 #endif
1454 
1455 /*
1456  *      This function receives all incoming IP datagrams.
1457  */
1458 
1459 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1460 {
1461         struct iphdr *iph = skb->h.iph;
1462         struct sock *raw_sk=NULL;
1463         unsigned char hash;
1464         unsigned char flag = 0;
1465         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1466         struct inet_protocol *ipprot;
1467         static struct options opt; /* since we don't use these yet, and they
1468                                 take up stack space. */
1469         int brd=IS_MYADDR;
1470         int is_frag=0;
1471 #ifdef CONFIG_IP_FIREWALL
1472         int err;
1473 #endif  
1474 
1475         ip_statistics.IpInReceives++;
1476 
1477         /*
1478          *      Tag the ip header of this packet so we can find it
1479          */
1480 
1481         skb->ip_hdr = iph;
1482 
1483         /*
1484          *      Is the datagram acceptable?
1485          *
1486          *      1.      Length at least the size of an ip header
1487          *      2.      Version of 4
1488          *      3.      Checksums correctly. [Speed optimisation for later, skip loopback checksums]
1489          *      (4.     We ought to check for IP multicast addresses and undefined types.. does this matter ?)
1490          */
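             /*
              *      ip_fast_csum() returns zero when the header checksum
              *      verifies, and iph->ihl counts 32 bit words, so 5 is the
              *      smallest legal header length.
              */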
1491 
1492         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1493         {
1494                 ip_statistics.IpInHdrErrors++;
1495                 kfree_skb(skb, FREE_WRITE);
1496                 return(0);
1497         }
1498         
1499         /*
1500          *      See if the firewall wants to dispose of the packet. 
1501          */
1502 
1503 #ifdef  CONFIG_IP_FIREWALL
1504         
1505         if ((err=ip_fw_chk(iph,dev,ip_fw_blk_chain,ip_fw_blk_policy, 0))!=1)
1506         {
1507                 if(err==-1)
1508                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
1509                 kfree_skb(skb, FREE_WRITE);
1510                 return 0;       
1511         }
1512 
1513 #endif
1514         
1515         /*
1516          *      Our transport medium may have padded the buffer out. Now we know it
1517          *      is IP we can trim to the true length of the frame.
1518          */
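             /*
              *      Note that iph->tot_len counts the IP header as well as
              *      the data, so skb->len now covers exactly the IP datagram.
              */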
1519 
1520         skb->len=ntohs(iph->tot_len);
1521 
1522         /*
1523          *      Next analyse the packet for options. Studies show under one packet in
1524          *      a thousand have options....
1525          */
1526 
1527         if (iph->ihl != 5)
1528         {       /* Slow path: this header carries options. The typical optionless packet (ihl == 5) skips this block. */
1529                 memset((char *) &opt, 0, sizeof(opt));
1530                 if (do_options(iph, &opt) != 0)
1531                         return 0;
1532                 opts_p = 1;
1533         }
1534 
1535         /*
1536          *      Remember if the frame is fragmented.
1537          */
1538          
1539         if(iph->frag_off)
1540         {
1541                 if (iph->frag_off & 0x0020)
1542                         is_frag|=1;
1543                 /*
1544                  *      Non-zero offset, so not the first fragment?
1545                  */
1546         
1547                 if (ntohs(iph->frag_off) & 0x1fff)
1548                         is_frag|=2;
1549         }
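             /*
              *      is_frag now encodes the fragment state: bit 0 means more
              *      fragments follow (MF), bit 1 means a non-zero fragment
              *      offset (not the first piece). It is tested below to decide
              *      whether to defragment and is also passed to ip_forward().
              */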
1550         
1551         /*
1552          *      Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
1553          *
1554          *      This is inefficient. While finding out if it is for us we could also compute
1555          *      the routing table entry. This is where the great unified cache theory comes
1556          *      in as and when someone implements it
1557          *
1558          *      For most hosts over 99% of packets match the first conditional
1559          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1560          *      function entry.
1561          */
1562 
1563         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1564         {
1565                 /*
1566                  *      Don't forward multicast or broadcast frames.
1567                  */
1568 
1569                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1570                 {
1571                         kfree_skb(skb,FREE_WRITE);
1572                         return 0;
1573                 }
1574 
1575                 /*
1576                  *      The packet is for another target. Forward the frame
1577                  */
1578 
1579 #ifdef CONFIG_IP_FORWARD
1580                 ip_forward(skb, dev, is_frag);
1581 #else
1582 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1583                         iph->saddr,iph->daddr);*/
1584                 ip_statistics.IpInAddrErrors++;
1585 #endif
1586                 /*
1587                  *      The forwarder is inefficient and copies the packet. We
1588                  *      free the original now.
1589                  */
1590 
1591                 kfree_skb(skb, FREE_WRITE);
1592                 return(0);
1593         }
1594         
1595 #ifdef CONFIG_IP_MULTICAST      
1596 
1597         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1598         {
1599                 /*
1600                  *      Check it is for one of our groups
1601                  */
1602                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1603                 do
1604                 {
1605                         if(ip_mc==NULL)
1606                         {       
1607                                 kfree_skb(skb, FREE_WRITE);
1608                                 return 0;
1609                         }
1610                         if(ip_mc->multiaddr==iph->daddr)
1611                                 break;
1612                         ip_mc=ip_mc->next;
1613                 }
1614                 while(1);
1615         }
1616 #endif
1617         /*
1618          *      Account for the packet
1619          */
1620          
1621 #ifdef CONFIG_IP_ACCT
1622         ip_acct_cnt(iph,dev, ip_acct_chain);
1623 #endif  
1624 
1625         /*
1626          * Reassemble IP fragments.
1627          */
1628 
1629         if(is_frag)
1630         {
1631                 /* Defragment. Obtain the complete packet if there is one */
1632                 skb=ip_defrag(iph,skb,dev);
1633                 if(skb==NULL)
1634                         return 0;
1635                 skb->dev = dev;
1636                 iph=skb->h.iph;
1637         }
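             /*
              *      ip_defrag() returns NULL until the whole datagram has
              *      arrived; when it does return, skb is a freshly glued
              *      buffer, which is why the header pointer is re-read above.
              */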
1638         
1639                  
1640 
1641         /*
1642          *      Point into the IP datagram, just past the header.
1643          */
1644 
1645         skb->ip_hdr = iph;
1646         skb->h.raw += iph->ihl*4;
1647         
1648         /*
1649          *      Deliver to raw sockets. This is fun, as we want to avoid making any surplus copies.
1650          */
1651          
1652         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1653         
1654         /* If there may be a raw socket we must check; if not we couldn't care less */
1655         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1656         {
1657                 struct sock *sknext=NULL;
1658                 struct sk_buff *skb1;
1659                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1660                 if(raw_sk)      /* Any raw sockets */
1661                 {
1662                         do
1663                         {
1664                                 /* Find the next */
1665                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1666                                 if(sknext)
1667                                         skb1=skb_clone(skb, GFP_ATOMIC);
1668                                 else
1669                                         break;  /* One pending raw socket left */
1670                                 if(skb1)
1671                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1672                                 raw_sk=sknext;
1673                         }
1674                         while(raw_sk!=NULL);
1675                         /* Here either raw_sk is the last raw socket, or NULL if none */
1676                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1677                 }
1678         }
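             /*
              *      Every matching raw socket except the last received a clone
              *      above. The last one stays in raw_sk and is handed the
              *      original buffer once the protocol handlers have run (see
              *      the raw_rcv() call at the end), saving one copy.
              */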
1679         
1680         /*
1681          *      skb->h.raw now points at the protocol beyond the IP header.
1682          */
1683 
1684         hash = iph->protocol & (MAX_INET_PROTOS -1);
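             /*
              *      The AND above only works as a cheap modulo because
              *      MAX_INET_PROTOS is a power of two; each bucket holds a
              *      chain of inet_protocol entries which is matched on
              *      ->protocol below.
              */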
1685         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1686         {
1687                 struct sk_buff *skb2;
1688 
1689                 if (ipprot->protocol != iph->protocol)
1690                         continue;
1691                 /*
1692                  *      See if we need to make a copy of it.  This will
1693                  *      only be set if more than one protocol wants it,
1694                  *      and then not for the last one. If there is a pending
1695                  *      raw delivery, wait for that.
1696                  */
1697                 if (ipprot->copy || raw_sk)
1698                 {
1699                         skb2 = skb_clone(skb, GFP_ATOMIC);
1700                         if(skb2==NULL)
1701                                 continue;
1702                 }
1703                 else
1704                 {
1705                         skb2 = skb;
1706                 }
1707                 flag = 1;
1708 
1709                /*
1710                 * Pass on the datagram to each protocol that wants it,
1711                 * based on the datagram protocol.  We should really
1712                 * check the protocol handler's return values here...
1713                 */
1714                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1715                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1716                                 iph->saddr, 0, ipprot);
1717 
1718         }
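             /*
              *      flag records whether at least one protocol handler
              *      accepted the datagram. If it is still clear, and no raw
              *      socket claims the frame either, we free it below and send
              *      an ICMP protocol unreachable for unicast destinations.
              */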
1719 
1720         /*
1721          * All protocols checked.
1722          * If this packet was a broadcast, we may *not* reply to it, since that
1723          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1724          * ICMP reply messages get queued up for transmission...)
1725          */
1726 
1727         if(raw_sk!=NULL)        /* Shift to last raw user */
1728                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1729         else if (!flag)         /* Free and report errors */
1730         {
1731                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1732                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
1733                 kfree_skb(skb, FREE_WRITE);
1734         }
1735 
1736         return(0);
1737 }
1738 
1739 /*
1740  *      Loop a packet back to the sender.
1741  */
1742  
1743 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
1744 {
1745         extern struct device loopback_dev;
1746         struct device *dev=&loopback_dev;
1747         int len=skb->len-old_dev->hard_header_len;
1748         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1749         
1750         if(newskb==NULL)
1751                 return;
1752                 
1753         newskb->link3=NULL;
1754         newskb->sk=NULL;
1755         newskb->dev=dev;
1756         newskb->saddr=skb->saddr;
1757         newskb->daddr=skb->daddr;
1758         newskb->raddr=skb->raddr;
1759         newskb->free=1;
1760         newskb->lock=0;
1761         newskb->users=0;
1762         newskb->pkt_type=skb->pkt_type;
1763         newskb->len=len+dev->hard_header_len;
1764         
1765         
1766         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1767         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1768 
1769         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1770                 
1771         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1772         ip_queue_xmit(NULL, dev, newskb, 1);
1773 }
1774 
1775 
1776 /*
1777  * Queues a packet to be sent, and starts the transmitter
1778  * if necessary.  If free == 1 we free the block after
1779  * transmit, otherwise we don't. If free == 2 we not only
1780  * free the block but also don't assign it a new IP
1781  * identification number. This routine also fills in the
1782  * total length and computes the header checksum.
1783  */
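     /*
      *      Note that free == 2 only suppresses the assignment of a fresh
      *      identification field and is then treated as free == 1, while
      *      free == 0 additionally leaves the buffer chained on sk->send_head
      *      so that the owning socket (TCP in practice) can retransmit it.
      */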
1784 
1785 void ip_queue_xmit(struct sock *sk, struct device *dev,
1786               struct sk_buff *skb, int free)
1787 {
1788         struct iphdr *iph;
1789         unsigned char *ptr;
1790 
1791         /* Sanity check */
1792         if (dev == NULL)
1793         {
1794                 printk("IP: ip_queue_xmit dev = NULL\n");
1795                 return;
1796         }
1797 
1798         IS_SKB(skb);
1799 
1800         /*
1801          *      Do some book-keeping in the packet for later
1802          */
1803 
1804 
1805         skb->dev = dev;
1806         skb->when = jiffies;
1807 
1808         /*
1809          *      Find the IP header and set the length. This is bad,
1810          *      but once we get the skb data handling code in, the
1811          *      hardware will push its header sensibly and we will
1812          *      set skb->ip_hdr to avoid this mess and the fixed
1813          *      header length problem.
1814          */
1815 
1816         ptr = skb->data;
1817         ptr += dev->hard_header_len;
1818         iph = (struct iphdr *)ptr;
1819         skb->ip_hdr = iph;
1820         iph->tot_len = htons(skb->len-dev->hard_header_len);   /* host to network order */
1821 
1822 #ifdef CONFIG_IP_FIREWALL
1823         if(ip_fw_chk(iph, dev, ip_fw_blk_chain, ip_fw_blk_policy, 0) != 1)
1824                 /* just don't send this packet */
1825                 return;
1826 #endif  
1827 
1828         /*
1829          *      No reassigning numbers to fragments...
1830          */
1831 
1832         if(free!=2)
1833                 iph->id      = htons(ip_id_count++);
1834         else
1835                 free=1;
1836 
1837         /* All buffers without an owner socket get freed */
1838         if (sk == NULL)
1839                 free = 1;
1840 
1841         skb->free = free;
1842 
1843         /*
1844          *      Do we need to fragment. Again this is inefficient.
1845          *      We need to somehow lock the original buffer and use
1846          *      bits of it.
1847          */
1848 
1849         if(skb->len > dev->mtu + dev->hard_header_len)
1850         {
1851                 ip_fragment(sk,skb,dev,0);
1852                 IS_SKB(skb);
1853                 kfree_skb(skb,FREE_WRITE);
1854                 return;
1855         }
1856 
1857         /*
1858          *      Add an IP checksum
1859          */
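             /*
              *      ip_send_check() fills in iph->check over the finished
              *      header, so it has to come after tot_len and id are set up.
              */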
1860 
1861         ip_send_check(iph);
1862 
1863         /*
1864          *      Print the frame when debugging
1865          */
1866 
1867         /*
1868          *      More debugging. You cannot queue a packet that is already
1869          *      on a list; spot this and moan loudly.
1870          */
1871         if (skb->next != NULL)
1872         {
1873                 printk("ip_queue_xmit: next != NULL\n");
1874                 skb_unlink(skb);
1875         }
1876 
1877         /*
1878          *      If a sender wishes the packet to remain unfreed
1879          *      we add it to his send queue. This arguably belongs
1880          *      in the TCP level since nobody else uses it. BUT
1881          *      remember IPng might change all the rules.
1882          */
1883 
1884         if (!free)
1885         {
1886                 unsigned long flags;
1887                 /* The socket now has more outstanding blocks */
1888 
1889                 sk->packets_out++;
1890 
1891                 /* Protect the list for a moment */
1892                 save_flags(flags);
1893                 cli();
1894 
1895                 if (skb->link3 != NULL)
1896                 {
1897                         printk("ip.c: link3 != NULL\n");
1898                         skb->link3 = NULL;
1899                 }
1900                 if (sk->send_head == NULL)
1901                 {
1902                         sk->send_tail = skb;
1903                         sk->send_head = skb;
1904                 }
1905                 else
1906                 {
1907                         sk->send_tail->link3 = skb;
1908                         sk->send_tail = skb;
1909                 }
1910                 /* skb->link3 is NULL */
1911 
1912                 /* Interrupt restore */
1913                 restore_flags(flags);
1914         }
1915         else
1916                 /* Remember who owns the buffer */
1917                 skb->sk = sk;
1918 
1919         /*
1920          *      If the indicated interface is up and running, send the packet.
1921          */
1922          
1923         ip_statistics.IpOutRequests++;
1924 #ifdef CONFIG_IP_ACCT
1925         ip_acct_cnt(iph,dev, ip_acct_chain);
1926 #endif  
1927         
1928 #ifdef CONFIG_IP_MULTICAST      
1929 
1930         /*
1931          *      Multicasts are looped back for other local users
1932          */
1933          
1934         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1935         {
1936                 if(sk==NULL || sk->ip_mc_loop)
1937                 {
1938                         if(iph->daddr==IGMP_ALL_HOSTS)
1939                                 ip_loopback(dev,skb);
1940                         else
1941                         {
1942                                 struct ip_mc_list *imc=dev->ip_mc_list;
1943                                 while(imc!=NULL)
1944                                 {
1945                                         if(imc->multiaddr==iph->daddr)
1946                                         {
1947                                                 ip_loopback(dev,skb);
1948                                                 break;
1949                                         }
1950                                         imc=imc->next;
1951                                 }
1952                         }
1953                 }
1954                 /* Multicasts with ttl 0 must not go beyond the host */
1955                 
1956                 if(skb->ip_hdr->ttl==0)
1957                 {
1958                         kfree_skb(skb, FREE_READ);
1959                         return;
1960                 }
1961         }
1962 #endif
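             /*
              *      Loop broadcasts back by hand as well: an interface does
              *      not normally receive its own transmissions, so local
              *      listeners would otherwise never see the frame.
              */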
1963         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1964                 ip_loopback(dev,skb);
1965                 
1966         if (dev->flags & IFF_UP)
1967         {
1968                 /*
1969                  *      If we have an owner use its priority setting,
1970                  *      otherwise use NORMAL
1971                  */
1972 
1973                 if (sk != NULL)
1974                 {
1975                         dev_queue_xmit(skb, dev, sk->priority);
1976                 }
1977                 else
1978                 {
1979                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1980                 }
1981         }
1982         else
1983         {
1984                 ip_statistics.IpOutDiscards++;
1985                 if (free)
1986                         kfree_skb(skb, FREE_WRITE);
1987         }
1988 }
1989 
1990 
1991 
1992 #ifdef CONFIG_IP_MULTICAST
1993 
1994 /*
1995  *      Write a multicast group list table for the IGMP daemon to
1996  *      read.
1997  */
1998  
1999 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
2000 {
2001         off_t pos=0, begin=0;
2002         struct ip_mc_list *im;
2003         unsigned long flags;
2004         int len=0;
2005         struct device *dev;
2006         
2007         len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");  
2008         save_flags(flags);
2009         cli();
2010         
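             /*
              *      Standard /proc text bookkeeping: the buffer is filled
              *      linearly, begin/pos track how far into the virtual file we
              *      have written, and we stop once pos passes offset+length.
              *      *start and the returned len describe the requested slice.
              */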
2011         for(dev = dev_base; dev; dev = dev->next)
2012         {
2013                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
2014                 {
2015                         len+=sprintf(buffer+len,"%-10s: %5d\n",
2016                                         dev->name, dev->mc_count);
2017                         for(im = dev->ip_mc_list; im; im = im->next)
2018                         {
2019                                 len+=sprintf(buffer+len,
2020                                         "\t\t\t%08lX %5d %d:%08lX\n",
2021                                         im->multiaddr, im->users,
2022                                         im->tm_running, im->timer.expires);
2023                                 pos=begin+len;
2024                                 if(pos<offset)
2025                                 {
2026                                         len=0;
2027                                         begin=pos;
2028                                 }
2029                                 if(pos>offset+length)
2030                                         break;
2031                         }
2032                 }
2033         }
2034         restore_flags(flags);
2035         *start=buffer+(offset-begin);
2036         len-=(offset-begin);
2037         if(len>length)
2038                 len=length;     
2039         return len;
2040 }
2041 
2042 
2043 #endif  
2044 /*
2045  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2046  *      an IP socket.
2047  *
2048  *      We implement IP_TOS (type of service) and IP_TTL (time to live).
2049  *
2050  *      Next release we will sort out IP_OPTIONS, since for some people they are kind of important.
2051  */
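     /*
      *      Roughly, a user level program would set these with something like
      *      the sketch below (the values are only examples):
      *
      *              int fd  = socket(AF_INET, SOCK_DGRAM, 0);
      *              int ttl = 64;
      *              int tos = IPTOS_LOWDELAY;
      *
      *              setsockopt(fd, SOL_IP, IP_TTL, (char *)&ttl, sizeof(ttl));
      *              setsockopt(fd, SOL_IP, IP_TOS, (char *)&tos, sizeof(tos));
      *
      *      Note that IPTOS_LOWDELAY and IPTOS_THROUGHPUT also adjust
      *      sk->priority in the handler below.
      */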
2052 
2053 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
2054 {
2055         int val,err;
2056 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2057         struct ip_fw tmp_fw;
2058 #endif  
2059         if (optval == NULL)
2060                 return(-EINVAL);
2061 
2062         err=verify_area(VERIFY_READ, optval, sizeof(int));
2063         if(err)
2064                 return err;
2065 
2066         val = get_fs_long((unsigned long *)optval);
2067 
2068         if(level!=SOL_IP)
2069                 return -EOPNOTSUPP;
2070 
2071         switch(optname)
2072         {
2073                 case IP_TOS:
2074                         if(val<0||val>255)
2075                                 return -EINVAL;
2076                         sk->ip_tos=val;
2077                         if(val==IPTOS_LOWDELAY)
2078                                 sk->priority=SOPRI_INTERACTIVE;
2079                         if(val==IPTOS_THROUGHPUT)
2080                                 sk->priority=SOPRI_BACKGROUND;
2081                         return 0;
2082                 case IP_TTL:
2083                         if(val<1||val>255)
2084                                 return -EINVAL;
2085                         sk->ip_ttl=val;
2086                         return 0;
2087 #ifdef CONFIG_IP_MULTICAST
2088                 case IP_MULTICAST_TTL: 
2089                 {
2090                         unsigned char ucval;
2091 
2092                         ucval=get_fs_byte((unsigned char *)optval);
2093                         if(ucval<1||ucval>255)
2094                                 return -EINVAL;
2095                         sk->ip_mc_ttl=(int)ucval;
2096                         return 0;
2097                 }
2098                 case IP_MULTICAST_LOOP: 
2099                 {
2100                         unsigned char ucval;
2101 
2102                         ucval=get_fs_byte((unsigned char *)optval);
2103                         if(ucval!=0 && ucval!=1)
2104                                  return -EINVAL;
2105                         sk->ip_mc_loop=(int)ucval;
2106                         return 0;
2107                 }
2108                 case IP_MULTICAST_IF: 
2109                 {
2110                         /* Not fully tested */
2111                         struct in_addr addr;
2112                         struct device *dev=NULL;
2113                         
2114                         /*
2115                          *      Check the arguments are allowable
2116                          */
2117 
2118                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2119                         if(err)
2120                                 return err;
2121                                 
2122                         memcpy_fromfs(&addr,optval,sizeof(addr));
2123                         
2124                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2125                         
2126                         /*
2127                          *      What address has been requested
2128                          */
2129                         
2130                         if(addr.s_addr==INADDR_ANY)     /* Default */
2131                         {
2132                                 sk->ip_mc_name[0]=0;
2133                                 return 0;
2134                         }
2135                         
2136                         /*
2137                          *      Find the device
2138                          */
2139                          
2140                         for(dev = dev_base; dev; dev = dev->next)
2141                         {
2142                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2143                                         (dev->pa_addr==addr.s_addr))
2144                                         break;
2145                         }
2146                         
2147                         /*
2148                          *      Did we find one
2149                          */
2150                          
2151                         if(dev) 
2152                         {
2153                                 strcpy(sk->ip_mc_name,dev->name);
2154                                 return 0;
2155                         }
2156                         return -EADDRNOTAVAIL;
2157                 }
2158                 
2159                 case IP_ADD_MEMBERSHIP: 
2160                 {
2161                 
2162 /*
2163  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2164  */
2165                         struct ip_mreq mreq;
2166                         static struct options optmem;
2167                         unsigned long route_src;
2168                         struct rtable *rt;
2169                         struct device *dev=NULL;
2170                         
2171                         /*
2172                          *      Check the arguments.
2173                          */
2174 
2175                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2176                         if(err)
2177                                 return err;
2178 
2179                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2180 
2181                         /* 
2182                          *      Get device for use later
2183                          */
2184 
2185                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2186                         {
2187                                 /*
2188                                  *      Not set so scan.
2189                                  */
2190                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2191                                 {
2192                                         dev=rt->rt_dev;
2193                                         rt->rt_use--;
2194                                 }
2195                         }
2196                         else
2197                         {
2198                                 /*
2199                                  *      Find a suitable device.
2200                                  */
2201                                 for(dev = dev_base; dev; dev = dev->next)
2202                                 {
2203                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2204                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2205                                                 break;
2206                                 }
2207                         }
2208                         
2209                         /*
2210                          *      No device, no cookies.
2211                          */
2212                          
2213                         if(!dev)
2214                                 return -ENODEV;
2215                                 
2216                         /*
2217                          *      Join group.
2218                          */
2219                          
2220                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2221                 }
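                     /*
                      *      Roughly, a user level program joins a group with
                      *      something like this (the group address is only an
                      *      example and fd is an open UDP socket):
                      *
                      *              struct ip_mreq mreq;
                      *
                      *              mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.9");
                      *              mreq.imr_interface.s_addr = htonl(INADDR_ANY);
                      *              setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
                      *                         (char *)&mreq, sizeof(mreq));
                      *
                      *      With INADDR_ANY the routing table picks the device,
                      *      exactly as the code above does.
                      */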
2222                 
2223                 case IP_DROP_MEMBERSHIP: 
2224                 {
2225                         struct ip_mreq mreq;
2226                         struct rtable *rt;
2227                         static struct options optmem;
2228                         unsigned long route_src;
2229                         struct device *dev=NULL;
2230 
2231                         /*
2232                          *      Check the arguments
2233                          */
2234                          
2235                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2236                         if(err)
2237                                 return err;
2238 
2239                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2240 
2241                         /*
2242                          *      Get device for use later 
2243                          */
2244  
2245                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2246                         {
2247                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2248                                 {
2249                                         dev=rt->rt_dev;
2250                                         rt->rt_use--;
2251                                 }
2252                         }
2253                         else 
2254                         {
2255                                 for(dev = dev_base; dev; dev = dev->next)
2256                                 {
2257                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2258                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2259                                                 break;
2260                                 }
2261                         }
2262                         
2263                         /*
2264                          *      Did we find a suitable device?
2265                          */
2266                          
2267                         if(!dev)
2268                                 return -ENODEV;
2269                                 
2270                         /*
2271                          *      Leave group
2272                          */
2273                          
2274                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2275                 }
2276 #endif                  
2277 #ifdef CONFIG_IP_FIREWALL
2278                 case IP_FW_ADD_BLK:
2279                 case IP_FW_DEL_BLK:
2280                 case IP_FW_ADD_FWD:
2281                 case IP_FW_DEL_FWD:
2282                 case IP_FW_CHK_BLK:
2283                 case IP_FW_CHK_FWD:
2284                 case IP_FW_FLUSH_BLK:
2285                 case IP_FW_FLUSH_FWD:
2286                 case IP_FW_ZERO_BLK:
2287                 case IP_FW_ZERO_FWD:
2288                 case IP_FW_POLICY_BLK:
2289                 case IP_FW_POLICY_FWD:
2290                         if(!suser())
2291                                 return -EPERM;
2292                         if(optlen>sizeof(tmp_fw) || optlen<1)
2293                                 return -EINVAL;
2294                         err=verify_area(VERIFY_READ,optval,optlen);
2295                         if(err)
2296                                 return err;
2297                         memcpy_fromfs(&tmp_fw,optval,optlen);
2298                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2299                         return -err;    /* -0 is 0 after all */
2300                         
2301 #endif
2302 #ifdef CONFIG_IP_ACCT
2303                 case IP_ACCT_DEL:
2304                 case IP_ACCT_ADD:
2305                 case IP_ACCT_FLUSH:
2306                 case IP_ACCT_ZERO:
2307                         if(!suser())
2308                                 return -EPERM;
2309                         if(optlen>sizeof(tmp_fw) || optlen<1)
2310                                 return -EINVAL;
2311                         err=verify_area(VERIFY_READ,optval,optlen);
2312                         if(err)
2313                                 return err;
2314                         memcpy_fromfs(&tmp_fw, optval,optlen);
2315                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2316                         return -err;    /* -0 is 0 after all */
2317 #endif
2318                 /* IP_OPTIONS and friends go here eventually */
2319                 default:
2320                         return(-ENOPROTOOPT);
2321         }
2322 }
2323 
2324 /*
2325  *      Get the socket options. Note for future reference: the GET of IP options gets the
2326  *      _received_ ones, the SET sets the _sent_ ones.
2327  */
2328 
2329 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2330 {
2331         int val,err;
2332 #ifdef CONFIG_IP_MULTICAST
2333         int len;
2334 #endif
2335         
2336         if(level!=SOL_IP)
2337                 return -EOPNOTSUPP;
2338 
2339         switch(optname)
2340         {
2341                 case IP_TOS:
2342                         val=sk->ip_tos;
2343                         break;
2344                 case IP_TTL:
2345                         val=sk->ip_ttl;
2346                         break;
2347 #ifdef CONFIG_IP_MULTICAST                      
2348                 case IP_MULTICAST_TTL:
2349                         val=sk->ip_mc_ttl;
2350                         break;
2351                 case IP_MULTICAST_LOOP:
2352                         val=sk->ip_mc_loop;
2353                         break;
2354                 case IP_MULTICAST_IF:
2355                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2356                         if(err)
2357                                 return err;
2358                         len=strlen(sk->ip_mc_name);
2359                         err=verify_area(VERIFY_WRITE, optval, len);
2360                         if(err)
2361                                 return err;
2362                         put_fs_long(len,(unsigned long *) optlen);
2363                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2364                         return 0;
2365 #endif
2366                 default:
2367                         return(-ENOPROTOOPT);
2368         }
2369         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2370         if(err)
2371                 return err;
2372         put_fs_long(sizeof(int),(unsigned long *) optlen);
2373 
2374         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2375         if(err)
2376                 return err;
2377         put_fs_long(val,(unsigned long *)optval);
2378 
2379         return(0);
2380 }
2381 
2382 /*
2383  *      IP protocol layer initialiser
2384  */
2385 
2386 static struct packet_type ip_packet_type =
2387 {
2388         0,      /* MUTTER ntohs(ETH_P_IP),*/
2389         NULL,   /* All devices */
2390         ip_rcv,
2391         NULL,
2392         NULL,
2393 };
2394 
2395 /*
2396  *      Device notifier
2397  */
2398  
2399 static int ip_rt_event(unsigned long event, void *ptr)
2400 {
2401         if(event==NETDEV_DOWN)
2402                 ip_rt_flush(ptr);
2403         return NOTIFY_DONE;
2404 }
2405 
2406 struct notifier_block ip_rt_notifier={
2407         ip_rt_event,
2408         NULL,
2409         0
2410 };
2411 
2412 /*
2413  *      IP registers the packet type and then calls the subprotocol initialisers
2414  */
2415 
2416 void ip_init(void)
2417 {
2418         ip_packet_type.type=htons(ETH_P_IP);
2419         dev_add_pack(&ip_packet_type);
2420 
2421         /* So we flush routes when a device is downed */        
2422         register_netdevice_notifier(&ip_rt_notifier);
2423 /*      ip_raw_init();
2424         ip_packet_init();
2425         ip_tcp_init();
2426         ip_udp_init();*/
2427 }
